HΦ  3.2.0
expec_totalspin.c File Reference

File for calculating total spin. More...

#include <bitcalc.h>
#include "mltplyCommon.h"
#include "mltplySpinCore.h"
#include "wrapperMPI.h"
#include "mltplyMPISpin.h"
#include "mltplyMPISpinCore.h"
#include "expec_totalspin.h"

Go to the source code of this file.

Functions

int expec_totalspin (struct BindStruct *X, double complex *vec)
 Parent function for the calculation of the total spin. More...
 
void totalspin_Hubbard (struct BindStruct *X, double complex *vec)
 Function for calculating the total spin for the Hubbard model. More...
 
void totalspin_HubbardGC (struct BindStruct *X, double complex *vec)
 Function for calculating the total spin for the Hubbard model in the grand canonical ensemble. More...
 
void totalspin_Spin (struct BindStruct *X, double complex *vec)
 Function for calculating the total spin for the spin model. More...
 
void totalspin_SpinGC (struct BindStruct *X, double complex *vec)
 Function for calculating the total spin for the spin model in the grand canonical ensemble. More...
 
int expec_totalSz (struct BindStruct *X, double complex *vec)
 
void totalSz_HubbardGC (struct BindStruct *X, double complex *vec)
 Function for calculating the total Sz for the Hubbard model in the grand canonical ensemble. More...
 
void totalSz_SpinGC (struct BindStruct *X, double complex *vec)
 Function for calculating the total Sz for the spin model in the grand canonical ensemble. More...
 

Detailed Description

File for calculating total spin.

Version
0.2

Modified to treat the case of general spin.

Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition in file expec_totalspin.c.
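
For orientation, the routines in this file evaluate the expectation values of the total spin squared and of the total Sz for a given eigenvector. A minimal sketch of the standard operator decomposition they rely on (ordinary spin algebra, stated here for reference rather than quoted from the source):

\[
\langle \hat{S}^2 \rangle = \sum_{i,j} \Big[ \langle \hat{S}^z_i \hat{S}^z_j \rangle + \tfrac{1}{2} \langle \hat{S}^+_i \hat{S}^-_j + \hat{S}^-_i \hat{S}^+_j \rangle \Big], \qquad
\langle \hat{S}^z \rangle = \sum_i \langle \hat{S}^z_i \rangle .
\]

In the spin-1/2 and Hubbard routines the diagonal part shows up as the factors of 1/4 (since \(S^z_i = (n_{i\uparrow} - n_{i\downarrow})/2\)) and the exchange part as the factors of 1/2 in the loops below.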

Function Documentation

◆ expec_totalspin()

int expec_totalspin ( struct BindStruct *  X,
double complex *  vec 
)

Parent function for the calculation of the total spin.

Parameters
[in,out]  X    data list of calculation parameters
[in]      vec  eigenvector
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)
Return values
0    calculation is normally finished

Definition at line 50 of file expec_totalspin.c.

References BindStruct::Def, DefineList::iCalcModel, BindStruct::Large, LargeList::mode, BindStruct::Phys, PhysList::s2, PhysList::Sz, DefineList::Total2SzMPI, totalspin_Hubbard(), totalspin_HubbardGC(), totalspin_Spin(), and totalspin_SpinGC().

Referenced by phys().

54 {
55  X->Large.mode = M_TOTALS;
56  switch(X->Def.iCalcModel){
57  case Spin:
58  totalspin_Spin(X, vec);
59  X->Phys.Sz=X->Def.Total2SzMPI/2.;
60  break;
61  case SpinGC:
62  totalspin_SpinGC(X, vec);
63  break;
64  case Hubbard:
65  case Kondo:
66  totalspin_Hubbard(X, vec);
67  break;
68  case HubbardGC:
69  case KondoGC:
70  totalspin_HubbardGC(X, vec);
71  break;
72  default:
73  X->Phys.s2=0.0;
74  X->Phys.Sz=0.0;
75  }
76  return 0;
77 }
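A minimal usage sketch (assuming a BindStruct X that has been fully initialized by the usual HPhi setup routines and a normalized eigenvector vec; the helper below is hypothetical and only illustrates where the results are stored):

#include <stdio.h>
#include <complex.h>
#include "struct.h"
#include "expec_totalspin.h"

/* Hypothetical helper: X and vec are assumed to be prepared elsewhere
 * by the standard HPhi initialization and diagonalization steps.      */
void report_totalspin(struct BindStruct *X, double complex *vec) {
  if (expec_totalspin(X, vec) == 0) {   /* 0: calculation finished normally */
    /* expec_totalspin() writes its results into the Phys substructure. */
    printf("<S^2> = %.10lf\n", X->Phys.s2);
    printf("<Sz>  = %.10lf\n", X->Phys.Sz);
  }
}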

◆ expec_totalSz()

int expec_totalSz ( struct BindStruct *  X,
double complex *  vec 
)

Definition at line 649 of file expec_totalspin.c.

References BindStruct::Def, DefineList::iCalcModel, BindStruct::Large, LargeList::mode, BindStruct::Phys, PhysList::Sz, DefineList::Total2SzMPI, totalSz_HubbardGC(), and totalSz_SpinGC().

Referenced by CalcByLanczos().

652  {
653  X->Large.mode = M_TOTALS;
654  switch (X->Def.iCalcModel) {
655  case Spin:
656  X->Phys.Sz = X->Def.Total2SzMPI / 2.;
657  break;
658  case SpinGC:
659  totalSz_SpinGC(X, vec);
660  break;
661  case Hubbard:
662  case Kondo:
663  X->Phys.Sz = X->Def.Total2SzMPI / 2.;
664 
665  break;
666  case HubbardGC:
667  case KondoGC:
668  totalSz_HubbardGC(X, vec);
669  break;
670  default:
671  X->Phys.Sz = 0.0;
672  }
673 
674  return 0;
675 }

◆ totalspin_Hubbard()

void totalspin_Hubbard ( struct BindStruct *  X,
double complex *  vec 
)

Function for calculating the total spin for the Hubbard model.

Parameters
[in,out]  X    data list of calculation parameters
          vec  eigenvector
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 88 of file expec_totalspin.c.

References BindStruct::Check, BindStruct::Def, GetOffComp(), GetSplitBitByModel(), DefineList::iCalcModel, CheckList::idim_max, list_1, list_2_1, list_2_2, DefineList::Nsite, DefineList::NsiteMPI, BindStruct::Phys, PhysList::s2, PhysList::Sz, and DefineList::Tpow.

Referenced by expec_totalspin().

88  {
89  long unsigned int j;
90  long unsigned int irght, ilft, ihfbit;
91  long unsigned int isite1, isite2;
92  long unsigned int is1_up, is2_up, is1_down, is2_down;
93  long unsigned int iexchg, off;
94  int num1_up, num2_up;
95  int num1_down, num2_down;
96  long unsigned int ibit1_up, ibit2_up, ibit1_down, ibit2_down;
97  double complex spn_z, tmp_spn_z;
98  double complex spn;
99  long unsigned i_max;
100  i_max = X->Check.idim_max;
101 
102  GetSplitBitByModel(X->Def.Nsite, X->Def.iCalcModel, &irght, &ilft, &ihfbit);
103  spn = 0.0;
104  spn_z = 0.0;
105  for (isite1 = 1; isite1 <= X->Def.NsiteMPI; isite1++) {
106  is1_up = X->Def.Tpow[2 * isite1 - 2];
107  is1_down = X->Def.Tpow[2 * isite1 - 1];
108 
109  for (isite2 = 1; isite2 <= X->Def.NsiteMPI; isite2++) {
110  is2_up = X->Def.Tpow[2 * isite2 - 2];
111  is2_down = X->Def.Tpow[2 * isite2 - 1];
112 
113 #pragma omp parallel for reduction(+:spn, spn_z) default(none) firstprivate(i_max, is1_up, is1_down, is2_up, is2_down, irght, ilft, ihfbit, isite1, isite2) private(ibit1_up, num1_up, ibit2_up, num2_up, ibit1_down, num1_down, ibit2_down, num2_down, tmp_spn_z, iexchg, off) shared(vec, list_1, list_2_1, list_2_2)
114  for (j = 1; j <= i_max; j++) {
115 
116  ibit1_up = list_1[j] & is1_up;
117  num1_up = ibit1_up / is1_up;
118  ibit2_up = list_1[j] & is2_up;
119  num2_up = ibit2_up / is2_up;
120 
121  ibit2_down = list_1[j] & is2_down;
122  num2_down = ibit2_down / is2_down;
123  ibit1_down = list_1[j] & is1_down;
124  num1_down = ibit1_down / is1_down;
125 
126  tmp_spn_z = (num1_up - num1_down) * (num2_up - num2_down);
127  spn += conj(vec[j]) * vec[j] * tmp_spn_z / 4.0;
128  if (isite1 == isite2) {
129  spn += conj(vec[j]) * vec[j] * (num1_up + num1_down - 2 * num1_up * num1_down) / 2.0;
130  spn_z += conj(vec[j]) * vec[j] * (num1_up - num1_down) / 2.0;
131  } else {
132  if (ibit1_up != 0 && ibit1_down == 0 && ibit2_up == 0 && ibit2_down != 0) {
133  iexchg = list_1[j] - (is1_up + is2_down);
134  iexchg += (is2_up + is1_down);
135  GetOffComp(list_2_1, list_2_2, iexchg, irght, ilft, ihfbit, &off);
136  spn += conj(vec[j]) * vec[off] / 2.0;
137  } else if (ibit1_up == 0 && ibit1_down != 0 && ibit2_up != 0 && ibit2_down == 0) {
138  iexchg = list_1[j] - (is1_down + is2_up);
139  iexchg += (is2_down + is1_up);
140  GetOffComp(list_2_1, list_2_2, iexchg, irght, ilft, ihfbit, &off);
141  spn += conj(vec[j]) * vec[off] / 2.0;
142  }
143  }
144  }
145  }
146  }
147  X->Phys.s2 = creal(spn);
148  X->Phys.Sz = creal(spn_z);
149 }
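The occupation numbers in the loop above are read off with the Tpow bit masks. A standalone illustration of that idiom, with made-up values that are not tied to any particular model definition:

#include <stdio.h>

/* The mask/division idiom used above: (state & mask) / mask equals 1 when
 * the single bit in `mask` (a power of two, as stored in Def.Tpow) is set
 * in `state`, and 0 otherwise. The values below are purely illustrative.  */
int main(void) {
  unsigned long state  = 0x6UL;  /* bits 1 and 2 set */
  unsigned long is1_up = 0x1UL;  /* bit 0: up-spin orbital of site 1   */
  unsigned long is1_dn = 0x2UL;  /* bit 1: down-spin orbital of site 1 */

  int num1_up = (int)((state & is1_up) / is1_up);  /* -> 0 */
  int num1_dn = (int)((state & is1_dn) / is1_dn);  /* -> 1 */

  printf("n_up = %d, n_down = %d, 2Sz = %d\n",
         num1_up, num1_dn, num1_up - num1_dn);
  return 0;
}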

◆ totalspin_HubbardGC()

void totalspin_HubbardGC ( struct BindStruct *  X,
double complex *  vec 
)

Function for calculating the total spin for the Hubbard model in the grand canonical ensemble.

Parameters
[in,out]  X    data list of calculation parameters
          vec  eigenvector
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 161 of file expec_totalspin.c.

References BindStruct::Check, BindStruct::Def, CheckList::idim_max, DefineList::NsiteMPI, BindStruct::Phys, PhysList::s2, PhysList::Sz, and DefineList::Tpow.

Referenced by expec_totalspin().

161  {
162  long unsigned int j;
163  long unsigned int isite1, isite2;
164  long unsigned int is1_up, is2_up, is1_down, is2_down;
165  long unsigned int iexchg, off;
166  int num1_up, num2_up;
167  int num1_down, num2_down;
168  long unsigned int ibit1_up, ibit2_up, ibit1_down, ibit2_down, list_1_j;
169  double complex spn_z, tmp_spn_z;
170  double complex spn;
171  long unsigned int i_max;
172 
173  i_max = X->Check.idim_max;
174 
175  spn = 0.0;
176  spn_z = 0.0;
177  for (isite1 = 1; isite1 <= X->Def.NsiteMPI; isite1++) {
178  for (isite2 = 1; isite2 <= X->Def.NsiteMPI; isite2++) {
179  is1_up = X->Def.Tpow[2 * isite1 - 2];
180  is1_down = X->Def.Tpow[2 * isite1 - 1];
181  is2_up = X->Def.Tpow[2 * isite2 - 2];
182  is2_down = X->Def.Tpow[2 * isite2 - 1];
183 
184 #pragma omp parallel for reduction(+:spn, spn_z) default(none) firstprivate(i_max, is1_up, is1_down, is2_up, is2_down, isite1, isite2) private(list_1_j, ibit1_up, num1_up, ibit2_up, num2_up, ibit1_down, num1_down, ibit2_down, num2_down, tmp_spn_z, iexchg, off) shared(vec)
185  for (j = 1; j <= i_max; j++) {
186  list_1_j = j - 1;
187  ibit1_up = list_1_j & is1_up;
188  num1_up = ibit1_up / is1_up;
189  ibit2_up = list_1_j & is2_up;
190  num2_up = ibit2_up / is2_up;
191 
192  ibit1_down = list_1_j & is1_down;
193  num1_down = ibit1_down / is1_down;
194  ibit2_down = list_1_j & is2_down;
195  num2_down = ibit2_down / is2_down;
196 
197  tmp_spn_z = (num1_up - num1_down) * (num2_up - num2_down);
198  spn += conj(vec[j]) * vec[j] * tmp_spn_z / 4.0;
199  if (isite1 == isite2) {
200  spn += conj(vec[j]) * vec[j] * (num1_up + num1_down - 2 * num1_up * num1_down) / 2.0;
201  spn_z += conj(vec[j]) * vec[j] * (num1_up - num1_down) / 2.0;
202  } else {
203  if (ibit1_up != 0 && ibit1_down == 0 && ibit2_up == 0 && ibit2_down != 0) {
204  iexchg = list_1_j - (is1_up + is2_down);
205  iexchg += (is2_up + is1_down);
206  off = iexchg + 1;
207  spn += conj(vec[j]) * vec[off] / 2.0;
208  } else if (ibit1_up == 0 && ibit1_down != 0 && ibit2_up != 0 && ibit2_down == 0) {
209  iexchg = list_1_j - (is1_down + is2_up);
210  iexchg += (is2_down + is1_up);
211  off = iexchg + 1;
212  spn += conj(vec[j]) * vec[off] / 2.0;
213  }
214  }
215  }
216  }
217  }
218  X->Phys.s2 = creal(spn);
219  X->Phys.Sz = creal(spn_z);
220 }

◆ totalspin_Spin()

void totalspin_Spin ( struct BindStruct *  X,
double complex *  vec 
)

Function for calculating the total spin for the spin model.

Parameters
[in,out]  X    data list of calculation parameters
          vec  eigenvector
Version
0.2

Modified for hybrid parallelization.

Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 233 of file expec_totalspin.c.

References BindStruct::Check, ConvertToList1GeneralSpin(), BindStruct::Def, FALSE, GetBitGeneral(), GetLocal2Sz(), GetOffComp(), GetOffCompGeneralSpin(), GetSplitBitByModel(), DefineList::iCalcModel, CheckList::idim_max, DefineList::iFlgGeneralSpin, list_1, list_2_1, list_2_2, myrank, DefineList::Nsite, DefineList::NsiteMPI, BindStruct::Phys, PhysList::s2, CheckList::sdim, DefineList::SiteToBit, SumMPI_dc(), PhysList::Sz, DefineList::Tpow, TRUE, X_child_general_int_spin_MPIsingle(), X_child_general_int_spin_TotalS_MPIdouble(), and X_SpinGC_CisAis().

Referenced by expec_totalspin().

233  {
234 
235  long unsigned int j;
236  long unsigned int irght, ilft, ihfbit;
237  long unsigned int isite1, isite2;
238  long unsigned int tmp_isite1, tmp_isite2;
239 
240  long unsigned int is1_up, is2_up;
241  long unsigned int iexchg, off, off_2;
242 
243  int num1_up, num2_up;
244  int num1_down, num2_down;
245  int sigma_1, sigma_2;
246  long unsigned int ibit1_up, ibit2_up, ibit_tmp, is_up;
247  double complex spn_z = 0.0;
248  double complex spn_z1, spn_z2, spn_zd;
249  double complex spn = 0.0;
250  long unsigned int i_max;
251 
252  i_max = X->Check.idim_max;
253  if (X->Def.iFlgGeneralSpin == FALSE) {
254  GetSplitBitByModel(X->Def.Nsite, X->Def.iCalcModel, &irght, &ilft, &ihfbit);
255  spn = 0.0;
256  spn_z = 0.0;
257  spn_zd = 0.0;
258  for (isite1 = 1; isite1 <= X->Def.NsiteMPI; isite1++) {
259  for (isite2 = 1; isite2 <= X->Def.NsiteMPI; isite2++) {
260 
261  if (isite1 > X->Def.Nsite && isite2 > X->Def.Nsite) {
262 #ifdef MPI
263  is1_up = X->Def.Tpow[isite1 - 1];
264  is2_up = X->Def.Tpow[isite2 - 1];
265  is_up = is1_up + is2_up;
266  num1_up = X_SpinGC_CisAis((unsigned long int) myrank + 1, X, is1_up, 1);
267  num1_down = 1 - num1_up;
268  num2_up = X_SpinGC_CisAis((unsigned long int) myrank + 1, X, is2_up, 1);
269  num2_down = 1 - num2_up;
270  spn_z = (num1_up - num1_down) * (num2_up - num2_down);
271 
272 #pragma omp parallel for default(none) reduction (+:spn_zd) shared(vec) \
273  firstprivate(i_max, spn_z) private(j)
274  for (j = 1; j <= i_max; j++) {
275  spn_zd += conj(vec[j]) * vec[j] * spn_z / 4.0;
276  }
277  if (isite1 == isite2) {
278 #pragma omp parallel for default(none) reduction (+:spn_zd) shared(vec) \
279  firstprivate(i_max) private(j)
280  for (j = 1; j <= i_max; j++) {
281  spn_zd += conj(vec[j]) * vec[j] / 2.0;
282  }
283  } else {//off diagonal
284  spn += X_child_general_int_spin_TotalS_MPIdouble(isite1 - 1, isite2 - 1, X, vec, vec);
285  }
286 #endif
287  } else if (isite1 > X->Def.Nsite || isite2 > X->Def.Nsite) {
288 #ifdef MPI
289  if (isite1 < isite2) {
290  tmp_isite1 = isite1;
291  tmp_isite2 = isite2;
292  } else {
293  tmp_isite1 = isite2;
294  tmp_isite2 = isite1;
295  }
296 
297  is1_up = X->Def.Tpow[tmp_isite1 - 1];
298  is2_up = X->Def.Tpow[tmp_isite2 - 1];
299  num2_up = X_SpinGC_CisAis((unsigned long int) myrank + 1, X, is2_up, 1);
300  num2_down = 1 - num2_up;
301 
302  //diagonal
303 #pragma omp parallel for reduction(+: spn_zd) default(none) firstprivate(i_max, is1_up, num2_up, num2_down) private(ibit1_up, num1_up, num1_down, spn_z) shared(list_1, vec)
304  for (j = 1; j <= i_max; j++) {
305  ibit1_up = list_1[j] & is1_up;
306  num1_up = ibit1_up / is1_up;
307  num1_down = 1 - num1_up;
308  spn_z = (num1_up - num1_down) * (num2_up - num2_down);
309  spn_zd += conj(vec[j]) * vec[j] * spn_z / 4.0;
310  }
311  if (isite1 < isite2) {
312  spn += X_child_general_int_spin_MPIsingle(isite1 - 1, 0, 1, isite2 - 1, 1, 0, 1.0, X, vec, vec);
313  } else {
314  spn += conj(X_child_general_int_spin_MPIsingle(isite2 - 1, 1, 0, isite1 - 1, 0, 1, 1.0, X, vec, vec));
315  }
316 #endif
317  }//isite1 > Nsite || isite2 > Nsite
318  else {
319  is1_up = X->Def.Tpow[isite1 - 1];
320  is2_up = X->Def.Tpow[isite2 - 1];
321  is_up = is1_up + is2_up;
322 
323 #pragma omp parallel for reduction(+: spn, spn_zd) default(none) firstprivate(i_max, is_up, is1_up, is2_up, irght, ilft, ihfbit, isite1, isite2) private(ibit1_up, num1_up, ibit2_up, num2_up, num1_down, num2_down, spn_z, iexchg, off, ibit_tmp) shared(list_1, list_2_1, list_2_2, vec)
324  for (j = 1; j <= i_max; j++) {
325  ibit1_up = list_1[j] & is1_up;
326  num1_up = ibit1_up / is1_up;
327  num1_down = 1 - num1_up;
328  ibit2_up = list_1[j] & is2_up;
329  num2_up = ibit2_up / is2_up;
330  num2_down = 1 - num2_up;
331 
332  spn_z = (num1_up - num1_down) * (num2_up - num2_down);
333  spn_zd += conj(vec[j]) * vec[j] * spn_z / 4.0;
334 
335  if (isite1 == isite2) {
336  spn_zd += conj(vec[j]) * vec[j] / 2.0;
337  } else {
338  ibit_tmp = (num1_up) ^ (num2_up);
339  if (ibit_tmp != 0) {
340  iexchg = list_1[j] ^ (is_up);
341  GetOffComp(list_2_1, list_2_2, iexchg, irght, ilft, ihfbit, &off);
342  spn += conj(vec[j]) * vec[off] / 2.0;
343  }
344  }
345  }// j
346  }
347  }//isite2
348  }//isite1
349  }//generalspin=FALSE
350  else {
351  double S1 = 0;
352  double S2 = 0;
353  for (isite1 = 1; isite1 <= X->Def.NsiteMPI; isite1++) {
354  for (isite2 = 1; isite2 <= X->Def.NsiteMPI; isite2++) {
355  S1 = 0.5 * (X->Def.SiteToBit[isite1 - 1] - 1);
356  S2 = 0.5 * (X->Def.SiteToBit[isite2 - 1] - 1);
357  if (isite1 == isite2) {
358 #pragma omp parallel for reduction(+: spn, spn_z) default(none) firstprivate(i_max, isite1, X, S1) private (spn_z1)shared(vec, list_1)
359  for (j = 1; j <= i_max; j++) {
360  spn_z1 = 0.5 * GetLocal2Sz(isite1, list_1[j], X->Def.SiteToBit, X->Def.Tpow);
361  spn += conj(vec[j]) * vec[j] * S1 * (S1 + 1.0);
362  spn_z += conj(vec[j]) * vec[j] * spn_z1;
363  }
364  } else {
365 #pragma omp parallel for reduction(+: spn) default(none) firstprivate(i_max, isite1, isite2, X, S1, S2) private(spn_z1, spn_z2, off, off_2, ibit_tmp, sigma_1, sigma_2) shared(vec, list_1)
366  for (j = 1; j <= i_max; j++) {
367  spn_z1 = 0.5 * GetLocal2Sz(isite1, list_1[j], X->Def.SiteToBit, X->Def.Tpow);
368  spn_z2 = 0.5 * GetLocal2Sz(isite2, list_1[j], X->Def.SiteToBit, X->Def.Tpow);
369  spn += conj(vec[j]) * vec[j] * spn_z1 * spn_z2;
370 
371  sigma_1 = GetBitGeneral(isite1, list_1[j], X->Def.SiteToBit, X->Def.Tpow);
372  sigma_2 = GetBitGeneral(isite2, list_1[j], X->Def.SiteToBit, X->Def.Tpow);
373 
374  ibit_tmp = GetOffCompGeneralSpin(list_1[j], isite2, sigma_2, sigma_2 + 1, &off, X->Def.SiteToBit,
375  X->Def.Tpow);
376  if (ibit_tmp == TRUE) {
377  ibit_tmp = GetOffCompGeneralSpin(off, isite1, sigma_1, sigma_1 - 1, &off_2, X->Def.SiteToBit,
378  X->Def.Tpow);
379  if (ibit_tmp == TRUE) {
380  ConvertToList1GeneralSpin(off_2, X->Check.sdim, &off);
381  spn += conj(vec[j]) * vec[off] * sqrt(S2 * (S2 + 1) - spn_z2 * (spn_z2 + 1)) *
382  sqrt(S1 * (S1 + 1) - spn_z1 * (spn_z1 - 1)) / 2.0;
383  }
384  }
385 
386  ibit_tmp = GetOffCompGeneralSpin(list_1[j], isite2, sigma_2, sigma_2 - 1, &off, X->Def.SiteToBit,
387  X->Def.Tpow);
388  if (ibit_tmp == TRUE) {
389  ibit_tmp = GetOffCompGeneralSpin(off, isite1, sigma_1, sigma_1 + 1, &off_2, X->Def.SiteToBit,
390  X->Def.Tpow);
391  if (ibit_tmp == TRUE) {
392  ConvertToList1GeneralSpin(off_2, X->Check.sdim, &off);
393  spn += conj(vec[j]) * vec[off] * sqrt(S2 * (S2 + 1) - spn_z2 * (spn_z2 - 1.0)) *
394  sqrt(S1 * (S1 + 1) - spn_z1 * (spn_z1 + 1)) / 2.0;
395  }
396  }
397  }
398  }
399  }
400  }
401  }
402 
403  spn = SumMPI_dc(spn);
404  spn_zd = SumMPI_dc(spn_zd);
405  spn_z = SumMPI_dc(spn_z);
406  spn += spn_zd;
407  X->Phys.s2 = creal(spn);
408  X->Phys.Sz = creal(spn_z);
409 }
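The general-spin branch above builds the off-diagonal contributions from the standard ladder-operator matrix elements; for reference (ordinary angular-momentum algebra, matching the square-root factors in the listing):

\[
\hat{S}^{\pm} \lvert S, m \rangle = \sqrt{S(S+1) - m(m \pm 1)}\; \lvert S, m \pm 1 \rangle ,
\]

so each exchange term contributes \(\tfrac{1}{2}\sqrt{S_2(S_2+1) - m_2(m_2 \pm 1)}\,\sqrt{S_1(S_1+1) - m_1(m_1 \mp 1)}\), with \(m_1\) and \(m_2\) given by spn_z1 and spn_z2 in the code.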

◆ totalspin_SpinGC()

void totalspin_SpinGC ( struct BindStruct *  X,
double complex *  vec 
)

Function for calculating the total spin for the spin model in the grand canonical ensemble.

Parameters
[in,out]  X    data list of calculation parameters
          vec  eigenvector
Version
0.2

Added treatment of the total-spin calculation for general spin.

Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 424 of file expec_totalspin.c.

References BindStruct::Check, BindStruct::Def, FALSE, GetBitGeneral(), GetLocal2Sz(), GetOffCompGeneralSpin(), CheckList::idim_max, DefineList::iFlgGeneralSpin, BindStruct::Large, LargeList::mode, myrank, DefineList::Nsite, DefineList::NsiteMPI, BindStruct::Phys, PhysList::s2, DefineList::SiteToBit, SumMPI_dc(), PhysList::Sz, DefineList::Tpow, X_GC_child_CisAitCiuAiv_spin_MPIdouble(), X_GC_child_CisAitCiuAiv_spin_MPIsingle(), and X_SpinGC_CisAis().

Referenced by expec_totalspin().

424  {
425 
426  long unsigned int j;
427  long unsigned int isite1,isite2, tmp_isite1, tmp_isite2;
428  long unsigned int is1_up,is2_up;
429  long unsigned int iexchg, off, off_2;
430  int num1_up,num2_up;
431  int num1_down,num2_down;
432  int sigma_1, sigma_2;
433  long unsigned int ibit1_up,ibit2_up,ibit_tmp,is_up;
434  double complex spn_z;
435  double complex spn_z1, spn_z2;
436  double complex spn, spn_d;
437  long unsigned int list_1_j;
438  long unsigned int i_max;
439  i_max=X->Check.idim_max;
440  X->Large.mode = M_TOTALS;
441  spn=0.0;
442  spn_d=0.0;
443  spn_z=0.0;
444  if(X->Def.iFlgGeneralSpin==FALSE){
445  for(isite1=1;isite1<=X->Def.NsiteMPI;isite1++){
446  if(isite1 > X->Def.Nsite){
447  is1_up = X->Def.Tpow[isite1-1];
448  ibit1_up = myrank&is1_up;
449  num1_up = ibit1_up/is1_up;
450  num1_down =1-num1_up;
451 #pragma omp parallel for reduction(+: spn_z) default(none) firstprivate(i_max, is1_up, num1_up, num1_down) shared(vec)
452  for(j=1;j<=i_max;j++){
453  spn_z += conj(vec[j])*vec[j]*(num1_up-num1_down)/2.0;
454  }
455  }
456  else{
457  is1_up = X->Def.Tpow[isite1-1];
458 #pragma omp parallel for reduction(+: spn_z) default(none) firstprivate(i_max, is1_up) private(list_1_j, ibit1_up, num1_up, num1_down) shared(vec)
459  for(j=1;j<=i_max;j++){
460  list_1_j=j-1;
461  ibit1_up = list_1_j&is1_up;
462  num1_up = ibit1_up/is1_up;
463  num1_down = 1-num1_up;
464  spn_z += conj(vec[j])*vec[j]*(num1_up-num1_down)/2.0;
465  }
466 
467  }
468  for(isite2=1;isite2<=X->Def.NsiteMPI;isite2++){
469 
470  if(isite1 > X->Def.Nsite && isite2 > X->Def.Nsite){
471  is1_up = X->Def.Tpow[isite1-1];
472  is2_up = X->Def.Tpow[isite2-1];
473  num1_up = X_SpinGC_CisAis((unsigned long int)myrank + 1, X, is1_up, 1);
474  num1_down = 1-num1_up;
475  num2_up = X_SpinGC_CisAis((unsigned long int)myrank + 1, X, is2_up, 1);
476  num2_down = 1-num2_up;
477  spn_z2 = (num1_up-num1_down)*(num2_up-num2_down)/4.0;
478 #pragma omp parallel for default(none) reduction (+:spn_d) shared(vec) \
479  firstprivate(i_max, spn_z2) private(j)
480  for (j = 1; j <= i_max; j++) {
481  spn_d += conj(vec[j])*vec[j]*spn_z2;
482  }
483  if(isite1 == isite2){
484 #pragma omp parallel for default(none) reduction (+:spn_d) shared(vec) \
485  firstprivate(i_max) private(j)
486  for (j = 1; j <= i_max; j++) {
487  spn_d += conj(vec[j])*vec[j]/2.0;
488  }
489  }//isite1 = isite2
490  else{//off diagonal
491  spn += X_GC_child_CisAitCiuAiv_spin_MPIdouble(isite1-1, 0, 1, isite2-1, 1, 0, 1.0, X, vec, vec)/2.0;
492  }
493  }
494  else if(isite1 > X->Def.Nsite || isite2 > X->Def.Nsite){
495  if(isite1 < isite2){
496  tmp_isite1=isite1;
497  tmp_isite2=isite2;
498  }
499  else{
500  tmp_isite1 = isite2;
501  tmp_isite2 = isite1;
502  }
503  is1_up = X->Def.Tpow[tmp_isite1 - 1];
504  is2_up = X->Def.Tpow[tmp_isite2 - 1];
505  num2_up = X_SpinGC_CisAis((unsigned long int)myrank + 1, X, is2_up, 1);
506  num2_down =1-num2_up;
507  //diagonal
508 #pragma omp parallel for reduction(+: spn_d) default(none) firstprivate(i_max, is1_up, num2_up, num2_down) private(ibit1_up, num1_up, num1_down, spn_z2, list_1_j) shared(vec)
509  for(j=1;j<=i_max;j++){
510  list_1_j=j-1;
511  ibit1_up = list_1_j&is1_up;
512  num1_up = ibit1_up/is1_up;
513  num1_down = 1-num1_up;
514  spn_z2 = (num1_up-num1_down)*(num2_up-num2_down);
515  spn_d += conj(vec[j])*vec[j]*spn_z2/4.0;
516  }
517  if(isite1 < isite2){
518  spn += X_GC_child_CisAitCiuAiv_spin_MPIsingle(isite1-1, 0, 1, isite2-1, 1, 0, 1.0, X, vec, vec)/2.0;
519  }
520  else{
521  spn += conj(X_GC_child_CisAitCiuAiv_spin_MPIsingle(isite2-1, 1, 0, isite1-1, 0, 1, 1.0, X, vec, vec))/2.0;
522  }
523  }
524  else{
525  is2_up = X->Def.Tpow[isite2-1];
526  is_up = is1_up+is2_up;
527 #pragma omp parallel for reduction(+: spn, spn_d) default(none) firstprivate(i_max, is_up, is1_up, is2_up, isite1, isite2) private(list_1_j, ibit1_up, num1_up, ibit2_up, num2_up, num1_down, num2_down, spn_z2, iexchg, off, ibit_tmp) shared(vec)
528  for(j=1;j<=i_max;j++){
529  list_1_j=j-1;
530  ibit1_up = list_1_j&is1_up;
531  num1_up = ibit1_up/is1_up;
532  num1_down = 1-num1_up;
533  ibit2_up = list_1_j&is2_up;
534  num2_up = ibit2_up/is2_up;
535  num2_down = 1-num2_up;
536 
537  spn_z2 = (num1_up-num1_down)*(num2_up-num2_down);
538  spn_d += conj(vec[j])*vec[j]*spn_z2/4.0;
539 
540  if(isite1==isite2){
541  spn_d += conj(vec[j])*vec[j]/2.0;
542  }else{
543  ibit_tmp = (num1_up) ^ (num2_up);
544  if(ibit_tmp!=0){
545  iexchg = list_1_j ^ (is_up);
546  off = iexchg+1;
547  spn += conj(vec[j])*vec[off]/2.0;
548  }
549 
550  }
551  }//j
552  }//else
553  }
554  }
555  }
556  else{//general spin
557  double S1=0;
558  double S2=0;
559  spn =0.0;
560  spn_z=0.0;
561  for(isite1=1;isite1<=X->Def.NsiteMPI;isite1++){
562  S1=0.5*(X->Def.SiteToBit[isite1-1]-1);
563  if(isite1 > X->Def.Nsite){
564  spn_z1 = 0.5*GetLocal2Sz(isite1, (unsigned long int) myrank, X->Def.SiteToBit, X->Def.Tpow);
565 #pragma omp parallel for reduction(+: spn, spn_z) default(none) firstprivate(S1, spn_z1,i_max) shared(vec)
566  for(j=1;j<=i_max;j++){
567  spn += conj(vec[j])*vec[j]*S1*(S1+1.0);
568  spn_z += conj(vec[j])*vec[j]*spn_z1;
569  }
570  }
571  else{
572 #pragma omp parallel for reduction(+: spn, spn_z) default(none) firstprivate(i_max, isite1, X, S1) private(spn_z1) shared(vec)
573  for(j=1;j<=i_max;j++){
574  spn_z1 = 0.5*GetLocal2Sz(isite1, j-1, X->Def.SiteToBit, X->Def.Tpow);
575  spn += conj(vec[j])*vec[j]*S1*(S1+1.0);
576  spn_z += conj(vec[j])*vec[j]*spn_z1;
577  }
578  }
579  for(isite2=1;isite2<=X->Def.NsiteMPI;isite2++){
580  if(isite1==isite2) continue;
581  S2=0.5*(X->Def.SiteToBit[isite2-1]-1);
582 
583  if(isite1 > X->Def.Nsite && isite2 > X->Def.Nsite){
584  /*
585  spn_z1 = 0.5*GetLocal2Sz(isite1, (unsigned long int) myrank, X->Def.SiteToBit, X->Def.Tpow);
586  spn_z2 = 0.5*GetLocal2Sz(isite2, (unsigned long int) myrank, X->Def.SiteToBit, X->Def.Tpow);
587 #pragma omp parallel for reduction(+: spn, spn_z) default(none) firstprivate(spn_z1, spn_z2, i_max) shared(vec)
588  for(j=1;j<=i_max;j++){
589  spn += conj(vec[j])*vec[j]*spn_z1*spn_z2;
590  }
591  tmp_V= sqrt(S2*(S2+1) - spn_z2*(spn_z2+1))*sqrt(S1*(S1+1) - spn_z1*(spn_z1-1))/2.0;
592  spn += X_GC_child_CisAitCjuAjv_GeneralSpin_MPIdouble(isite1-1, sigma_1-1, sigma_1, isite2-1, sigma_2+1, sigma_2, tmp_V, X,vec, vec);
593  tmp_V= sqrt(S2*(S2+1) - spn_z2*(spn_z2-1))*sqrt(S1*(S1+1) - spn_z1*(spn_z1+1))/2.0;
594  spn += X_GC_child_CisAitCjuAjv_GeneralSpin_MPIdouble(isite1-1, sigma_1+1, sigma_1, isite2-1, sigma_2-1, sigma_2, tmp_V, X,vec, vec);
595  */
596  }
597  else if(isite1 > X->Def.Nsite || isite2 > X->Def.Nsite){
598  /*
599  if(isite1 < isite2){
600  tmp_isite1=isite1;
601  tmp_isite2=isite2;
602  }
603  else{
604  tmp_isite1 = isite2;
605  tmp_isite2 = isite1;
606  }
607  spn_z2 = 0.5*GetLocal2Sz(tmp_isite2, (unsigned long int) myrank, X->Def.SiteToBit, X->Def.Tpow);
608  sigma_2 = GetBitGeneral(tmp_isite2, (unsigned long int) myrank, X->Def.SiteToBit, X->Def.Tpow);
609  */
610  }
611  else{ //inner-process
612 #pragma omp parallel for reduction(+: spn) default(none) firstprivate(i_max, isite1, isite2, X, S1, S2) private(spn_z1, spn_z2, off, off_2, ibit_tmp, sigma_1, sigma_2) shared(vec)
613  for(j=1;j<=i_max;j++){
614  spn_z1 = 0.5*GetLocal2Sz(isite1, j-1, X->Def.SiteToBit, X->Def.Tpow);
615  spn_z2 = 0.5*GetLocal2Sz(isite2, j-1, X->Def.SiteToBit, X->Def.Tpow);
616  spn += conj(vec[j])*vec[j]*spn_z1*spn_z2;
617 
618  sigma_1=GetBitGeneral(isite1, j-1, X->Def.SiteToBit, X->Def.Tpow);
619  sigma_2=GetBitGeneral(isite2, j-1, X->Def.SiteToBit, X->Def.Tpow);
620 
621  ibit_tmp = GetOffCompGeneralSpin(j-1, isite2, sigma_2, sigma_2+1, &off, X->Def.SiteToBit, X->Def.Tpow);
622  if(ibit_tmp!=0){
623  ibit_tmp = GetOffCompGeneralSpin(off, isite1, sigma_1, sigma_1-1,&off_2, X->Def.SiteToBit, X->Def.Tpow);
624  if(ibit_tmp!=0){
625  spn += conj(vec[j])*vec[off_2+1]*sqrt(S2*(S2+1) - spn_z2*(spn_z2+1))*sqrt(S1*(S1+1) - spn_z1*(spn_z1-1))/2.0;
626  }
627  }
628 
629  ibit_tmp = GetOffCompGeneralSpin(j-1, isite2, sigma_2, sigma_2-1, &off, X->Def.SiteToBit, X->Def.Tpow);
630  if(ibit_tmp !=0){
631  ibit_tmp = GetOffCompGeneralSpin(off, isite1, sigma_1, sigma_1+1, &off_2, X->Def.SiteToBit, X->Def.Tpow);
632  if(ibit_tmp!=0){
633  spn += conj(vec[j])*vec[off_2+1]*sqrt(S2*(S2+1) - spn_z2*(spn_z2-1.0))*sqrt(S1*(S1+1)- spn_z1*(spn_z1+1))/2.0;
634  }
635  }
636  }//j
637  }//inner-process
638  }//isite2
639  }//isite1
640  }
641 
642  spn = SumMPI_dc(spn);
643  spn_d = SumMPI_dc(spn_d);
644  spn_z = SumMPI_dc(spn_z);
645  X->Phys.s2=creal(spn+spn_d);
646  X->Phys.Sz=creal(spn_z);
647 }
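In the spin-1/2 branches, the exchange between two sites whose spins differ is generated by a single XOR with the combined bit mask; a small self-contained sketch of that trick follows (illustrative values only; in the grand canonical listing above the corresponding vector index is then iexchg + 1):

#include <stdio.h>

/* Spin-exchange bit trick used in totalspin_Spin() and totalspin_SpinGC():
 * when the up-bits of two spin-1/2 sites differ, XOR-ing the state with
 * both masks flips the two spins at once, yielding the exchanged state.
 * The masks and the state below are made-up example values.              */
int main(void) {
  unsigned long state  = 0x1UL;              /* site 1 up, site 2 down */
  unsigned long is1_up = 0x1UL, is2_up = 0x2UL;
  unsigned long is_up  = is1_up + is2_up;

  int n1 = (int)((state & is1_up) / is1_up);
  int n2 = (int)((state & is2_up) / is2_up);

  if ((n1 ^ n2) != 0) {                      /* spins differ: exchange acts */
    unsigned long iexchg = state ^ is_up;    /* -> 0x2: site 1 down, site 2 up */
    printf("exchanged state = 0x%lx\n", iexchg);
  }
  return 0;
}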

◆ totalSz_HubbardGC()

void totalSz_HubbardGC ( struct BindStruct *  X,
double complex *  vec 
)

Function for calculating the total Sz for the Hubbard model in the grand canonical ensemble.

Parameters
[in,out]  X    data list of calculation parameters
          vec  eigenvector
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 688 of file expec_totalspin.c.

References BindStruct::Check, BindStruct::Def, CheckList::idim_max, myrank, DefineList::Nsite, DefineList::NsiteMPI, BindStruct::Phys, SumMPI_dc(), PhysList::Sz, totalSz_SpinGC(), and DefineList::Tpow.

Referenced by expec_totalSz().

691  {
692 
693  long unsigned int j;
694  long unsigned int isite1;
695  long unsigned int is1_up, is1_down;
696  int num1_up, num1_down, num1_sz;
697  long unsigned int ibit1_up, ibit1_down, list_1_j;
698  double complex spn_z;
699  long unsigned int i_max;
700 
701  i_max = X->Check.idim_max;
702  spn_z = 0.0;
703  for (isite1 = 1; isite1 <= X->Def.NsiteMPI; isite1++) {
704  if (isite1 > X->Def.Nsite) {
705 #ifdef MPI
706  is1_up = X->Def.Tpow[2 * isite1 - 2];
707  is1_down = X->Def.Tpow[2 * isite1 - 1];
708  ibit1_up = (unsigned long int) myrank & is1_up;
709  num1_up = ibit1_up / is1_up;
710  ibit1_down = (unsigned long int) myrank & is1_down;
711  num1_down = ibit1_down / is1_down;
712  num1_sz = num1_up - num1_down;
713 #pragma omp parallel for reduction(+:spn_z) default(none) firstprivate(i_max) private(j) shared(num1_sz,vec)
714  for (j = 1; j <= i_max; j++) {
715  spn_z += (num1_sz) / 2.0 * conj(vec[j]) * vec[j];
716  }
717 #endif
718  } else {//isite1 > X->Def.Nsite
719  is1_up = X->Def.Tpow[2 * isite1 - 2];
720  is1_down = X->Def.Tpow[2 * isite1 - 1];
721 #pragma omp parallel for reduction(+:spn_z) default(none) firstprivate(i_max, is1_up, is1_down, isite1) \
722  private(list_1_j, ibit1_up, num1_up, ibit1_down, num1_down) shared(vec)
723  for (j = 1; j <= i_max; j++) {
724  list_1_j = j - 1;
725  ibit1_up = list_1_j & is1_up;
726  num1_up = ibit1_up / is1_up;
727  ibit1_down = list_1_j & is1_down;
728  num1_down = ibit1_down / is1_down;
729  spn_z += conj(vec[j]) * vec[j] * (num1_up - num1_down) / 2.0;
730  }
731  }
732  }
733  spn_z = SumMPI_dc(spn_z);
734  X->Phys.Sz = creal(spn_z);
735 }

◆ totalSz_SpinGC()

void totalSz_SpinGC ( struct BindStruct *  X,
double complex *  vec 
)

Function for calculating the total Sz for the spin model in the grand canonical ensemble.

Parameters
[in,out]  X    data list of calculation parameters
          vec  eigenvector
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 747 of file expec_totalspin.c.

References BindStruct::Check, BindStruct::Def, FALSE, GetLocal2Sz(), CheckList::idim_max, DefineList::iFlgGeneralSpin, BindStruct::Large, LargeList::mode, myrank, DefineList::Nsite, DefineList::NsiteMPI, BindStruct::Phys, DefineList::SiteToBit, SumMPI_dc(), PhysList::Sz, and DefineList::Tpow.

Referenced by expec_totalSz(), and totalSz_HubbardGC().

750  {
751  long unsigned int j, list_1_j;
752  long unsigned int isite1;
753  long unsigned int is1_up;
754  int num1_up;
755  int num1_down;
756  long unsigned int ibit1_up;
757  double complex spn_z, spn_z1;
758  long unsigned int i_max;
759  i_max = X->Check.idim_max;
760  X->Large.mode = M_TOTALS;
761  spn_z = 0.0;
762  if (X->Def.iFlgGeneralSpin == FALSE) {
763  for (isite1 = 1; isite1 <= X->Def.NsiteMPI; isite1++) {
764  if (isite1 > X->Def.Nsite) {
765 #ifdef MPI
766  is1_up = X->Def.Tpow[isite1 - 1];
767  ibit1_up = myrank & is1_up;
768  num1_up = ibit1_up / is1_up;
769  num1_down = 1 - num1_up;
770 #pragma omp parallel for reduction(+: spn_z) default(none) firstprivate(i_max, is1_up, num1_up, num1_down) shared(vec)
771  for (j = 1; j <= i_max; j++) {
772  spn_z += conj(vec[j]) * vec[j] * (num1_up - num1_down) / 2.0;
773  }
774 #endif
775  } else {
776  is1_up = X->Def.Tpow[isite1 - 1];
777 #pragma omp parallel for reduction(+: spn_z) default(none) firstprivate(i_max, is1_up) private(list_1_j, ibit1_up, num1_up, num1_down) shared(vec)
778  for (j = 1; j <= i_max; j++) {
779  list_1_j = j - 1;
780  ibit1_up = list_1_j & is1_up;
781  num1_up = ibit1_up / is1_up;
782  num1_down = 1 - num1_up;
783  spn_z += conj(vec[j]) * vec[j] * (num1_up - num1_down) / 2.0;
784  }
785  }//else
786  }
787  } else {//general spin
788  spn_z = 0.0;
789  for (isite1 = 1; isite1 <= X->Def.NsiteMPI; isite1++) {
790  if (isite1 > X->Def.Nsite) {
791  spn_z1 = 0.5 * GetLocal2Sz(isite1, (unsigned long int) myrank, X->Def.SiteToBit, X->Def.Tpow);
792 #pragma omp parallel for reduction(+: spn_z) default(none) firstprivate(spn_z1, i_max) shared(vec)
793  for (j = 1; j <= i_max; j++) {
794  spn_z += conj(vec[j]) * vec[j] * spn_z1;
795  }
796  } else {
797 #pragma omp parallel for reduction(+: spn_z) default(none) firstprivate(i_max, isite1, X) private(spn_z1) shared(vec)
798  for (j = 1; j <= i_max; j++) {
799  spn_z1 = 0.5 * GetLocal2Sz(isite1, j - 1, X->Def.SiteToBit, X->Def.Tpow);
800  spn_z += conj(vec[j]) * vec[j] * spn_z1;
801  }
802  }
803  }//isite1
804  }
805  spn_z = SumMPI_dc(spn_z);
806  X->Phys.Sz = creal(spn_z);
807 }