HΦ  3.2.0
CheckMPI.c File Reference

Compute the total number of electrons and spins. More...

#include "Common.h"
#include "wrapperMPI.h"
+ Include dependency graph for CheckMPI.c:

Go to the source code of this file.

Functions

int CheckMPI (struct BindStruct *X)
 Define the number of sites in each PE (DefineList.Nsite). Reduce the number of electrons (DefineList.Ne), total Sz (DefineList.Total2Sz) by them in the inter process region. More...
 
void CheckMPI_Summary (struct BindStruct *X)
 Print information of MPI parallelization. Modify DefineList::Tpow in the inter-process region. More...
 

Detailed Description

Compute the total number of electrons and spins.

Definition in file CheckMPI.c.

Function Documentation

◆ CheckMPI()

int CheckMPI ( struct BindStruct X)

Define the number of sites in each PE (DefineList.Nsite). Reduce the number of electrons (DefineList.Ne), total Sz (DefineList.Total2Sz) by them in the inter process region.

Author
Mitsuaki Kawamura (The University of Tokyo)

Branch for each model

Check the number of processes for Boost

Parameters
[in,out]X

Definition at line 27 of file CheckMPI.c.

References BindStruct::Boost, cErrNProcNumber, cErrNProcNumberGneralSpin, cErrNProcNumberHubbard, cErrNProcNumberSet, cErrNProcNumberSpin, BindStruct::Def, exitMPI(), FALSE, BoostList::flgBoost, DefineList::iCalcModel, DefineList::iFlgGeneralSpin, BoostList::ishift_nspin, ITINERANT, BoostList::list_6spin_star, DefineList::LocSpn, myrank, DefineList::Ndown, DefineList::Ne, DefineList::NLocSpn, nproc, DefineList::Nsite, DefineList::NsiteMPI, BoostList::num_pivot, DefineList::Nup, DefineList::SiteToBit, stdoutMPI, DefineList::Total2Sz, DefineList::Total2SzMPI, TRUE, and BoostList::W0.

Referenced by check().

28 {
29  int isite, NDimInterPE, SmallDim, SpinNum, ipivot, ishift, isiteMax, isiteMax0;
30 
35  X->Def.NsiteMPI = X->Def.Nsite;
36  X->Def.Total2SzMPI = X->Def.Total2Sz;
37  switch (X->Def.iCalcModel) {
38  case HubbardGC: /****************************************************/
39  case Hubbard:
40  case HubbardNConserved:
41  case Kondo:
42  case KondoGC:
43 
49  NDimInterPE = 1;
50  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
51  if (NDimInterPE == nproc) {
52  X->Def.Nsite = isite;
53  break;
54  } /*if (NDimInterPE == nproc)*/
55  NDimInterPE *= 4;
56  } /*for (isite = NsiteMPI; isite > 0; isite--)*/
57 
58  if (isite == 0) {
59  fprintf(stdoutMPI, "%s", cErrNProcNumberHubbard);
60  fprintf(stdoutMPI, cErrNProcNumber, nproc);
61  NDimInterPE = 1;
62  int ismallNproc=1;
63  int ilargeNproc=1;
64  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
65  if (NDimInterPE > nproc) {
66  ilargeNproc = NDimInterPE;
67  if(isite >1)
68  ismallNproc = NDimInterPE/4;
69  break;
70  }/*if (NDimInterPE > nproc)*/
71  NDimInterPE *= 4;
72  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
73  fprintf(stdoutMPI, cErrNProcNumberSet,ismallNproc, ilargeNproc );
74  return FALSE;
75  //return FALSE;
76  } /*if (isite == 0)*/
77 
78  switch (X->Def.iCalcModel) /*2 (inner)*/ {
79 
80  case Hubbard:
86  SmallDim = myrank;
87  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
88  SpinNum = SmallDim % 4;
89  SmallDim /= 4;
90  if (SpinNum == 1 /*01*/) {
91  X->Def.Nup -= 1;
92  X->Def.Ne -= 1;
93  }
94  else if (SpinNum == 2 /*10*/) {
95  X->Def.Ndown -= 1;
96  X->Def.Ne -= 1;
97  }
98  else if (SpinNum == 3 /*11*/){
99  X->Def.Nup -= 1;
100  X->Def.Ndown -= 1;
101  X->Def.Ne -= 2;
102  }
103  } /*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
104 
105  break;/*case Hubbard:*/
106 
107  case HubbardNConserved:
112  SmallDim = myrank;
113  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
114  SpinNum = SmallDim % 4;
115  SmallDim /= 4;
116  if (SpinNum == 1 /*01*/ || SpinNum == 2 /*10*/) X->Def.Ne -= 1;
117  else if (SpinNum == 3 /*11*/) X->Def.Ne -= 2;
118  } /*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
119 
120  break; /*case HubbardNConserved:*/
121 
122  case KondoGC:
123  case Kondo:
129  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)
130  if (X->Def.LocSpn[isite] != ITINERANT) X->Def.NLocSpn -= 1;
131 
132  if (X->Def.iCalcModel == Kondo) {
133  SmallDim = myrank;
134  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
135  SpinNum = SmallDim % 4;
136  SmallDim /= 4;
137  if (X->Def.LocSpn[isite] == ITINERANT) {
138  if (SpinNum == 1 /*01*/) {
139  X->Def.Nup -= 1;
140  X->Def.Ne -= 1;
141  }
142  else if (SpinNum == 2 /*10*/) {
143  X->Def.Ndown -= 1;
144  X->Def.Ne -= 1;
145  }
146  else if (SpinNum == 3 /*11*/) {
147  X->Def.Nup -= 1;
148  X->Def.Ndown -= 1;
149  X->Def.Ne -= 2;
150  }
151  }
152  else {
153  fprintf(stdoutMPI, "\n Stop because local spin in the inter process region\n");
154  return FALSE;
155  }
156  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
157  } /*if (X->Def.iCalcModel == Kondo)*/
158 
159  break; /*case KondoGC, Kondo*/
160 
161  } /*switch (X->Def.iCalcModel) 2(inner)*/
162 
163  break; /*case HubbardGC, Hubbard, HubbardNConserved, Kondo, KondoGC:*/
165  case SpinGC:/********************************************************/
166  case Spin:
167 
168  if (X->Def.iFlgGeneralSpin == FALSE) {
173  NDimInterPE = 1;
174  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
175  if (NDimInterPE == nproc) {
176  X->Def.Nsite = isite;
177  break;
178  }/*if (NDimInterPE == nproc)*/
179  NDimInterPE *= 2;
180  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
181 
182  if (isite == 0) {
183  fprintf(stdoutMPI, "%s", cErrNProcNumberSpin);
184  fprintf(stdoutMPI, cErrNProcNumber, nproc);
185  NDimInterPE = 1;
186  int ismallNproc=1;
187  int ilargeNproc=1;
188  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
189  if (NDimInterPE > nproc) {
190  ilargeNproc = NDimInterPE;
191  if(isite >1)
192  ismallNproc = NDimInterPE/2;
193  break;
194  }/*if (NDimInterPE > nproc)*/
195  NDimInterPE *= 2;
196  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
197  fprintf(stdoutMPI, cErrNProcNumberSet,ismallNproc, ilargeNproc );
198  return FALSE;
199  }/*if (isite == 0)*/
200 
201  if (X->Def.iCalcModel == Spin) {
202  /*X->Def.NeMPI = X->Def.Ne;*/
203 
204  /* Ne should be different in each PE */
205  SmallDim = myrank;
206  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
207  SpinNum = SmallDim % 2;
208  SmallDim /= 2;
209  if (SpinNum == 0) {
210  X->Def.Ndown -= 1;
211  }
212  else {
213  X->Def.Ne -= 1;
214  X->Def.Nup -= 1;
215  }
216  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
217  }/*if (X->Def.iCalcModel == Spin)*/
218 
219  } /*if (X->Def.iFlgGeneralSpin == FALSE)*/
220  else{/* General Spin */
225  NDimInterPE = 1;
226  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
227  if (NDimInterPE == nproc) {
228  X->Def.Nsite = isite;
229  break;
230  }/*if (NDimInterPE == nproc)*/
231  NDimInterPE *= X->Def.SiteToBit[isite - 1];
232  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
233 
234  if (isite == 0) {
235  fprintf(stdoutMPI, "%s", cErrNProcNumberGneralSpin);
236  fprintf(stdoutMPI, cErrNProcNumber, nproc);
237  NDimInterPE = 1;
238  int ismallNproc=1;
239  int ilargeNproc=1;
240  for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
241  if (NDimInterPE > nproc) {
242  ilargeNproc = NDimInterPE;
243  if(isite >1)
244  ismallNproc = NDimInterPE/X->Def.SiteToBit[isite - 2];
245  break;
246  }/*if (NDimInterPE > nproc)*/
247  NDimInterPE *= X->Def.SiteToBit[isite - 1];
248  }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
249  fprintf(stdoutMPI, cErrNProcNumberSet,ismallNproc, ilargeNproc );
250  return FALSE;
251  }/*if (isite == 0)*/
252 
253  if (X->Def.iCalcModel == Spin) {
254  X->Def.Total2SzMPI = X->Def.Total2Sz;
255 
256  /* Ne should be different in each PE */
257  SmallDim = myrank;
258  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
259  SpinNum = SmallDim % X->Def.SiteToBit[isite];
260  SmallDim /= X->Def.SiteToBit[isite];
261 
262  X->Def.Total2Sz += X->Def.SiteToBit[isite] - 1 - 2*SpinNum;
263  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
264  }/*if (X->Def.iCalcModel == Spin)*/
265  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
266 
268  break; /*case SpinGC, Spin*/
269 
270  default:
271  fprintf(stdoutMPI, "Error ! Wrong model !\n");
272  return FALSE;
273  }/*switch (X->Def.iCalcModel)*/
274 
278  if (X->Boost.flgBoost == 1) {
279  isiteMax = X->Boost.W0;
280  ishift = 0;
281  for (ipivot = 0; ipivot < X->Boost.num_pivot; ipivot++) {
282  isiteMax0 = X->Boost.list_6spin_star[ipivot][1]
283  + X->Boost.list_6spin_star[ipivot][2]
284  + X->Boost.list_6spin_star[ipivot][3]
285  + X->Boost.list_6spin_star[ipivot][4]
286  + X->Boost.list_6spin_star[ipivot][5];
287  if (ishift > 1) isiteMax0 = X->Def.NsiteMPI - isiteMax0 - 1 - ishift;
288  else isiteMax0 = X->Def.NsiteMPI - isiteMax0 - 2;
289  if (isiteMax0 < isiteMax) isiteMax = isiteMax0;
290  if (X->Boost.list_6spin_star[ipivot][6] == 1) ishift += X->Boost.ishift_nspin;
291  }/*for (ipivot = 0; ipivot < X->Boost.num_pivot; ipivot++)*/
292 
293  NDimInterPE = 1;
294  for (isite = 0; isite < isiteMax; isite++) NDimInterPE *= 2;
295 
296  if (NDimInterPE < nproc) {
297  fprintf(stderr, "\n Error ! in ReadDefFileIdxPara.\n");
298  fprintf(stderr, "Too many MPI processes ! It should be <= %d. \n\n", NDimInterPE);
299  exitMPI(-1);
300  }/*if (NDimInterPE < nproc)*/
301  }/*if (X->Boost.flgBoost == 1)*/
302 
303  return TRUE;
304 }/*void CheckMPI*/
void exitMPI(int errorcode)
MPI abort wrapper.
Definition: wrapperMPI.c:86
struct DefineList Def
Definition of the system (Hamiltonian), etc.
Definition: struct.h:410
long unsigned int W0
Definition: struct.h:397
int ** list_6spin_star
Definition: struct.h:403
unsigned int NLocSpn
Number of local spins.
Definition: struct.h:84
char * cErrNProcNumberGneralSpin
Definition: ErrorMessage.c:93
#define ITINERANT
Definition: global.h:31
int Total2Sz
Total in this process.
Definition: struct.h:69
int * LocSpn
[DefineList::NLocSpn] Flag (and size) of the local spin. malloc in setmem_def().
Definition: struct.h:82
unsigned int Nup
Number of spin-up electrons in this process.
Definition: struct.h:58
long unsigned int num_pivot
Definition: struct.h:398
#define TRUE
Definition: global.h:26
char * cErrNProcNumberSet
Definition: ErrorMessage.c:95
char * cErrNProcNumberSpin
Definition: ErrorMessage.c:92
char * cErrNProcNumber
Definition: ErrorMessage.c:94
unsigned int Ndown
Number of spin-down electrons in this process.
Definition: struct.h:59
int nproc
Number of processors, defined in InitializeMPI()
Definition: global.h:162
unsigned int Nsite
Number of sites in the INTRA process region.
Definition: struct.h:56
struct BoostList Boost
For Boost.
Definition: struct.h:414
unsigned int NsiteMPI
Total number of sites, differ from DefineList::Nsite.
Definition: struct.h:57
long unsigned int ishift_nspin
Definition: struct.h:399
int iFlgGeneralSpin
Flag for the general (Sz/=1/2) spin.
Definition: struct.h:86
#define FALSE
Definition: global.h:25
char * cErrNProcNumberHubbard
Error Message in CheckMPI.c.
Definition: ErrorMessage.c:91
long int * SiteToBit
[DefineList::NsiteMPI] Similar to DefineList::Tpow. For general spin.
Definition: struct.h:94
unsigned int Ne
Number of electrons in this process.
Definition: struct.h:71
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.h:198
int flgBoost
Flag for whether to use the CMA algorithm.
Definition: struct.h:395
int myrank
Process ID, defined in InitializeMPI()
Definition: global.h:163
int Total2SzMPI
Total across processes.
Definition: struct.h:70
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.h:165
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ CheckMPI_Summary()

void CheckMPI_Summary ( struct BindStruct X)

Print information of MPI parallelization. Modify DefineList::Tpow in the inter-process region.

Author
Mitsuaki Kawamura (The University of Tokyo)

Print the configuration in the inter process region of each PE as a binary (excepting general spin) format.

Reset DefineList::Tpow[DefNsite], DefineList::Tpow[DefNsite + 1] ... as inter process space For Hubbard & Kondo system, define DefineList::OrgTpow which is not affected by the number of processes.

Parameters
[in,out]X

Definition at line 310 of file CheckMPI.c.

References BindStruct::Check, BindStruct::Def, exitMPI(), FALSE, DefineList::iCalcModel, CheckList::idim_max, CheckList::idim_maxMPI, DefineList::iFlgGeneralSpin, DefineList::iFlgScaLAPACK, myrank, DefineList::Ndown, DefineList::Ne, nproc, DefineList::Nsite, DefineList::NsiteMPI, DefineList::Nup, DefineList::OrgTpow, DefineList::SiteToBit, stdoutMPI, SumMPI_i(), SumMPI_li(), DefineList::Total2Sz, and DefineList::Tpow.

Referenced by check().

310  {
311 
312  int isite, iproc, SmallDim, SpinNum, Nelec;
313  unsigned long int idimMPI;
314 
315  if(X->Def.iFlgScaLAPACK == 0) {
316  fprintf(stdoutMPI, "\n\n###### MPI site separation summary ######\n\n");
317  fprintf(stdoutMPI, " INTRA process site\n");
318  fprintf(stdoutMPI, " Site Bit\n");
319  for (isite = 0; isite < X->Def.Nsite; isite++) {
320  switch (X->Def.iCalcModel) {
321  case HubbardGC:
322  case Hubbard:
323  case HubbardNConserved:
324  case Kondo:
325  case KondoGC:
326 
327  fprintf(stdoutMPI, " %4d %4d\n", isite, 4);
328  break;
329 
330  case Spin:
331  case SpinGC:
332 
333  if (X->Def.iFlgGeneralSpin == FALSE) {
334  fprintf(stdoutMPI, " %4d %4d\n", isite, 2);
335  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
336  else {
337  fprintf(stdoutMPI, " %4d %4ld\n", isite, X->Def.SiteToBit[isite]);
338  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
339 
340  break;
341 
342  } /*switch (X->Def.iCalcModel)*/
343  } /*for (isite = 0; isite < X->Def.Nsite; isite++)*/
344 
345  fprintf(stdoutMPI, "\n INTER process site\n");
346  fprintf(stdoutMPI, " Site Bit\n");
347  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
348  switch (X->Def.iCalcModel) {
349  case HubbardGC:
350  case Hubbard:
351  case HubbardNConserved:
352  case Kondo:
353  case KondoGC:
354 
355  fprintf(stdoutMPI, " %4d %4d\n", isite, 4);
356  break;
357 
358  case Spin:
359  case SpinGC:
360 
361  if (X->Def.iFlgGeneralSpin == FALSE) {
362  fprintf(stdoutMPI, " %4d %4d\n", isite, 2);
363  }/*if (X->Def.iFlgGeneralSpin == FALSE) */
364  else {
365  fprintf(stdoutMPI, " %4d %4ld\n", isite, X->Def.SiteToBit[isite]);
366  }/*if (X->Def.iFlgGeneralSpin == TRUE) */
367 
368  break;
369 
370  }/*switch (X->Def.iCalcModel)*/
371  }/*for (isite = X->Def.Nsite; isite < NsiteMPI; isite++)*/
372 
373  fprintf(stdoutMPI, "\n Process element info\n");
374  fprintf(stdoutMPI, " Process Dimension Nup Ndown Nelec Total2Sz State\n");
375 
376  for (iproc = 0; iproc < nproc; iproc++) {
377 
378  fprintf(stdoutMPI, " %7d", iproc);
379 
380  if (myrank == iproc) idimMPI = X->Check.idim_max;
381  else idimMPI = 0;
382  fprintf(stdoutMPI, " %15ld", SumMPI_li(idimMPI));
383 
384  if (myrank == iproc) Nelec = X->Def.Nup;
385  else Nelec = 0;
386  fprintf(stdoutMPI, " %4d", SumMPI_i(Nelec));
387 
388  if (myrank == iproc) Nelec = X->Def.Ndown;
389  else Nelec = 0;
390  fprintf(stdoutMPI, " %5d", SumMPI_i(Nelec));
391 
392  if (myrank == iproc) {
393  Nelec = X->Def.Ne; //X->Def.Nup
394  if (X->Def.iCalcModel == Spin || X->Def.iCalcModel == SpinGC) Nelec += X->Def.Ndown;
395  } else Nelec = 0;
396 
397  fprintf(stdoutMPI, " %5d", SumMPI_i(Nelec));
398 
399  if (myrank == iproc) Nelec = X->Def.Total2Sz;
400  else Nelec = 0;
401  fprintf(stdoutMPI, " %8d ", SumMPI_i(Nelec));
406  switch (X->Def.iCalcModel) {
407  case HubbardGC: /****************************************************/
408  case Hubbard:
409  case HubbardNConserved:
410  case Kondo:
411  case KondoGC:
412 
413  SmallDim = iproc;
414  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
415  SpinNum = SmallDim % 4;
416  SmallDim /= 4;
417  if (SpinNum == 0) fprintf(stdoutMPI, "00");
418  else if (SpinNum == 1) fprintf(stdoutMPI, "01");
419  else if (SpinNum == 2) fprintf(stdoutMPI, "10");
420  else if (SpinNum == 3) fprintf(stdoutMPI, "11");
421  } /*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
422 
423  break;
424 
425  case Spin:
426  case SpinGC:
427 
428  SmallDim = iproc;
429  if (X->Def.iFlgGeneralSpin == FALSE) {
430  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
431  SpinNum = SmallDim % 2;
432  SmallDim /= 2;
433  fprintf(stdoutMPI, "%1d", SpinNum);
434  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
435  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
436  else {
437  SmallDim = iproc;
438  for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
439  SpinNum = SmallDim % (int) X->Def.SiteToBit[isite];
440  SmallDim /= X->Def.SiteToBit[isite];
441  fprintf(stdoutMPI, "%1d", SpinNum);
442  }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
443  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
444 
445  break;
446 
447  }/*switch (X->Def.iCalcModel)*/
448  fprintf(stdoutMPI, "\n");
449  }/*for (iproc = 0; iproc < nproc; iproc++)*/
450 
452  fprintf(stdoutMPI, "\n Total dimension : %ld\n\n", X->Check.idim_maxMPI);
453  if (X->Check.idim_maxMPI < 1) {
454  fprintf(stdoutMPI, "ERROR! Total dimension < 1\n");
455  exitMPI(-1);
456  }
457  }
458  else{
459  fprintf(stdoutMPI, "\n Total dimension : %ld\n\n", X->Check.idim_max);
460  }
461 
468  switch (X->Def.iCalcModel) {
469  case HubbardGC: /****************************************************/
470  case Hubbard:
471  case HubbardNConserved:
472  case Kondo:
473  case KondoGC:
474 
475  X->Def.Tpow[2 * X->Def.Nsite] = 1;
476  for (isite = 2 * X->Def.Nsite + 1; isite < 2 * X->Def.NsiteMPI; isite++)
477  X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * 2;
478 
479  X->Def.OrgTpow[0]=1;
480  for (isite = 1; isite < 2 * X->Def.NsiteMPI; isite++)
481  X->Def.OrgTpow[isite] = X->Def.OrgTpow[isite-1]*2;
482 
483  break;
484 
485  case SpinGC:/********************************************************/
486  case Spin:
487 
488  if (X->Def.iFlgGeneralSpin == FALSE) {
489 
490  X->Def.Tpow[X->Def.Nsite] = 1;
491  for (isite = X->Def.Nsite + 1; isite < X->Def.NsiteMPI; isite++)
492  X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * 2;
493 
494  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
495  else{
496 
497  X->Def.Tpow[X->Def.Nsite] = 1;
498  for (isite = X->Def.Nsite + 1; isite < X->Def.NsiteMPI; isite++)
499  X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * X->Def.SiteToBit[isite - 1];
500 
501  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
502  break;
503  } /*switch (X->Def.iCalcModel)*/
504 }/*void CheckMPI_Summary*/
void exitMPI(int errorcode)
MPI abort wrapper.
Definition: wrapperMPI.c:86
struct DefineList Def
Definition of the system (Hamiltonian), etc.
Definition: struct.h:410
unsigned long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.h:303
int Total2Sz
Total in this process.
Definition: struct.h:69
unsigned int Nup
Number of spin-up electrons in this process.
Definition: struct.h:58
unsigned long int idim_maxMPI
The total dimension across process.
Definition: struct.h:304
int iFlgScaLAPACK
ScaLAPACK mode ( only for FullDiag )
Definition: struct.h:235
long unsigned int * OrgTpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.h:92
unsigned int Ndown
Number of spin-down electrons in this process.
Definition: struct.h:59
int nproc
Number of processors, defined in InitializeMPI()
Definition: global.h:162
unsigned int Nsite
Number of sites in the INTRA process region.
Definition: struct.h:56
long unsigned int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.h:90
int SumMPI_i(int idim)
MPI wrapper function to obtain sum of integer across processes.
Definition: wrapperMPI.c:256
unsigned int NsiteMPI
Total number of sites, differ from DefineList::Nsite.
Definition: struct.h:57
int iFlgGeneralSpin
Flag for the general (Sz/=1/2) spin.
Definition: struct.h:86
#define FALSE
Definition: global.h:25
long int * SiteToBit
[DefineList::NsiteMPI] Similar to DefineList::Tpow. For general spin.
Definition: struct.h:94
unsigned int Ne
Number of electrons in this process.
Definition: struct.h:71
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.h:198
int myrank
Process ID, defined in InitializeMPI()
Definition: global.h:163
struct CheckList Check
Size of the Hilbert space.
Definition: struct.h:411
unsigned long int SumMPI_li(unsigned long int idim)
MPI wrapper function to obtain sum of unsigned long integer across processes.
Definition: wrapperMPI.c:239
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.h:165
+ Here is the call graph for this function:
+ Here is the caller graph for this function: