20 #include "wrapperMPI.h" 34 static unsigned long int mfint[7];
/* NOTE(review): fragment of the body of setmem_def(struct BindStruct *X, struct BoostList *xBoost)
 * (see the symbol index at the end of this file). This is a lossy extraction: original
 * line numbers are fused into the code text, statements are wrapped mid-expression, and
 * the original-number gaps (66->70, 72->75, 78->80, ...) show that loop bodies and
 * closing braces were dropped. Code below is left byte-identical; only comments added.
 *
 * The *_malloc1/_malloc2 calls are project allocation macros (lui_=long unsigned int,
 * li_=long int, i_=int, d_=double, c_=double complex, ui_=unsigned int) — TODO confirm
 * against the project's memory-macro header. */
/* Loop counters used throughout this function. */
61 unsigned long int i=0;
62 unsigned long int j=0;
63 unsigned long int k=0;
/* Bit-mask tables: Tpow/OrgTpow sized 2*Nsite+2. */
64 lui_malloc1(
X->Def.Tpow, 2*
X->Def.Nsite+2);
65 lui_malloc1(
X->Def.OrgTpow, 2*
X->Def.Nsite+2);
/* Zero-initialization loop; its body is missing from this extraction (orig lines 67-69). */
66 for(i=0; i<2*
X->Def.Nsite+2; i++){
/* Per-site bit widths (general spin), zero-initialized below. */
70 li_malloc1(
X->Def.SiteToBit,
X->Def.Nsite+1);
71 for(i=0; i<
X->Def.Nsite+1; i++){
72 X->Def.SiteToBit[i]=0;
/* Per-site local-spin flags and Nsite x Nsite correlation work arrays. */
75 i_malloc1(
X->Def.LocSpn,
X->Def.Nsite);
76 d_malloc1(
X->Phys.spin_real_cor,
X->Def.Nsite*
X->Def.Nsite);
77 d_malloc1(
X->Phys.charge_real_cor,
X->Def.Nsite*
X->Def.Nsite);
78 d_malloc1(
X->Phys.loc_spin_z,
X->Def.Nsite*
X->Def.Nsite);
/* Chemical-potential-like (diagonal) terms; sized for the worst case of
 * EDNChemi + NInterAll + NTransfer entries. */
80 i_malloc1(
X->Def.EDChemi,
X->Def.EDNChemi+
X->Def.NInterAll+
X->Def.NTransfer);
81 i_malloc1(
X->Def.EDSpinChemi,
X->Def.EDNChemi+
X->Def.NInterAll+
X->Def.NTransfer);
82 d_malloc1(
X->Def.EDParaChemi,
X->Def.EDNChemi+
X->Def.NInterAll+
X->Def.NTransfer);
/* General one-body transfer terms: NTransfer rows of 4 indices + complex parameter. */
84 i_malloc2(
X->Def.GeneralTransfer,
X->Def.NTransfer, 4);
85 c_malloc1(
X->Def.ParaGeneralTransfer,
X->Def.NTransfer);
/* For time evolution, the ED transfer arrays get extra room (NTETransferMax);
 * otherwise (the branch at orig line 93, its else-header is missing here) they
 * are sized by NTransfer alone. */
87 if(
X->Def.iCalcType == TimeEvolution){
88 i_malloc2(
X->Def.EDGeneralTransfer,
X->Def.NTransfer+
X->Def.NTETransferMax, 4);
89 c_malloc1(
X->Def.EDParaGeneralTransfer,
X->Def.NTransfer+
X->Def.NTETransferMax);
93 i_malloc2(
X->Def.EDGeneralTransfer,
X->Def.NTransfer, 4);
94 c_malloc1(
X->Def.EDParaGeneralTransfer,
X->Def.NTransfer);
/* Two-body interaction tables: index array (+NIsingCoupling padding where noted)
 * paired with a real parameter array of the same length. */
96 i_malloc2(
X->Def.CoulombIntra,
X->Def.NCoulombIntra, 1);
97 d_malloc1(
X->Def.ParaCoulombIntra,
X->Def.NCoulombIntra);
98 i_malloc2(
X->Def.CoulombInter,
X->Def.NCoulombInter+
X->Def.NIsingCoupling, 2);
99 d_malloc1(
X->Def.ParaCoulombInter,
X->Def.NCoulombInter+
X->Def.NIsingCoupling);
100 i_malloc2(
X->Def.HundCoupling,
X->Def.NHundCoupling+
X->Def.NIsingCoupling, 2);
101 d_malloc1(
X->Def.ParaHundCoupling,
X->Def.NHundCoupling+
X->Def.NIsingCoupling);
102 i_malloc2(
X->Def.PairHopping,
X->Def.NPairHopping, 2);
103 d_malloc1(
X->Def.ParaPairHopping,
X->Def.NPairHopping);
104 i_malloc2(
X->Def.ExchangeCoupling,
X->Def.NExchangeCoupling, 2);
105 d_malloc1(
X->Def.ParaExchangeCoupling,
X->Def.NExchangeCoupling);
106 i_malloc2(
X->Def.PairLiftCoupling,
X->Def.NPairLiftCoupling, 2);
107 d_malloc1(
X->Def.ParaPairLiftCoupling,
X->Def.NPairLiftCoupling);
/* General two-body (InterAll) terms: 8 site/spin indices + complex parameter. */
109 i_malloc2(
X->Def.InterAll,
X->Def.NInterAll, 8);
110 c_malloc1(
X->Def.ParaInterAll,
X->Def.NInterAll);
/* Green's-function index tables: one-body (4 indices) and two-body (8 indices). */
112 i_malloc2(
X->Def.CisAjt,
X->Def.NCisAjt, 4);
113 i_malloc2(
X->Def.CisAjtCkuAlvDC,
X->Def.NCisAjtCkuAlvDC, 8);
/* Excitation operators for spectrum calculations, with complex coefficients. */
115 i_malloc2(
X->Def.SingleExcitationOperator,
X->Def.NSingleExcitationOperator, 3);
116 c_malloc1(
X->Def.ParaSingleExcitationOperator,
X->Def.NSingleExcitationOperator);
117 i_malloc2(
X->Def.PairExcitationOperator,
X->Def.NPairExcitationOperator, 5);
118 c_malloc1(
X->Def.ParaPairExcitationOperator,
X->Def.NPairExcitationOperator);
/* Laser (time-dependent field) parameters. */
120 d_malloc1(
X->Def.ParaLaser,
X->Def.NLaser);
/* NOTE(review): still inside setmem_def — Boost-specific allocations on xBoost.
 * Same extraction damage as above: fused line numbers, dropped lines/braces
 * (orig gaps 124->129, 132->136). Code left byte-identical; comments only. */
122 unsigned int ipivot,iarrayJ,ispin;
/* First loop over R0*num_pivot pivots; its body (orig lines 125-128) is missing here. */
124 for (ipivot = 0; ipivot < xBoost->
R0 * xBoost->
num_pivot; ipivot++) {
/* Second pivot loop: allocate list_6spin_pair[ipivot][0..6], 15 ints each,
 * with raw malloc rather than the project macros. NOTE(review): the malloc
 * results are not checked for NULL here, unlike the *_malloc macros. */
129 for (ipivot = 0; ipivot < xBoost->
R0 * xBoost->
num_pivot; ipivot++) {
131 for (ispin = 0; ispin < 7; ispin++) {
132 xBoost->
list_6spin_pair[ipivot][ispin] = (
int *)malloc(
sizeof(
int) * 15);
/* arrayJ: ragged NumarrayJ x 3 x 3 array of double complex (exchange matrices,
 * per the symbol index: "double complex *** arrayJ"). */
136 xBoost->
arrayJ = (
double complex ***)malloc(
sizeof(
double complex**) * xBoost->
NumarrayJ);
137 for (iarrayJ = 0; iarrayJ < xBoost->
NumarrayJ; iarrayJ++) {
138 xBoost->
arrayJ[iarrayJ] = (
double complex **)malloc(
sizeof(
double complex*) * 3);
139 for (i = 0; i < 3; i++) {
140 xBoost->
arrayJ[iarrayJ][i] = (
double complex *)malloc(
sizeof(
double complex) * 3);
/* NOTE(review): still inside setmem_def — InterAll diagonal/off-diagonal split
 * arrays, plus the TimeEvolution per-timestep arrays and their zero-initialization.
 * Lossy extraction: fused line numbers, many closing braces missing. Comments only. */
/* For time evolution the InterAll working arrays need headroom for the
 * time-dependent terms (NTEInterAllMax); otherwise NInterAll suffices. */
145 NInterAllSet= (
X->Def.iCalcType==TimeEvolution) ?
X->Def.NInterAll+
X->Def.NTEInterAllMax:
X->Def.NInterAll;
146 i_malloc2(
X->Def.InterAll_OffDiagonal, NInterAllSet, 8);
147 c_malloc1(
X->Def.ParaInterAll_OffDiagonal, NInterAllSet);
148 i_malloc2(
X->Def.InterAll_Diagonal, NInterAllSet, 4);
149 d_malloc1(
X->Def.ParaInterAll_Diagonal, NInterAllSet);
/* Everything below runs only for time evolution: per-timestep operator tables. */
151 if (
X->Def.iCalcType == TimeEvolution){
152 d_malloc1(
X->Def.TETime,
X->Def.NTETimeSteps);
/* Per-timestep counts and index/parameter arrays for transfer terms. */
154 ui_malloc1(
X->Def.NTETransfer,
X->Def.NTETimeSteps);
155 ui_malloc1(
X->Def.NTETransferDiagonal,
X->Def.NTETimeSteps);
156 i_malloc3(
X->Def.TETransfer,
X->Def.NTETimeSteps,
X->Def.NTETransferMax, 4);
157 i_malloc3(
X->Def.TETransferDiagonal,
X->Def.NTETimeSteps,
X->Def.NTETransferMax, 2);
158 c_malloc2(
X->Def.ParaTETransfer,
X->Def.NTETimeSteps,
X->Def.NTETransferMax);
159 d_malloc2(
X->Def.ParaTETransferDiagonal,
X->Def.NTETimeSteps,
X->Def.NTETransferMax);
/* Per-timestep InterAll terms (full, diagonal, off-diagonal variants). */
161 ui_malloc1(
X->Def.NTEInterAll,
X->Def.NTETimeSteps);
162 ui_malloc1(
X->Def.NTEInterAllDiagonal,
X->Def.NTETimeSteps);
163 i_malloc3(
X->Def.TEInterAll,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax, 8);
164 i_malloc3(
X->Def.TEInterAllDiagonal,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax, 4);
165 c_malloc2(
X->Def.ParaTEInterAll,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax);
166 d_malloc2(
X->Def.ParaTEInterAllDiagonal,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax);
167 ui_malloc1(
X->Def.NTEInterAllOffDiagonal,
X->Def.NTETimeSteps);
168 i_malloc3(
X->Def.TEInterAllOffDiagonal,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax, 8);
169 c_malloc2(
X->Def.ParaTEInterAllOffDiagonal,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax);
/* Per-timestep chemical-potential terms. */
172 ui_malloc1(
X->Def.NTEChemi,
X->Def.NTETimeSteps);
173 i_malloc2(
X->Def.TEChemi,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax);
174 i_malloc2(
X->Def.SpinTEChemi,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax);
175 d_malloc2(
X->Def.ParaTEChemi,
X->Def.NTETimeSteps,
X->Def.NTEInterAllMax);
/* Zero-initialize every time-evolution array allocated above.
 * NOTE(review): closing braces of these nested loops were dropped by the extraction. */
177 for(i = 0; i <
X->Def.NTETimeSteps; i++){
179 X->Def.NTETransfer[i]=0;
180 X->Def.NTETransferDiagonal[i]=0;
181 X->Def.NTEChemi[i]=0;
183 X->Def.NTEInterAll[i] = 0;
184 X->Def.NTEInterAllDiagonal[i] = 0;
185 X->Def.NTEInterAllOffDiagonal[i] = 0;
186 for(j = 0; j <
X->Def.NTETransferMax; j++) {
187 X->Def.ParaTETransfer[i][j]=0;
188 X->Def.ParaTETransferDiagonal[i][j]=0;
189 for(k = 0; k< 4; k++){
190 X->Def.TETransfer[i][j][k]=0;
192 for(k = 0; k< 2; k++){
193 X->Def.TETransferDiagonal[i][j][k]=0;
196 for(j = 0; j <
X->Def.NTEInterAllMax; j++){
197 X->Def.ParaTEInterAll[i][j]=0;
198 X->Def.ParaTEInterAllDiagonal[i][j]=0;
199 X->Def.ParaTEInterAllOffDiagonal[i][j]=0;
200 X->Def.TEChemi[i][j]=0;
201 X->Def.SpinTEChemi[i][j]=0;
202 X->Def.ParaTEChemi[i][j]=0;
203 for(k = 0; k< 4; k++){
204 X->Def.TEInterAllDiagonal[i][j][k]=0;
206 for(k = 0; k< 8; k++){
207 X->Def.TEInterAll[i][j][k]=0;
208 X->Def.TEInterAllOffDiagonal[i][j][k]=0;
/* NOTE(review): fragment of the body of setmem_large(struct BindStruct *X)
 * (per the symbol index: allocates the Hamiltonian (Ham, L_vec), state vectors
 * (vg, v0, v1, v2, vec, alpha, beta), and lists (list_1, list_2_1, list_2_2)).
 * Same lossy extraction as above: fused line numbers, dropped lines (init bodies,
 * else-branches, braces, error handling). Code left byte-identical; comments only. */
228 unsigned long int j = 0;
/* Maximum dimension across MPI processes; presumably set via MaxMPI_li()
 * on a line dropped by the extraction (orig lines 230-233) — TODO confirm. */
229 unsigned long int idim_maxMPI;
/* Canonical-ensemble state list, one entry per basis state (+1: 1-based indexing). */
234 lui_malloc1(
list_1,
X->Check.idim_max + 1);
237 for (j = 0; j <
X->Check.idim_max + 1; j++) {
/* Inverse-lookup lists, sized by GetlistSize() results stored in X->Large. */
241 lui_malloc1(
list_2_1,
X->Large.SizeOflist_2_1);
242 lui_malloc1(
list_2_2,
X->Large.SizeOflist_2_2);
/* Zero-initialization loops; bodies dropped by the extraction. */
249 for (j = 0; j <
X->Check.idim_max + 1; j++) {
252 for (j = 0; j <
X->Large.SizeOflist_2_1; j++) {
255 for (j = 0; j <
X->Large.SizeOflist_2_2; j++) {
/* Working wavefunction vectors. */
261 c_malloc1(
v0,
X->Check.idim_max + 1);
262 c_malloc1(
v1,
X->Check.idim_max + 1);
263 for (j = 0; j <
X->Check.idim_max + 1; j++) {
/* v2 is needed only for time evolution. */
268 if (
X->Def.iCalcType == TimeEvolution) {
269 c_malloc1(
v2,
X->Check.idim_max + 1);
/* MPI communication buffer, sized by the cross-process maximum dimension. */
274 c_malloc1(
v1buf, idim_maxMPI + 1);
275 for (j = 0; j <
X->Check.idim_max + 1; j++) {
/* TPQ: extra vector vg. */
279 if (
X->Def.iCalcType == TPQCalc) {
283 c_malloc1(
vg,
X->Check.idim_max + 1);
284 for (j = 0; j <
X->Check.idim_max + 1; j++) {
/* Lanczos tridiagonal coefficients. */
288 d_malloc1(
alpha,
X->Def.Lanczos_max + 1);
289 d_malloc1(
beta,
X->Def.Lanczos_max + 1);
/* Krylov eigenvector workspace; its leading dimension depends on the solver:
 * TPQ/spectrum -> Lanczos_max+1, Lanczos/CG -> max(LanczosTarget, nvec)+1.
 * NOTE(review): the else between orig 304 and 306 was dropped by the extraction. */
300 if (
X->Def.iCalcType == TPQCalc ||
X->Def.iFlgCalcSpec != CALCSPEC_NOT) {
301 c_malloc2(
vec,
X->Def.Lanczos_max + 1,
X->Def.Lanczos_max + 1);
302 }
else if (
X->Def.iCalcType == Lanczos ||
X->Def.iCalcType == CG) {
303 if (
X->Def.LanczosTarget >
X->Def.nvec) {
304 c_malloc2(
vec,
X->Def.LanczosTarget + 1,
X->Def.Lanczos_max + 1);
306 c_malloc2(
vec,
X->Def.nvec + 1,
X->Def.Lanczos_max + 1);
/* Full diagonalization: per-state observables plus dense Ham and eigenvectors. */
310 if (
X->Def.iCalcType == FullDiag) {
311 d_malloc1(
X->Phys.all_num_down,
X->Check.idim_max + 1);
312 d_malloc1(
X->Phys.all_num_up,
X->Check.idim_max + 1);
313 d_malloc1(
X->Phys.all_energy,
X->Check.idim_max + 1);
314 d_malloc1(
X->Phys.all_doublon,
X->Check.idim_max + 1);
315 d_malloc1(
X->Phys.all_sz,
X->Check.idim_max + 1);
316 d_malloc1(
X->Phys.all_s2,
X->Check.idim_max + 1);
317 c_malloc2(
Ham,
X->Check.idim_max + 1,
X->Check.idim_max + 1);
318 c_malloc2(
L_vec,
X->Check.idim_max + 1,
X->Check.idim_max + 1);
/* Allocation-failure check. NOTE(review): all_sz is not tested here although it
 * is allocated above — possibly an upstream oversight; the failure branch itself
 * (orig 325-327) is missing from this extraction. */
320 if (
X->Phys.all_num_down == NULL
321 ||
X->Phys.all_num_up == NULL
322 ||
X->Phys.all_energy == NULL
323 ||
X->Phys.all_doublon == NULL
324 ||
X->Phys.all_s2 == NULL
/* Row-wise NULL check for the dense matrices. */
328 for (j = 0; j <
X->Check.idim_max + 1; j++) {
329 if (
Ham[j] == NULL ||
L_vec[j] == NULL) {
/* CG: only k_exct target states need per-state observable storage. */
333 }
else if (
X->Def.iCalcType == CG) {
334 d_malloc1(
X->Phys.all_num_down,
X->Def.k_exct);
335 d_malloc1(
X->Phys.all_num_up,
X->Def.k_exct);
336 d_malloc1(
X->Phys.all_energy,
X->Def.k_exct);
337 d_malloc1(
X->Phys.all_doublon,
X->Def.k_exct);
338 d_malloc1(
X->Phys.all_sz,
X->Def.k_exct);
339 d_malloc1(
X->Phys.all_s2,
X->Def.k_exct);
/**
 * Set the size of memory for the InterAllDiagonal and InterAllOffDiagonal arrays.
 *
 * NOTE(review): the extraction dropped this function's opening line and closing
 * brace and fused original line numbers (356-365) into the code. Reconstructed
 * from the fragment plus the full signature listed verbatim in this file's
 * symbol index; no behavior was invented.
 *
 * @param InterAllOffDiagonal    [out] NInterAll x 8 index table for off-diagonal terms.
 * @param ParaInterAllOffDiagonal[out] NInterAll complex coefficients for off-diagonal terms.
 * @param InterAllDiagonal       [out] NInterAll x 4 index table for diagonal terms.
 * @param ParaInterAllDiagonal   [out] NInterAll real coefficients for diagonal terms.
 * @param NInterAll              [in]  Number of InterAll interaction terms.
 */
void setmem_IntAll_Diagonal(
  int **InterAllOffDiagonal,
  double complex *ParaInterAllOffDiagonal,
  int **InterAllDiagonal,
  double *ParaInterAllDiagonal,
  const int NInterAll
) {
  /* Project allocation macros: i_=int, c_=double complex, d_=double. */
  i_malloc2(InterAllOffDiagonal, NInterAll, 8);
  c_malloc1(ParaInterAllOffDiagonal, NInterAll);
  i_malloc2(InterAllDiagonal, NInterAll, 4);
  d_malloc1(ParaInterAllDiagonal, NInterAll);
}
/* NOTE(review): fragment of the body of GetlistSize(struct BindStruct *X)
 * (per the symbol index: "Set size of lists for the canonical ensemble").
 * Lossy extraction: other case labels, else-keywords, and closing braces were
 * dropped (orig gaps 386->389, 394->396, 399->401). Comments only; code untouched. */
386 switch (
X->Def.iCalcModel) {
/* Other case labels (e.g. for orig lines 387-388) are missing from this extraction. */
389 case HubbardNConserved:
/* Standard (S=1/2) spin: list sizes derive from the half-space dimension sdim.
 * For the Spin model with an odd site count, list_2_1 needs twice the room. */
392 if (
X->Def.iFlgGeneralSpin ==
FALSE) {
393 if (
X->Def.iCalcModel == Spin &&
X->Def.Nsite % 2 == 1) {
394 X->Large.SizeOflist_2_1 =
X->Check.sdim * 2 + 2;
396 X->Large.SizeOflist_2_1 =
X->Check.sdim + 2;
398 X->Large.SizeOflist_2_2 =
X->Check.sdim + 2;
399 X->Large.SizeOflistjb =
X->Check.sdim + 2;
/* General spin (else-branch; the else keyword was dropped): sizes come from the
 * last site's Tpow and SiteToBit entries divided by sdim. */
401 X->Large.SizeOflist_2_1 =
X->Check.sdim + 2;
402 X->Large.SizeOflist_2_2 =
403 X->Def.Tpow[
X->Def.Nsite - 1] *
X->Def.SiteToBit[
X->Def.Nsite - 1] /
X->Check.sdim + 2;
404 X->Large.SizeOflistjb =
405 X->Def.Tpow[
X->Def.Nsite - 1] *
X->Def.SiteToBit[
X->Def.Nsite - 1] /
X->Check.sdim + 2;
long unsigned int * list_1buf
long unsigned int num_pivot
void setmem_def(struct BindStruct *X, struct BoostList *xBoost)
Set the sizes of memory allocations for Def and Phys in BindStruct.
int setmem_large(struct BindStruct *X)
Set size of memories for Hamiltonian (Ham, L_vec), vectors(vg, v0, v1, v2, vec, alpha, beta), lists (list_1, list_2_1, list_2_2, list_Diagonal) and Phys(BindStruct.PhysList) struct in the case of Full Diag mode.
double complex *** arrayJ
const char * cProFinishAlloc
unsigned long int MaxMPI_li(unsigned long int idim)
MPI wrapper function to obtain maximum unsigned long integer across processes.
long unsigned int * list_2_1
static unsigned long int mfint[7]
void setmem_HEAD(struct BindStruct *X)
Set the size of memory for the headers of the output files.
int GetlistSize(struct BindStruct *X)
Set size of lists for the canonical ensemble.
long unsigned int * list_1
long unsigned int * list_2_2
void setmem_IntAll_Diagonal(int **InterAllOffDiagonal, double complex *ParaInterAllOffDiagonal, int **InterAllDiagonal, double *ParaInterAllDiagonal, const int NInterAll)
Set the size of memories for InterAllDiagonal and InterAllOffDiagonal arrays.
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()