Revision | 8c5c2c662e821c33edad4f9f17a1abc96c25950c (tree) |
---|---|
Time | 2013-10-09 14:15:29 |
Author | Mikiya Fujii <mikiya.fujii@gmai...> |
Committer | Mikiya Fujii |
AsyncCommunicator is refactored: methods renamed. #31814
git-svn-id: https://svn.sourceforge.jp/svnroot/molds/trunk@1538 1136aad2-a195-0410-b898-f5ea1d11b9d8
@@ -1444,10 +1444,10 @@ void Cndo2::CalcFockMatrix(double** fockMatrix, | ||
1444 | 1444 | double* buff = &fockMatrix[mu][mu]; |
1445 | 1445 | MolDS_mpi::molds_mpi_int num = totalNumberAOs-mu; |
1446 | 1446 | if(mpiRank == mpiHeadRank && mpiRank != calcRank){ |
1447 | - asyncCommunicator.SetRecvedVector(buff, num, source, tag); | |
1447 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
1448 | 1448 | } |
1449 | 1449 | if(mpiRank != mpiHeadRank && mpiRank == calcRank){ |
1450 | - asyncCommunicator.SetSentVector(buff, num, dest, tag); | |
1450 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
1451 | 1451 | } |
1452 | 1452 | } // end of loop mu parallelized with MPI |
1453 | 1453 | } // end of loop A |
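This hunk is representative of every point-to-point call site touched by the revision: the head rank registers the row buffer with the renamed SetRecvedMessage, the rank that computed the row registers the same memory region with SetSentMessage, and the background communication thread pairs the two requests. Below is a minimal sketch of that head/worker asymmetry with a logging stub in place of the real AsyncCommunicator; the rank values, loop bounds, and tag choice are illustrative, not MolDS code.

```cpp
#include <cstdio>
#include <vector>

// Logging stand-in for MolDS's AsyncCommunicator; it only records what would be queued.
struct StubCommunicator {
   void SetRecvedMessage(double* buff, long num, int source, int tag){
      (void)buff;
      std::printf("queue Recv: %ld doubles from rank %d (tag %d)\n", num, source, tag);
   }
   void SetSentMessage(double* buff, long num, int dest, int tag){
      (void)buff;
      std::printf("queue Send: %ld doubles to rank %d (tag %d)\n", num, dest, tag);
   }
};

int main(){
   const int totalNumberAOs = 4;
   const int mpiRank = 0, mpiHeadRank = 0, mpiSize = 2;   // pretend this process is the head rank
   std::vector<std::vector<double> > fockMatrix(totalNumberAOs,
                                                std::vector<double>(totalNumberAOs, 0.0));
   StubCommunicator asyncCommunicator;

   for(int mu = 0; mu < totalNumberAOs; mu++){
      int calcRank = mu % mpiSize;              // illustrative work distribution
      double* buff = &fockMatrix[mu][mu];       // diagonal-to-end segment of row mu
      long num = totalNumberAOs - mu;
      int tag = mu;                             // illustrative tag choice
      if(mpiRank == mpiHeadRank && mpiRank != calcRank){
         asyncCommunicator.SetRecvedMessage(buff, num, calcRank, tag);   // head collects remote rows
      }
      if(mpiRank != mpiHeadRank && mpiRank == calcRank){
         asyncCommunicator.SetSentMessage(buff, num, mpiHeadRank, tag);  // worker ships its rows
      }
   }
   return 0;
}
```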
@@ -1672,10 +1672,10 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{ | ||
1672 | 1672 | double* buff = &gammaAB[A][A]; |
1673 | 1673 | MolDS_mpi::molds_mpi_int num = totalAtomNumber-A; |
1674 | 1674 | if(mpiRank == mpiHeadRank && mpiRank != calcRank){ |
1675 | - asyncCommunicator.SetRecvedVector(buff, num, source, tag); | |
1675 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
1676 | 1676 | } |
1677 | 1677 | if(mpiRank != mpiHeadRank && mpiRank == calcRank){ |
1678 | - asyncCommunicator.SetSentVector(buff, num, dest, tag); | |
1678 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
1679 | 1679 | } |
1680 | 1680 | } // end of loop A parallelized by MPI |
1681 | 1681 | communicationThread.join(); |
@@ -1841,14 +1841,14 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix, | ||
1841 | 1841 | double* buffZ = &cartesianMatrix[ZAxis][firstAOIndexA][0]; |
1842 | 1842 | MolDS_mpi::molds_mpi_int num = numValenceAOsA*totalAONumber; |
1843 | 1843 | if(mpiRank == mpiHeadRank && mpiRank != calcRank){ |
1844 | - asyncCommunicator.SetRecvedVector(buffX, num, source, tagX); | |
1845 | - asyncCommunicator.SetRecvedVector(buffY, num, source, tagY); | |
1846 | - asyncCommunicator.SetRecvedVector(buffZ, num, source, tagZ); | |
1844 | + asyncCommunicator.SetRecvedMessage(buffX, num, source, tagX); | |
1845 | + asyncCommunicator.SetRecvedMessage(buffY, num, source, tagY); | |
1846 | + asyncCommunicator.SetRecvedMessage(buffZ, num, source, tagZ); | |
1847 | 1847 | } |
1848 | 1848 | if(mpiRank != mpiHeadRank && mpiRank == calcRank){ |
1849 | - asyncCommunicator.SetSentVector(buffX, num, dest, tagX); | |
1850 | - asyncCommunicator.SetSentVector(buffY, num, dest, tagY); | |
1851 | - asyncCommunicator.SetSentVector(buffZ, num, dest, tagZ); | |
1849 | + asyncCommunicator.SetSentMessage(buffX, num, dest, tagX); | |
1850 | + asyncCommunicator.SetSentMessage(buffY, num, dest, tagY); | |
1851 | + asyncCommunicator.SetSentMessage(buffZ, num, dest, tagZ); | |
1852 | 1852 | } |
1853 | 1853 | } // end of loop for int A with MPI |
1854 | 1854 | // Delete the communication thread. |
@@ -3965,10 +3965,10 @@ void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{ | ||
3965 | 3965 | double* buff = overlapAOs[firstAOIndexA]; |
3966 | 3966 | MolDS_mpi::molds_mpi_int num = totalAONumber*numValenceAOs; |
3967 | 3967 | if(mpiRank == mpiHeadRank && mpiRank != calcRank){ |
3968 | - asyncCommunicator.SetRecvedVector(buff, num, source, tag); | |
3968 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3969 | 3969 | } |
3970 | 3970 | if(mpiRank != mpiHeadRank && mpiRank == calcRank){ |
3971 | - asyncCommunicator.SetSentVector(buff, num, dest, tag); | |
3971 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3972 | 3972 | } |
3973 | 3973 | } // end of loop A parallelized with MPI |
3974 | 3974 | communicationThread.join(); |
@@ -3515,7 +3515,7 @@ void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore, | ||
3515 | 3515 | OrbitalType twoElecLimit = dxy; |
3516 | 3516 | int numBuff = (twoElecLimit+1)*twoElecLimit/2; |
3517 | 3517 | int num = (totalNumberAtoms-b)*numBuff*numBuff; |
3518 | - asyncCommunicator.SetBroadcastedVector(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank); | |
3518 | + asyncCommunicator.SetBroadcastedMessage(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank); | |
3519 | 3519 | } |
3520 | 3520 | } // end of loop a parallelized with MPI |
3521 | 3521 | communicationThread.join(); |
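Unlike the point-to-point sites, this call site is symmetric: every rank enqueues the same buffer with the renamed SetBroadcastedMessage and names the rank that computed the block as the broadcast root, so no head-rank test is needed. A hedged sketch of that pattern follows, again with a logging stub rather than the MolDS class; numBuff, the loop bounds, and the flat buffer are illustrative.

```cpp
#include <cstdio>
#include <vector>

// Logging stand-in for MolDS's AsyncCommunicator.
struct StubCommunicator {
   void SetBroadcastedMessage(double* buff, long num, int root){
      (void)buff;
      std::printf("queue Broadcast: %ld doubles rooted at rank %d\n", num, root);
   }
};

int main(){
   const int totalNumberAtoms = 3;
   const int numBuff = 15;   // illustrative; the real code derives it from the dxy orbital limit
   const int mpiSize = 2;
   // Flat stand-in for this->twoElecTwoCoreMpiBuff[a][b][0][0].
   std::vector<double> mpiBuff(totalNumberAtoms * numBuff * numBuff, 0.0);
   StubCommunicator asyncCommunicator;

   for(int a = 0; a < totalNumberAtoms; a++){
      int calcRank = a % mpiSize;                          // illustrative distribution
      for(int b = a; b < totalNumberAtoms; b++){
         long num = (long)(totalNumberAtoms - b) * numBuff * numBuff;
         // Every rank posts the same broadcast; calcRank is the root that owns the data.
         asyncCommunicator.SetBroadcastedMessage(&mpiBuff[0], num, calcRank);
      }
   }
   return 0;
}
```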
@@ -33,23 +33,23 @@ public: | ||
33 | 33 | while(0<passingTimes){ |
34 | 34 | boost::mutex::scoped_lock lk(this->stateGuard); |
35 | 35 | try{ |
36 | - DataInfo dInfo = this->dataQueue.FrontPop(); | |
37 | - if(dInfo.mpiFuncType == MolDS_base::Send){ | |
38 | - MolDS_mpi::MpiProcess::GetInstance()->Send(dInfo.dest, | |
39 | - dInfo.tag, | |
40 | - reinterpret_cast<T*>(dInfo.vectorPtr), | |
41 | - dInfo.num); | |
36 | + MessageInfo mInfo = this->messageQueue.FrontPop(); | |
37 | + if(mInfo.mpiFuncType == MolDS_base::Send){ | |
38 | + MolDS_mpi::MpiProcess::GetInstance()->Send(mInfo.dest, | |
39 | + mInfo.tag, | |
40 | + reinterpret_cast<T*>(mInfo.vectorPtr), | |
41 | + mInfo.num); | |
42 | 42 | } |
43 | - else if(dInfo.mpiFuncType == MolDS_base::Recv){ | |
44 | - MolDS_mpi::MpiProcess::GetInstance()->Recv(dInfo.source, | |
45 | - dInfo.tag, | |
46 | - reinterpret_cast<T*>(dInfo.vectorPtr), | |
47 | - dInfo.num); | |
43 | + else if(mInfo.mpiFuncType == MolDS_base::Recv){ | |
44 | + MolDS_mpi::MpiProcess::GetInstance()->Recv(mInfo.source, | |
45 | + mInfo.tag, | |
46 | + reinterpret_cast<T*>(mInfo.vectorPtr), | |
47 | + mInfo.num); | |
48 | 48 | } |
49 | - else if(dInfo.mpiFuncType == MolDS_base::Broadcast){ | |
50 | - MolDS_mpi::MpiProcess::GetInstance()->Broadcast(reinterpret_cast<T*>(dInfo.vectorPtr), | |
51 | - dInfo.num, | |
52 | - dInfo.source); | |
49 | + else if(mInfo.mpiFuncType == MolDS_base::Broadcast){ | |
50 | + MolDS_mpi::MpiProcess::GetInstance()->Broadcast(reinterpret_cast<T*>(mInfo.vectorPtr), | |
51 | + mInfo.num, | |
52 | + mInfo.source); | |
53 | 53 | } |
54 | 54 | else{ |
55 | 55 | std::stringstream ss; |
@@ -72,51 +72,51 @@ public: | ||
72 | 72 | } |
73 | 73 | } |
74 | 74 | |
75 | - template<typename T> void SetSentVector(T* vector, | |
76 | - molds_mpi_int num, | |
77 | - int dest, | |
78 | - int tag){ | |
75 | + template<typename T> void SetSentMessage(T* vector, | |
76 | + molds_mpi_int num, | |
77 | + int dest, | |
78 | + int tag){ | |
79 | 79 | int source = NON_USED; |
80 | 80 | MolDS_base::MpiFunctionType mpiFuncType = MolDS_base::Send; |
81 | - this->SetVector(vector, num, source, dest, tag, mpiFuncType); | |
81 | + this->SetMessage(vector, num, source, dest, tag, mpiFuncType); | |
82 | 82 | } |
83 | 83 | |
84 | - template<typename T> void SetRecvedVector(T* vector, | |
85 | - molds_mpi_int num, | |
86 | - int source, | |
87 | - int tag){ | |
84 | + template<typename T> void SetRecvedMessage(T* vector, | |
85 | + molds_mpi_int num, | |
86 | + int source, | |
87 | + int tag){ | |
88 | 88 | int dest = NON_USED; |
89 | 89 | MolDS_base::MpiFunctionType mpiFuncType = MolDS_base::Recv; |
90 | - this->SetVector(vector, num, source, dest, tag, mpiFuncType); | |
90 | + this->SetMessage(vector, num, source, dest, tag, mpiFuncType); | |
91 | 91 | } |
92 | 92 | |
93 | - template<typename T> void SetBroadcastedVector(T* vector, molds_mpi_int num, int root){ | |
93 | + template<typename T> void SetBroadcastedMessage(T* vector, molds_mpi_int num, int root){ | |
94 | 94 | int source = root; |
95 | 95 | int dest = NON_USED; |
96 | 96 | int tag = NON_USED; |
97 | 97 | MolDS_base::MpiFunctionType mpiFuncType = MolDS_base::Broadcast; |
98 | - this->SetVector(vector, num, source, dest, tag, mpiFuncType); | |
98 | + this->SetMessage(vector, num, source, dest, tag, mpiFuncType); | |
99 | 99 | } |
100 | 100 | |
101 | 101 | private: |
102 | - struct DataInfo{intptr_t vectorPtr; | |
103 | - molds_mpi_int num; | |
104 | - int source; | |
105 | - int dest; | |
106 | - int tag; | |
107 | - MolDS_base::MpiFunctionType mpiFuncType;}; | |
102 | + struct MessageInfo{intptr_t vectorPtr; | |
103 | + molds_mpi_int num; | |
104 | + int source; | |
105 | + int dest; | |
106 | + int tag; | |
107 | + MolDS_base::MpiFunctionType mpiFuncType;}; | |
108 | 108 | boost::mutex stateGuard; |
109 | 109 | boost::condition stateChange; |
110 | - MolDS_base_containers::ThreadSafeQueue<DataInfo> dataQueue; | |
111 | - template<typename T> void SetVector(T* vector, | |
112 | - molds_mpi_int num, | |
113 | - int source, | |
114 | - int dest, | |
115 | - int tag, | |
116 | - MolDS_base::MpiFunctionType mpiFuncType){ | |
110 | + MolDS_base_containers::ThreadSafeQueue<MessageInfo> messageQueue; | |
111 | + template<typename T> void SetMessage(T* vector, | |
112 | + molds_mpi_int num, | |
113 | + int source, | |
114 | + int dest, | |
115 | + int tag, | |
116 | + MolDS_base::MpiFunctionType mpiFuncType){ | |
117 | 117 | boost::mutex::scoped_lock lk(this->stateGuard); |
118 | - DataInfo dInfo = {reinterpret_cast<intptr_t>(vector), num, source, dest, tag, mpiFuncType}; | |
119 | - this->dataQueue.Push(dInfo); | |
118 | + MessageInfo mInfo = {reinterpret_cast<intptr_t>(vector), num, source, dest, tag, mpiFuncType}; | |
119 | + this->messageQueue.Push(mInfo); | |
120 | 120 | this->stateChange.notify_all(); |
121 | 121 | } |
122 | 122 | }; |
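Taken together, the two hunks above show the refactored class as a producer/consumer pair: the renamed Set*Message wrappers build a MessageInfo record and push it onto messageQueue under the mutex, while the communication thread pops records and dispatches them to Send, Recv, or Broadcast. The following is a self-contained sketch of that shape only, assuming the standard library (std::thread, std::mutex, std::condition_variable) in place of Boost, a plain std::queue in place of ThreadSafeQueue, an explicit message count in place of the passingTimes bookkeeping, and printf stubs in place of MolDS_mpi::MpiProcess.

```cpp
#include <cstdint>
#include <cstdio>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

enum MpiFunctionType { Send, Recv, Broadcast };   // stand-in for MolDS_base::MpiFunctionType
const int NON_USED = -1;                          // stand-in for MolDS's NON_USED marker

class AsyncCommunicatorSketch {
public:
   // Consumer: pop 'passingTimes' messages and hand each to the (stubbed) MPI layer.
   template<typename T> void Run(int passingTimes){
      while(0 < passingTimes){
         std::unique_lock<std::mutex> lk(this->stateGuard);
         this->stateChange.wait(lk, [this]{ return !this->messageQueue.empty(); });
         MessageInfo mInfo = this->messageQueue.front();
         this->messageQueue.pop();
         lk.unlock();
         T* data = reinterpret_cast<T*>(mInfo.vectorPtr);
         (void)data;                               // the real Run passes this buffer to MPI
         if(mInfo.mpiFuncType == Send){
            std::printf("Send %ld items to %d (tag %d)\n", mInfo.num, mInfo.dest, mInfo.tag);
         }
         else if(mInfo.mpiFuncType == Recv){
            std::printf("Recv %ld items from %d (tag %d)\n", mInfo.num, mInfo.source, mInfo.tag);
         }
         else{
            std::printf("Broadcast %ld items rooted at %d\n", mInfo.num, mInfo.source);
         }
         passingTimes--;
      }
   }
   // Producers: thin wrappers that tag the request and delegate to SetMessage.
   template<typename T> void SetSentMessage(T* vector, long num, int dest, int tag){
      this->SetMessage(vector, num, NON_USED, dest, tag, Send);
   }
   template<typename T> void SetRecvedMessage(T* vector, long num, int source, int tag){
      this->SetMessage(vector, num, source, NON_USED, tag, Recv);
   }
   template<typename T> void SetBroadcastedMessage(T* vector, long num, int root){
      this->SetMessage(vector, num, root, NON_USED, NON_USED, Broadcast);
   }
private:
   struct MessageInfo{ std::intptr_t vectorPtr; long num;
                       int source; int dest; int tag;
                       MpiFunctionType mpiFuncType; };
   std::mutex stateGuard;
   std::condition_variable stateChange;
   std::queue<MessageInfo> messageQueue;
   template<typename T> void SetMessage(T* vector, long num, int source, int dest,
                                        int tag, MpiFunctionType mpiFuncType){
      std::lock_guard<std::mutex> lk(this->stateGuard);
      MessageInfo mInfo = {reinterpret_cast<std::intptr_t>(vector), num, source, dest, tag, mpiFuncType};
      this->messageQueue.push(mInfo);
      this->stateChange.notify_all();
   }
};

int main(){
   AsyncCommunicatorSketch asyncCommunicator;
   std::vector<double> buff(8, 0.0);
   // The communication thread drains three queued messages, as the call sites above do
   // before communicationThread.join().
   std::thread communicationThread(&AsyncCommunicatorSketch::Run<double>,
                                   &asyncCommunicator, 3);
   asyncCommunicator.SetRecvedMessage(&buff[0], (long)buff.size(), /*source*/1, /*tag*/0);
   asyncCommunicator.SetSentMessage(&buff[0], (long)buff.size(), /*dest*/0, /*tag*/0);
   asyncCommunicator.SetBroadcastedMessage(&buff[0], (long)buff.size(), /*root*/1);
   communicationThread.join();
   return 0;
}
```

Storing the buffer as an integer (intptr_t vectorPtr) lets one queue carry buffers of any element type; the template parameter of the consuming loop restores the type when the message is handed to MPI, which is exactly the reinterpret_cast pattern visible in the hunk above.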
@@ -2418,10 +2418,10 @@ void ZindoS::CalcCISMatrix(double** matrixCIS) const{ | ||
2418 | 2418 | int num = this->matrixCISdimension - k; |
2419 | 2419 | double* buff = &this->matrixCIS[k][k]; |
2420 | 2420 | if(mpiRank == mpiHeadRank && mpiRank != calcRank){ |
2421 | - asyncCommunicator.SetRecvedVector(buff, num, source, tag); | |
2421 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
2422 | 2422 | } |
2423 | 2423 | if(mpiRank != mpiHeadRank && mpiRank == calcRank){ |
2424 | - asyncCommunicator.SetSentVector(buff, num, dest, tag); | |
2424 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
2425 | 2425 | } |
2426 | 2426 | } // end of k-loop which is MPI-parallelized |
2427 | 2427 | communicationThread.join(); |
@@ -3361,10 +3361,10 @@ void ZindoS::CalcGammaNRMinusKNRMatrix(double** gammaNRMinusKNR, const vector<Mo | ||
3361 | 3361 | int num = nonRedundantQIndecesSize - i; |
3362 | 3362 | double* buff = &gammaNRMinusKNR[i][i]; |
3363 | 3363 | if(mpiRank == mpiHeadRank && mpiRank != calcRank){ |
3364 | - asyncCommunicator.SetRecvedVector(buff, num, source, tag); | |
3364 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3365 | 3365 | } |
3366 | 3366 | if(mpiRank != mpiHeadRank && mpiRank == calcRank){ |
3367 | - asyncCommunicator.SetSentVector(buff, num, dest, tag); | |
3367 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3368 | 3368 | } |
3369 | 3369 | } // end of loop-i parallelized with MPI |
3370 | 3370 | communicationThread.join(); |
@@ -3425,10 +3425,10 @@ void ZindoS::CalcKRDagerGammaRInvMatrix(double** kRDagerGammaRInv, | ||
3425 | 3425 | int num = redundantQIndecesSize; |
3426 | 3426 | double* buff = &kRDagerGammaRInv[i][0]; |
3427 | 3427 | if(mpiRank == mpiHeadRank && mpiRank != calcRank){ |
3428 | - asyncCommunicator.SetRecvedVector(buff, num, source, tag); | |
3428 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3429 | 3429 | } |
3430 | 3430 | if(mpiRank != mpiHeadRank && mpiRank == calcRank){ |
3431 | - asyncCommunicator.SetSentVector(buff, num, dest, tag); | |
3431 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3432 | 3432 | } |
3433 | 3433 | } // end of loop-i parallelized with MPI |
3434 | 3434 | communicationThread.join(); |