From 005eb3a4f63199d99d7ea89627e804bbefcb65df Mon Sep 17 00:00:00 2001
From: Thorsten Liebig
Date: Tue, 15 Mar 2011 09:41:29 +0100
Subject: [PATCH] MPI: added an additional line at the upper MPI-bound for correct material averaging

This also fixes the current send and receive issue --> Engine_MPI::SendReceiveCurrents()
---
 FDTD/engine_mpi.cpp        | 22 ++++++----------------
 FDTD/openems_fdtd_mpi.cpp  | 12 ++++++++++++
 FDTD/operator_cylinder.cpp |  4 ++--
 FDTD/operator_mpi.cpp      | 22 ++++++++--------------
 FDTD/operator_mpi.h        |  3 ++-
 5 files changed, 30 insertions(+), 33 deletions(-)

diff --git a/FDTD/engine_mpi.cpp b/FDTD/engine_mpi.cpp
index 1bf85ff..9cb5d53 100644
--- a/FDTD/engine_mpi.cpp
+++ b/FDTD/engine_mpi.cpp
@@ -57,11 +57,11 @@ void Engine_MPI::Init()
         if (m_Op_MPI->m_NeighborDown[n]>=0)
         {
-            m_BufferDown[n] = new float[m_BufferSize[n]*3];
+            m_BufferDown[n] = new float[m_BufferSize[n]*2];
         }
         if (m_Op_MPI->m_NeighborUp[n]>=0)
         {
-            m_BufferUp[n] = new float[m_BufferSize[n]*3];
+            m_BufferUp[n] = new float[m_BufferSize[n]*2];
         }
     }
 }
@@ -100,7 +100,7 @@
         //send voltages
         unsigned int iPos=0;
-        pos[n]=numLines[n]-1;
+        pos[n]=numLines[n]-2;
         if (m_Op_MPI->m_NeighborUp[n]>=0)
         {
             for (pos[nP]=0; pos[nP]<numLines[nP]; ++pos[nP])
GetMPIEnabled()) return;
@@ -152,7 +144,7 @@ void Engine_MPI::SendReceiveCurrents()
     //non-blocking prepare for receive...
     for (int n=0;n<3;++n)
         if (m_Op_MPI->m_NeighborUp[n]>=0)
-            MPI_Irecv( m_BufferUp[n] , m_BufferSize[n]*3, MPI_FLOAT, m_Op_MPI->m_NeighborUp[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Recv_Request[n]);
+            MPI_Irecv( m_BufferUp[n] , m_BufferSize[n]*2, MPI_FLOAT, m_Op_MPI->m_NeighborUp[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Recv_Request[n]);
 
     for (int n=0;n<3;++n)
     {
@@ -168,16 +160,15 @@ void Engine_MPI::SendReceiveCurrents()
         {
             for (pos[nPP]=0; pos[nPP]<numLines[nPP]; ++pos[nPP])
-            MPI_Isend( m_BufferDown[n] , m_BufferSize[n]*3, MPI_FLOAT, m_Op_MPI->m_NeighborDown[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Send_Request[n]);
+            MPI_Isend( m_BufferDown[n] , m_BufferSize[n]*2, MPI_FLOAT, m_Op_MPI->m_NeighborDown[n], m_Op_MPI->m_MyTag, MPI_COMM_WORLD, &Send_Request[n]);
         }
 
         //receive currents
-        pos[n]=numLines[n]-1;
+        pos[n]=numLines[n]-2;
         iPos=0;
         if (m_Op_MPI->m_NeighborUp[n]>=0)
         {
@@ -187,7 +178,6 @@ void Engine_MPI::SendReceiveCurrents()
         {
             for (pos[nPP]=0; pos[nPP]<numLines[nPP]; ++pos[nPP])
diff --git a/FDTD/openems_fdtd_mpi.cpp b/FDTD/openems_fdtd_mpi.cpp
--- a/FDTD/openems_fdtd_mpi.cpp
+++ b/FDTD/openems_fdtd_mpi.cpp
                 if (i>0)
                     m_MPI_Op->SetNeighborDown(0,procTable[i-1][j][k]);
                 if (i<numProcs[0]-1)
+                {
+                    grid->AddDiscLine(0, m_Original_Grid->GetLine(0,SplitNumber[0].at(i+1)+1 ));
                     m_MPI_Op->SetNeighborUp(0,procTable[i+1][j][k]);
+                }
                 if (j>0)
                     m_MPI_Op->SetNeighborDown(1,procTable[i][j-1][k]);
                 if (j<numProcs[1]-1)
+                {
+                    grid->AddDiscLine(1, m_Original_Grid->GetLine(1,SplitNumber[1].at(j+1)+1 ));
                     m_MPI_Op->SetNeighborUp(1,procTable[i][j+1][k]);
+                }
                 if (k>0)
                     m_MPI_Op->SetNeighborDown(2,procTable[i][j][k-1]);
                 if (k<numProcs[2]-1)
+                {
+                    grid->AddDiscLine(2, m_Original_Grid->GetLine(2,SplitNumber[2].at(k+1)+1 ));
                     m_MPI_Op->SetNeighborUp(2,procTable[i][j][k+1]);
+                }
             }
         }
diff --git a/FDTD/operator_cylinder.cpp b/FDTD/operator_cylinder.cpp
index 9f520a1..2a20e24 100644
--- a/FDTD/operator_cylinder.cpp
+++ b/FDTD/operator_cylinder.cpp
@@ -61,9 +61,9 @@ inline unsigned int Operator_Cylinder::GetNumberOfLines(int ny) const
 {
     //this is necessary for a correct field processing... cylindrical engine has to reset this by adding +1
     if (CC_closedAlpha && ny==1)
-        return numLines[1]-1;
+        return Operator_Multithread::GetNumberOfLines(ny)-1;
 
-    return numLines[ny];
+    return Operator_Multithread::GetNumberOfLines(ny);
 }
 
 string Operator_Cylinder::GetDirName(int ny) const
diff --git a/FDTD/operator_mpi.cpp b/FDTD/operator_mpi.cpp
index dafe8f1..121f560 100644
--- a/FDTD/operator_mpi.cpp
+++ b/FDTD/operator_mpi.cpp
@@ -77,20 +77,6 @@ void Operator_MPI::SetBoundaryCondition(int* BCs)
     Operator_SSE_Compressed::SetBoundaryCondition(BCs);
 }
 
-void Operator_MPI::ApplyElectricBC(bool* dirs)
-{
-    if (!m_MPI_Enabled)
-        return Operator_SSE_Compressed::ApplyElectricBC(dirs);
-
-    for (int n=0;n<3;++n)
-    {
-        //do not delete operator at upper inteface
-        if (m_NeighborUp[n]>=0)
-            dirs[2*n+1] = false;
-    }
-    Operator_SSE_Compressed::ApplyElectricBC(dirs);
-}
-
 Engine* Operator_MPI::CreateEngine() const
 {
     if (m_MPI_Enabled)
@@ -171,6 +157,14 @@ void Operator_MPI::SetOriginalMesh(CSRectGrid* orig_Mesh)
     }
 }
 
+unsigned int Operator_MPI::GetNumberOfLines(int ny) const
+{
+    if ((!m_MPI_Enabled) || (m_NeighborUp[ny]<0))
+        return Operator_SSE_Compressed::GetNumberOfLines(ny);
+
+    return Operator_SSE_Compressed::GetNumberOfLines(ny)-1;
+}
+
 string Operator_MPI::PrependRank(string name)
 {
     stringstream out_name;
diff --git a/FDTD/operator_mpi.h b/FDTD/operator_mpi.h
index 37874f6..b2b7bad 100644
--- a/FDTD/operator_mpi.h
+++ b/FDTD/operator_mpi.h
@@ -31,7 +31,6 @@ public:
     bool GetMPIEnabled() const {return m_MPI_Enabled;}
 
     virtual void SetBoundaryCondition(int* BCs);
-    virtual void ApplyElectricBC(bool* dirs);
 
     virtual Engine* CreateEngine() const;
 
@@ -45,6 +44,8 @@ public:
     virtual void SetSplitPos(int ny, unsigned int pos) {m_SplitPos[ny]=pos;}
     virtual void SetOriginalMesh(CSRectGrid* orig_Mesh);
 
+    virtual unsigned int GetNumberOfLines(int ny) const;
+
 protected:
     Operator_MPI();
     bool m_MPI_Enabled;
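
For illustration only (not part of the patch): the buffer-size change from m_BufferSize[n]*3 to m_BufferSize[n]*2 reflects that only the two field components tangential to an MPI interface plane need to be exchanged with the neighboring process. The following standalone sketch shows that kind of non-blocking plane exchange in isolation; it only loosely mirrors the structure of Engine_MPI::SendReceiveVoltages(), and the names ny, nz, planeSize, neighborUp and neighborDown are hypothetical, not taken from the openEMS sources.

// halo_sketch.cpp -- minimal, illustrative plane exchange (not openEMS code)
#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0, size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // hypothetical local mesh: the interface plane has ny*nz cells
    const unsigned int ny = 8, nz = 8;
    const unsigned int planeSize = ny * nz;

    // two tangential components per interface cell (hence the *2 factor)
    std::vector<float> sendUp(planeSize * 2, float(rank));
    std::vector<float> recvDown(planeSize * 2, 0.0f);

    // processes without a neighbor use MPI_PROC_NULL, so no special-casing is needed
    const int neighborUp   = (rank + 1 < size) ? rank + 1 : MPI_PROC_NULL;
    const int neighborDown = (rank > 0)        ? rank - 1 : MPI_PROC_NULL;

    MPI_Request reqs[2];
    // non-blocking receive of the plane coming from the lower neighbor...
    MPI_Irecv(recvDown.data(), planeSize * 2, MPI_FLOAT, neighborDown, 0, MPI_COMM_WORLD, &reqs[0]);
    // ...and non-blocking send of the upper boundary plane to the upper neighbor
    MPI_Isend(sendUp.data(), planeSize * 2, MPI_FLOAT, neighborUp, 0, MPI_COMM_WORLD, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);

    if (neighborDown != MPI_PROC_NULL)
        std::printf("rank %d received plane from rank %d (first value: %g)\n",
                    rank, neighborDown, recvDown[0]);

    MPI_Finalize();
    return 0;
}

Build and run with any MPI implementation, e.g. mpic++ halo_sketch.cpp -o halo_sketch && mpirun -np 4 ./halo_sketch. Using MPI_PROC_NULL for missing neighbors plays the same role as the m_NeighborUp[n]>=0 / m_NeighborDown[n]>=0 checks in the patch above: the exchange code stays identical for interior and boundary processes.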