MPI: added cylindrical multigrid support

The cylindrical multigrid scheme needed a workaround:
an additional barrier is necessary to prevent the base and child engines from accessing the MPI communication simultaneously.
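
The idea in a minimal sketch (illustrative names, not the openEMS classes): a boost::barrier shared by the two engines' communication threads parks the child until the base has finished its exchange, so the two never talk to MPI at the same time.

    #include <boost/thread/barrier.hpp>
    #include <boost/thread/thread.hpp>

    static boost::barrier mpi_gate(2); // base thread 0 + child thread 0

    static void base_comm()
    {
        // ... base engine MPI exchange runs exclusively here ...
        mpi_gate.wait();               // done: release the child
    }

    static void child_comm()
    {
        mpi_gate.wait();               // parked until the base exchange is done
        // ... child engine MPI exchange runs exclusively here ...
    }

    int main()
    {
        boost::thread base(base_comm), child(child_comm);
        base.join();
        child.join();
        return 0;
    }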

Signed-off-by: Thorsten Liebig <Thorsten.Liebig@gmx.de>
Thorsten Liebig 2011-02-23 11:08:24 +01:00
parent 14e12f9138
commit 20d6547235
7 changed files with 88 additions and 4 deletions

FDTD/engine_cylindermultigrid.cpp

@@ -62,6 +62,11 @@ Engine_CylinderMultiGrid::Engine_CylinderMultiGrid(const Operator_CylinderMultiG
Engine_CylinderMultiGrid::~Engine_CylinderMultiGrid()
{
#ifdef MPI_SUPPORT
delete m_InnerEngine->m_MPI_Barrier;
m_InnerEngine->m_MPI_Barrier = NULL;
#endif
m_Thread_NumTS = 0;
m_startBarrier->wait();
@@ -81,7 +86,6 @@ Engine_CylinderMultiGrid::~Engine_CylinderMultiGrid()
m_startBarrier = NULL;
delete m_stopBarrier;
m_stopBarrier = NULL;
}
void Engine_CylinderMultiGrid::Init()
@@ -103,6 +107,11 @@ void Engine_CylinderMultiGrid::Init()
m_InnerEngine->SortExtensionByPriority();
SortExtensionByPriority();
#ifdef MPI_SUPPORT
//assign an MPI barrier to the inner engine
m_InnerEngine->m_MPI_Barrier = new boost::barrier(2);
#endif
}
bool Engine_CylinderMultiGrid::IterateTS(unsigned int iterTS)
@@ -246,9 +255,29 @@ void Engine_CylinderMultiGrid::InterpolCurrChild2Base(unsigned int rzPlane)
f4_curr[2][pos[0]][pos[1]][pos[2]].v = m_InnerEngine->f4_curr[2][pos[0]][pos[1]/2][pos[2]].v + one_fourth.v * m_InnerEngine->f4_curr[2][pos[0]][pos[1]/2][pos[2]].v - one_fourth.v * m_InnerEngine->f4_curr[2][pos[0]][pos[1]/2-1][pos[2]].v;
}
}
}
#ifdef MPI_SUPPORT
void Engine_CylinderMultiGrid::SendReceiveVoltages()
{
//do the local voltage sync, child is waiting...
Engine_Multithread::SendReceiveVoltages();
//run inner voltage sync
m_InnerEngine->m_MPI_Barrier->wait();
}
void Engine_CylinderMultiGrid::SendReceiveCurrents()
{
//do the local current sync, child is waiting...
Engine_Multithread::SendReceiveCurrents();
//run inner current sync
m_InnerEngine->m_MPI_Barrier->wait();
}
#endif
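
The barrier is created with a count of two in Init() because exactly two threads meet at it: the base engine's thread 0 and the inner engine's thread 0. A minimal model of the rendezvous (hypothetical Base/Inner stand-ins, not the real class hierarchy):

    #include <boost/thread/barrier.hpp>

    struct Inner
    {
        boost::barrier* m_MPI_Barrier;     // allocated by the outer engine in Init()
        void Sync()                        // what the inner thread 0 effectively runs
        {
            if (m_MPI_Barrier)
                m_MPI_Barrier->wait();     // parked while the base engine talks to MPI
            // ... inner MPI exchange ...
        }
    };

    struct Base
    {
        Inner* m_InnerEngine;
        void Sync()                        // mirrors SendReceiveVoltages()/Currents() above
        {
            // ... base MPI exchange, the child is still parked ...
            m_InnerEngine->m_MPI_Barrier->wait(); // rendezvous: hand MPI over to the child
        }
    };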
/****************************************************************************************/
Engine_CylinderMultiGrid_Thread::Engine_CylinderMultiGrid_Thread( Engine_Multithread* engine, boost::barrier *start, boost::barrier *stop, volatile unsigned int* numTS, bool isBase)
{

FDTD/engine_cylindermultigrid.h

@@ -60,6 +60,11 @@ protected:
boost::barrier *m_WaitOnSync;
Engine_Ext_CylinderMultiGrid* m_Eng_Ext_MG;
#ifdef MPI_SUPPORT
virtual void SendReceiveVoltages();
virtual void SendReceiveCurrents();
#endif
};

FDTD/engine_mpi.h

@@ -49,9 +49,9 @@ protected:
float* m_BufferDown[3];
//! Transfer all tangential voltages at the upper bounds to the lower bounds of the neighbouring MPI-processes
- void SendReceiveVoltages();
+ virtual void SendReceiveVoltages();
//! Transfer all tangential currents at the lower bounds to the upper bounds of the neighbouring MPI-processes
- void SendReceiveCurrents();
+ virtual void SendReceiveCurrents();
};
#endif // ENGINE_MPI_H

FDTD/engine_multithread.cpp

@@ -51,6 +51,10 @@ Engine_Multithread::Engine_Multithread(const Operator_Multithread* op) : ENGINE_
m_IterateBarrier = 0;
m_startBarrier = 0;
m_stopBarrier = 0;
#ifdef MPI_SUPPORT
m_MPI_Barrier = 0;
#endif
}
Engine_Multithread::~Engine_Multithread()
@@ -100,6 +104,9 @@ void Engine_Multithread::Init()
m_startBarrier = new boost::barrier(m_numThreads+1); // numThread workers + 1 controller
m_stopBarrier = new boost::barrier(m_numThreads+1); // numThread workers + 1 controller
#ifdef MPI_SUPPORT
m_MPI_Barrier = 0;
#endif
for (unsigned int n=0; n<m_numThreads; n++)
{
@@ -279,7 +286,11 @@ void thread::operator()()
#ifdef MPI_SUPPORT
if (m_threadID==0)
{
if (m_enginePtr->m_MPI_Barrier)
m_enginePtr->m_MPI_Barrier->wait();
m_enginePtr->SendReceiveVoltages();
}
m_enginePtr->m_IterateBarrier->wait();
#endif
@@ -312,7 +323,11 @@ void thread::operator()()
#ifdef MPI_SUPPORT
if (m_threadID==0)
{
if (m_enginePtr->m_MPI_Barrier)
m_enginePtr->m_MPI_Barrier->wait();
m_enginePtr->SendReceiveCurrents();
}
m_enginePtr->m_IterateBarrier->wait();
#endif
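
For orientation, this is where the gate sits in each engine's per-timestep worker loop: only thread 0 ever touches MPI, while the other workers idle at m_IterateBarrier. A simplified skeleton (member names follow the diff, the field updates are placeholders):

    #include <boost/thread/barrier.hpp>

    void timestep(int m_threadID, boost::barrier* m_IterateBarrier,
                  boost::barrier* m_MPI_Barrier /* NULL unless assigned by an outer engine */)
    {
        // ... update voltages for this thread's slice ...
        m_IterateBarrier->wait();      // all slices finished
        if (m_threadID == 0)
        {
            if (m_MPI_Barrier)
                m_MPI_Barrier->wait(); // child engine: let the base exchange finish first
            // SendReceiveVoltages();  // MPI exchange, thread 0 only
        }
        m_IterateBarrier->wait();      // workers resume after the exchange
        // ... same pattern again for the currents ...
    }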

FDTD/engine_multithread.h

@@ -110,6 +110,15 @@ protected:
unsigned int m_numThreads; //!< number of worker threads
volatile bool m_stopThreads;
#ifdef MPI_SUPPORT
/*! Workaround needed for the subgridding scheme (see Engine_CylinderMultiGrid):
Some engines may need an additional barrier to synchronize their MPI communication.
This engine will neither initialize nor clean up this barrier, but it will check for it and wait before executing any MPI sync.
Make sure to clean up (delete) this barrier before Engine_Multithread::Reset() is called.
*/
boost::barrier *m_MPI_Barrier;
#endif
#ifdef ENABLE_DEBUG_TIME
std::map<boost::thread::id, std::vector<double> > m_timer_list;
#endif
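
Since Engine_Multithread only ever checks the pointer and waits, an engine that never assigns the barrier gets the plain MPI behaviour; an engine that does assign it must also reset it to NULL after deleting, or thread 0 would wait on a dangling barrier. Sketched with a hypothetical owner class (Engine_Multithread itself never allocates or frees m_MPI_Barrier):

    #include <boost/thread/barrier.hpp>
    #include <cstddef>

    struct OwnerEngine
    {
        boost::barrier* m_MPI_Barrier;

        void Init()    { m_MPI_Barrier = new boost::barrier(2); } // the two comm threads
        void Cleanup() // must run before Engine_Multithread::Reset()
        {
            delete m_MPI_Barrier;
            m_MPI_Barrier = NULL; // Engine_Multithread skips the wait on NULL
        }
    };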

FDTD/operator_cylindermultigrid.cpp

@@ -127,6 +127,26 @@ void Operator_CylinderMultiGrid::Init()
m_InnerOp = Operator_CylinderMultiGrid::New(m_Split_Radii,m_numThreads);
}
#ifdef MPI_SUPPORT
void Operator_CylinderMultiGrid::SetTag(int tag)
{
m_MyTag = tag;
m_InnerOp->SetTag(tag+1);
}
void Operator_CylinderMultiGrid::SetNeighborUp(int ny, int id)
{
Operator_Cylinder::SetNeighborUp(ny,id);
m_InnerOp->SetNeighborUp(ny,id);
}
void Operator_CylinderMultiGrid::SetNeighborDown(int ny, int id)
{
Operator_Cylinder::SetNeighborDown(ny,id);
m_InnerOp->SetNeighborDown(ny,id);
}
#endif
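
Both grid levels exchange their boundaries over the same communicator and the same neighbour ranks, so the tag is the only thing that keeps a base-level message from matching an inner-level receive; SetTag therefore hands tag+1 down the hierarchy (recursively, since the inner operator may itself be a multigrid). A hedged sketch of that separation in plain MPI (hypothetical buffers and ranks, run with two processes; not openEMS code):

    #include <mpi.h>

    static void exchange_boundary(float* up, float* down, int count, int neighbor, int tag)
    {
        MPI_Status status;
        MPI_Sendrecv(up,   count, MPI_FLOAT, neighbor, tag,  // ship our upper bound
                     down, count, MPI_FLOAT, neighbor, tag,  // receive the neighbour's
                     MPI_COMM_WORLD, &status);
    }

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        float base_up[8] = {0}, base_dn[8], inner_up[4] = {0}, inner_dn[4];
        int neighbor = rank ^ 1; // toy pairing of ranks 0 and 1
        int tag = 0;             // as assigned by SetTag() on the base level
        exchange_boundary(base_up,  base_dn,  8, neighbor, tag);     // base grid
        exchange_boundary(inner_up, inner_dn, 4, neighbor, tag + 1); // inner grid
        MPI_Finalize();
        return 0;
    }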
void Operator_CylinderMultiGrid::CalcStartStopLines(unsigned int &numThreads, vector<unsigned int> &start, vector<unsigned int> &stop) const
{
vector<unsigned int> jpt = AssignJobs2Threads(numLines[0]- m_Split_Pos + 1, numThreads, true);

FDTD/operator_cylindermultigrid.h

@@ -53,6 +53,12 @@ public:
virtual void ShowStat() const;
#ifdef MPI_SUPPORT
virtual void SetTag(int tag);
virtual void SetNeighborUp(int ny, int id);
virtual void SetNeighborDown(int ny, int id);
#endif
protected:
Operator_CylinderMultiGrid(vector<double> Split_Radii);
virtual void Init();