#include <gromacs/ewald/pme_coordinate_receiver_gpu_impl.h>
Class with interfaces and data for the CUDA version of the PME coordinate-receiving functionality.

Impl class stub.
Public member functions

  Impl(MPI_Comm comm, const DeviceContext& deviceContext, gmx::ArrayRef<const PpRanks> ppRanks)
      Creates the PME GPU coordinate receiver object.

  void reinitCoordinateReceiver(DeviceBuffer<RVec> d_x)
      Re-initialize: set atom ranges and, in the thread-MPI case, send the coordinate-buffer
      address to the PP ranks. Required after repartitioning, since atom ranges and buffer
      allocations may have changed.

  void receiveCoordinatesSynchronizerFromPpPeerToPeer(int ppRank)
      Receive a coordinate synchronizer pointer from the PP ranks.

  void launchReceiveCoordinatesFromPpGpuAwareMpi(DeviceBuffer<RVec> recvbuf, int numAtoms,
                                                 int numBytes, int ppRank, int senderIndex)
      Used with library MPI; receives coordinates from PP ranks via GPU-aware MPI.

  std::tuple<int, GpuEventSynchronizer*> receivePpCoordinateSendEvent(int pipelineStage)
      Return the PP coordinate-transfer event received from the PP rank determined by the
      pipeline stage, for the consumer to enqueue.

  int waitForCoordinatesFromAnyPpRank()
      Wait for coordinates from any PP rank.

  DeviceStream* ppCommStream(int senderIndex)
      Return a pointer to the stream associated with a specific PP rank sender index.

  std::tuple<int, int> ppCommAtomRange(int senderIndex)
      Return the range of atoms involved in communication associated with a specific PP rank
      sender index.

  int ppCommNumSenderRanks()
      Return the number of PP ranks involved in PME-PP communication.

  void insertAsDependencyIntoStream(int senderIndex, const DeviceStream& stream)
      Mark an event in the sender stream for senderIndex and enqueue it into stream.
gmx::PmeCoordinateReceiverGpu::Impl::Impl(MPI_Comm                     comm,
                                          const DeviceContext&         deviceContext,
                                          gmx::ArrayRef<const PpRanks> ppRanks)

Creates the PME GPU coordinate receiver object.

Parameters:
  [in] comm            Communicator used for the simulation
  [in] deviceContext   GPU context
  [in] ppRanks         List of PP ranks
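For illustration, a minimal construction sketch. In GROMACS this Impl is created through the public PmeCoordinateReceiverGpu wrapper; the communicator, device context, and rank list shown here are assumed to come from the surrounding PME task setup.

```cpp
#include <gromacs/ewald/pme_coordinate_receiver_gpu_impl.h>

// Sketch only: 'pmePpComm', 'deviceContext', and 'ppRanks' are assumed to be
// provided by PME task setup (communicator split, attached PP rank list).
void setupReceiver(MPI_Comm                     pmePpComm,
                   const DeviceContext&         deviceContext,
                   gmx::ArrayRef<const PpRanks> ppRanks)
{
    gmx::PmeCoordinateReceiverGpu::Impl receiver(pmePpComm, deviceContext, ppRanks);
    // ... re-initialize with the device coordinate buffer before first use
}
```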
void gmx::PmeCoordinateReceiverGpu::Impl::launchReceiveCoordinatesFromPpGpuAwareMpi(
        DeviceBuffer<RVec> recvbuf,
        int                numAtoms,
        int                numBytes,
        int                ppRank,
        int                senderIndex)

Used with library MPI; receives coordinates from PP ranks.

Receive coordinate data using GPU-aware MPI.

Parameters:
  [in] recvbuf       Coordinates buffer in GPU memory
  [in] numAtoms      Starting element (atom offset) in the buffer
  [in] numBytes      Number of bytes to transfer
  [in] ppRank        PP rank sending the data
  [in] senderIndex   Index of the PP rank among those communicating with this PME rank
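A sketch of how the per-rank receives might be launched, deriving the atom offset and byte count from ppCommAtomRange(); the coordinate buffer d_x and the helper mpiRankOf() are assumptions, not part of this API.

```cpp
#include <gromacs/ewald/pme_coordinate_receiver_gpu_impl.h>

int mpiRankOf(int senderIndex); // hypothetical: MPI rank for a given sender index

// Sketch only: launch one GPU-aware MPI receive per sender PP rank, with each
// rank's block located via its atom range within the shared buffer.
void launchAllReceives(gmx::PmeCoordinateReceiverGpu::Impl& receiver, DeviceBuffer<RVec> d_x)
{
    for (int senderIndex = 0; senderIndex < receiver.ppCommNumSenderRanks(); senderIndex++)
    {
        auto [atomStart, atomEnd] = receiver.ppCommAtomRange(senderIndex);
        const int numAtoms = atomStart; // starting element in the buffer
        const int numBytes = static_cast<int>((atomEnd - atomStart) * sizeof(RVec));
        receiver.launchReceiveCoordinatesFromPpGpuAwareMpi(
                d_x, numAtoms, numBytes, mpiRankOf(senderIndex), senderIndex);
    }
}
```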
std::tuple<int, int> gmx::PmeCoordinateReceiverGpu::Impl::ppCommAtomRange(int senderIndex)

Returns the range of atoms involved in communication associated with a specific PP rank
sender index.

Parameters:
  [in] senderIndex   Index of the sender PP rank.
DeviceStream* gmx::PmeCoordinateReceiverGpu::Impl::ppCommStream(int senderIndex)

Return a pointer to the stream associated with a specific PP rank sender index.

Parameters:
  [in] senderIndex   Index of the sender PP rank.
void gmx::PmeCoordinateReceiverGpu::Impl::receiveCoordinatesSynchronizerFromPpPeerToPeer(int ppRank)

Receive coordinate synchronizer pointer from the PP ranks.

Parameters:
  [in] ppRank   PP rank to receive the synchronizer from.
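A sketch of fetching the synchronizer from every peer during (re)initialization in the thread-MPI build; the PpRanks::rankId field used here is an assumption based on the constructor's rank list.

```cpp
#include <gromacs/ewald/pme_coordinate_receiver_gpu_impl.h>

// Sketch only: in the thread-MPI build, fetch the send-event synchronizer from
// each PP peer. 'ppRanks' is the list passed to the constructor; the 'rankId'
// member is an assumption.
void receiveAllSynchronizers(gmx::PmeCoordinateReceiverGpu::Impl& receiver,
                             gmx::ArrayRef<const PpRanks>         ppRanks)
{
    for (const auto& ppRank : ppRanks)
    {
        receiver.receiveCoordinatesSynchronizerFromPpPeerToPeer(ppRank.rankId);
    }
}
```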
std::tuple<int, GpuEventSynchronizer*> gmx::PmeCoordinateReceiverGpu::Impl::receivePpCoordinateSendEvent(int pipelineStage)

Return the PP coordinate-transfer event received from the PP rank determined by the pipeline
stage, for the consumer to enqueue.

Parameters:
  [in] pipelineStage   Stage of the pipeline corresponding to this transfer

Returns:
  Tuple with the rank of the sending PP task and the corresponding event
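A sketch of enqueueing the returned event so that subsequent PME work waits on the transfer; enqueueWaitEvent() is GpuEventSynchronizer's wait-enqueue call, and the pmeStream name is an assumption.

```cpp
#include <gromacs/ewald/pme_coordinate_receiver_gpu_impl.h>

// Sketch only: make later work in 'pmeStream' depend on the PP coordinate
// transfer for this pipeline stage.
void enqueueStageDependency(gmx::PmeCoordinateReceiverGpu::Impl& receiver,
                            const DeviceStream&                  pmeStream,
                            int                                  pipelineStage)
{
    auto [senderRank, event] = receiver.receivePpCoordinateSendEvent(pipelineStage);
    (void)senderRank; // rank of the sending PP task, unused in this sketch
    if (event != nullptr)
    {
        event->enqueueWaitEvent(pmeStream); // PME work now waits on the transfer
    }
}
```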
void gmx::PmeCoordinateReceiverGpu::Impl::reinitCoordinateReceiver(DeviceBuffer<RVec> d_x)

Re-initialize: set atom ranges and, in the thread-MPI case, send the coordinate-buffer address
to the PP ranks. Required after repartitioning, since atom ranges and buffer allocations may
have changed.

Parameters:
  [in] d_x   Coordinates buffer in GPU memory
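A sketch of the call site after repartitioning, assuming the PME coordinate buffer d_x has just been reallocated for the new decomposition.

```cpp
#include <gromacs/ewald/pme_coordinate_receiver_gpu_impl.h>

// Sketch only: after domain repartitioning, atom ranges and the coordinate
// buffer may have changed, so the receiver is re-initialized before reuse.
void afterRepartitioning(gmx::PmeCoordinateReceiverGpu::Impl& receiver, DeviceBuffer<RVec> d_x)
{
    receiver.reinitCoordinateReceiver(d_x); // resets ranges; thread-MPI: re-sends buffer address
}
```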
int gmx::PmeCoordinateReceiverGpu::Impl::waitForCoordinatesFromAnyPpRank()

Wait for coordinates from any PP rank.

Returns:
  Rank of the sending PP task
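A sketch pairing the launches shown earlier with completion handling, under the assumption that each call to waitForCoordinatesFromAnyPpRank() completes one pending receive; processRankBlock() is a hypothetical consumer.

```cpp
#include <gromacs/ewald/pme_coordinate_receiver_gpu_impl.h>

void processRankBlock(int senderRank); // hypothetical: consume that rank's atom block

// Sketch only: after one receive has been launched per sender (see the launch
// sketch above), handle completions in arrival order.
void waitAndProcessAll(gmx::PmeCoordinateReceiverGpu::Impl& receiver)
{
    const int numSenders = receiver.ppCommNumSenderRanks();
    for (int completed = 0; completed < numSenders; completed++)
    {
        const int senderRank = receiver.waitForCoordinatesFromAnyPpRank();
        processRankBlock(senderRank); // e.g. spread this rank's coordinates onto the grid
    }
}
```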