GROMACS 2020.4
pme_pp.cpp File Reference
#include "gmxpre.h"
#include "config.h"
#include <cstdio>
#include <cstring>
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"
#include "pme_internal.h"
#include "pme_pp_communication.h"

Description

This file contains the function definitions needed to manage the offload of long-ranged PME work to a separate MPI rank, for computing energies and forces (Coulomb and LJ).

Author
Berk Hess <hess@kth.se>

Functions

static void gmx_pme_send_coeffs_coords_wait (gmx_domdec_t *dd)
 Wait for the pending data send requests to PME ranks to complete.
 
static void gmx_pme_send_coeffs_coords (t_forcerec *fr, const t_commrec *cr, unsigned int flags, real *chargeA, real *chargeB, real *c6A, real *c6B, real *sigmaA, real *sigmaB, const matrix box, const rvec *x, real lambda_q, real lambda_lj, int maxshift_x, int maxshift_y, int64_t step, bool useGpuPmePpComms, bool reinitGpuPmePpComms, bool sendCoordinatesFromGpu, GpuEventSynchronizer *coordinatesReadyOnDeviceEvent)
 Send data to PME ranks.
 
void gmx_pme_send_parameters (const t_commrec *cr, const interaction_const_t *ic, gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj, real *chargeA, real *chargeB, real *sqrt_c6A, real *sqrt_c6B, real *sigmaA, real *sigmaB, int maxshift_x, int maxshift_y)
 Send the charges and maxshift to our PME-only node.
 
void gmx_pme_send_coordinates (t_forcerec *fr, const t_commrec *cr, const matrix box, const rvec *x, real lambda_q, real lambda_lj, gmx_bool bEnerVir, int64_t step, bool useGpuPmePpComms, bool receiveCoordinateAddressFromPme, bool sendCoordinatesFromGpu, GpuEventSynchronizer *coordinatesReadyOnDeviceEvent, gmx_wallcycle *wcycle)
 Send the coordinates to our PME-only node and request a PME calculation.
 
void gmx_pme_send_finish (const t_commrec *cr)
 Tell our PME-only node to finish.
 
void gmx_pme_send_switchgrid (const t_commrec *cr, ivec grid_size, real ewaldcoeff_q, real ewaldcoeff_lj)
 Tell our PME-only node to switch to a new grid size.
 
void gmx_pme_send_resetcounters (const t_commrec *cr, int64_t step)
 Tell our PME-only node to reset all cycle and flop counters.
 
static void receive_virial_energy (const t_commrec *cr, gmx::ForceWithVirial *forceWithVirial, real *energy_q, real *energy_lj, real *dvdlambda_q, real *dvdlambda_lj, float *pme_cycles)
 Receive the virial and energy from the PME rank.
 
static void recvFFromPme (gmx::PmePpCommGpu *pmePpCommGpu, void *recvptr, int n, const t_commrec *cr, bool useGpuPmePpComms, bool receivePmeForceToGpu)
 Receive force data from PME ranks.
 
void gmx_pme_receive_f (gmx::PmePpCommGpu *pmePpCommGpu, const t_commrec *cr, gmx::ForceWithVirial *forceWithVirial, real *energy_q, real *energy_lj, real *dvdlambda_q, real *dvdlambda_lj, bool useGpuPmePpComms, bool receivePmeForceToGpu, float *pme_cycles)
 PP nodes receive the long-range forces from the PME nodes.
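
Taken together, these functions implement the PP-rank side of one step of the PP-PME handshake: coordinates (and, when they change, charges and LJ parameters) are sent to the PME-only rank, short-ranged work proceeds while PME runs remotely, and the long-range forces, energies and dV/dlambda terms are received back afterwards. The fragment below is a minimal sketch of that call order using only the signatures documented above; it assumes an already initialized t_commrec with a PME-only peer, a valid t_forcerec, and output buffers, and it takes the plain CPU/MPI path (no GPU-direct PME-PP communication). The helper name pmePpStepSketch and the header location of the declarations are assumptions, not part of the GROMACS API.

// Illustrative only: the PP-side call order for one MD step, on the
// CPU/MPI path. All objects are assumed to be set up elsewhere.
#include <cstdint>
#include "gromacs/ewald/pme_pp.h" // assumed location of these declarations
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/timing/wallcycle.h"

static void pmePpStepSketch(t_forcerec*           fr,
                            const t_commrec*      cr,
                            const matrix          box,
                            const rvec*           x,
                            real                  lambda_q,
                            real                  lambda_lj,
                            gmx_bool              computeEnergyAndVirial,
                            int64_t               step,
                            gmx::ForceWithVirial* forceWithVirial,
                            gmx_wallcycle*        wcycle)
{
    // 1. Hand the coordinates to our PME-only rank and request a PME
    //    calculation for this step (no GPU-direct communication).
    gmx_pme_send_coordinates(fr, cr, box, x, lambda_q, lambda_lj, computeEnergyAndVirial,
                             step, /*useGpuPmePpComms=*/false,
                             /*receiveCoordinateAddressFromPme=*/false,
                             /*sendCoordinatesFromGpu=*/false,
                             /*coordinatesReadyOnDeviceEvent=*/nullptr, wcycle);

    // ... overlap short-ranged (bonded + nonbonded) work here ...

    // 2. Block until the long-range forces, energies, dV/dlambda terms and
    //    the PME cycle count have arrived back from the PME rank.
    real  energy_q = 0, energy_lj = 0, dvdlambda_q = 0, dvdlambda_lj = 0;
    float pmeCycles = 0;
    gmx_pme_receive_f(/*pmePpCommGpu=*/nullptr, cr, forceWithVirial, &energy_q, &energy_lj,
                      &dvdlambda_q, &dvdlambda_lj, /*useGpuPmePpComms=*/false,
                      /*receivePmeForceToGpu=*/false, &pmeCycles);
}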
 

Variables

static constexpr bool c_useDelayedWait = false
 Block to wait for communication to PME ranks to complete.
 

Variable Documentation

static constexpr bool c_useDelayedWait = false

Block to wait for communication to PME ranks to complete.

This should be faster with a real non-blocking MPI implementation.
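
c_useDelayedWait chooses between blocking right after the sends to the PME rank are posted and deferring that wait until the send buffers are about to be reused, which is the wait that gmx_pme_send_coeffs_coords_wait performs via gmx_domdec_t. The fragment below is a generic, self-contained illustration of that delayed-wait idiom in plain MPI; the struct and function names are illustrative and do not reproduce the GROMACS internals.

// Generic sketch of the delayed-wait idiom toggled by c_useDelayedWait:
// post non-blocking sends now, complete them later, just before the send
// buffers must be reused. Names here are illustrative, not GROMACS internals.
#include <mpi.h>
#include <array>

struct PendingPmeSends
{
    std::array<MPI_Request, 8> requests{}; // outstanding sends to the PME rank
    int                        count = 0;
};

static void waitForPendingSends(PendingPmeSends* pending)
{
    // Complete all outstanding sends; afterwards the buffers may be reused.
    MPI_Waitall(pending->count, pending->requests.data(), MPI_STATUSES_IGNORE);
    pending->count = 0;
}

static void sendToPme(PendingPmeSends* pending, const void* buf, int numBytes,
                      int pmeRank, int tag, MPI_Comm comm, bool useDelayedWait)
{
    MPI_Isend(buf, numBytes, MPI_BYTE, pmeRank, tag, comm,
              &pending->requests[pending->count++]);
    if (!useDelayedWait)
    {
        // c_useDelayedWait == false: block immediately after posting.
        waitForPendingSends(pending);
    }
    // With delayed waiting, waitForPendingSends() is instead called at the
    // start of the next send batch (or before receiving the forces).
}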