2#ifndef included_AMP_MPI
3#define included_AMP_MPI
19#include "AMP/AMP_TPLs.h"
22#include "AMP/utils/UtilityMacros.h"
30#define AMP_COMM_NULL AMP::AMP_MPI::commNull
31#define AMP_COMM_SELF AMP::AMP_MPI::commSelf
32#define AMP_COMM_WORLD AMP::AMP_MPI::commWorld
73 typedef uint32_t
Comm;
76 constexpr static bool has_MPI =
false;
82 constexpr static uint64_t
hashNull = 0xcc6bc5507c132516;
83 constexpr static uint64_t
hashSelf = 0x070b9699a107fe57;
84 constexpr static uint64_t
hashWorld = 0x3d5fdf58e4df5a94;
85 constexpr static uint64_t
hashMPI = 0x641118b35a0d87cd;
96 std::shared_ptr<std::pair<Request2, std::any>>
d_data;
213 const std::vector<int> &procs = std::vector<int>(),
541 void sumReduce(
const type *x, type *y,
int n = 1 )
const;
604 void minReduce(
const type *x, type *y,
int n,
int *rank )
const;
683 void maxReduce(
const type *x, type *y,
int n,
int *rank )
const;
705 void sumScan(
const type *x, type *y,
int n )
const;
727 void minScan(
const type *x, type *y,
int n )
const;
749 void maxScan(
const type *x, type *y,
int n )
const;
759 type
bcast(
const type &value,
int root )
const;
770 void bcast( type *value,
int n,
int root )
const;
806 void send(
const type &data,
int recv,
int tag = 0 )
const;
842 void sendBytes(
const void *buf,
int N_bytes,
int recv,
int tag = 0 )
const;
937 void recv( type *buf,
int &
length,
int send,
bool get_length,
int tag = 0 )
const;
1010 template<
class type>
1018 int recvtag )
const;
1026 template<
class type>
1035 template<
class type>
1036 std::vector<type>
allGather(
const std::vector<type> &x )
const;
1047 template<
class type>
1075 template<
class type>
1079 int *recv_cnt =
nullptr,
1080 int *recv_disp =
nullptr,
1081 bool known_recv =
false )
const;
1088 template<
class type>
1097 template<
class KEY,
class DATA>
1107 template<
class type>
1108 std::vector<type>
gather(
const type &x,
int root )
const;
1117 template<
class type>
1118 std::vector<type>
gather(
const std::vector<type> &x,
int root )
const;
1133 template<
class type>
1137 const int *recv_cnt,
1138 const int *recv_disp,
1152 template<
class type>
1153 void allToAll(
int n,
const type *send_data, type *recv_data )
const;
1162 template<
class type>
1194 template<
class type>
1196 const int send_cnt[],
1197 const int send_disp[],
1199 int *recv_cnt =
nullptr,
1200 int *recv_disp =
nullptr,
1201 bool known_recv =
false )
const;
1224 template<
class type,
class int_type>
1225 std::vector<type>
allToAll(
const std::vector<type> &send_data,
1226 const std::vector<int_type> &send_cnt,
1227 const std::vector<int_type> &send_disp,
1228 const std::vector<int_type> &recv_cnt,
1229 const std::vector<int_type> &recv_disp )
const;
1261 std::vector<int> &send_disp,
1262 std::vector<int> &recv_cnt,
1263 std::vector<int> &recv_disp )
const;
1276 std::vector<int>
commRanks(
const std::vector<int> &ranks )
const;
1374 std::tuple<int, int, int>
Iprobe(
int source = -1,
int tag = -1 )
const;
1386 std::tuple<int, int, int>
probe(
int source = -1,
int tag = -1 )
const;
1454#ifdef AMP_USE_SAMRAI
1457 AMP_MPI(
const SAMRAI::tbox::SAMRAI_MPI &comm );
1460 operator SAMRAI::tbox::SAMRAI_MPI()
const;
std::shared_ptr< std::pair< Request2, std::any > > d_data
Request(Request2 request=Request2(), std::any data=std::any())
Provides C++ wrapper around MPI routines.
AMP_MPI(Comm comm, bool manage=false)
Constructor from existing MPI communicator.
void minScan(const type *x, type *y, int n) const
Scan Min Reduce.
static int d_maxTag
The maximum valid tag.
void gather(const type *send_data, int send_cnt, type *recv_data, const int *recv_cnt, const int *recv_disp, int root) const
static size_t MPI_Comm_destroyed()
Return the total number of MPI_Comm objects that have been destroyed.
static std::vector< int > getProcessAffinity()
Function to return the affinity of the current process.
void sendrecv(const type *sendbuf, int sendcount, int dest, int sendtag, type *recvbuf, int recvcount, int source, int recvtag) const
This function sends and receives data using a blocking call.
static void wait(const Request &request)
Wait for a communication to finish.
void minReduce(const type *x, type *y, int n) const
Min Reduce.
AMP_MPI split(int color, int key=-1, bool manage=true) const
Split an existing communicator.
bool d_call_abort
Do we want to call MPI_abort instead of exit.
uint64_t d_hash
A unique hash for the comm (consistent across comm)
void maxReduce(type *x, int n) const
Max Reduce.
bool operator==(const AMP_MPI &) const
Overload operator ==.
static short profile_level
The level for the profiles of MPI.
Comm d_comm
The internal MPI communicator.
std::vector< type > allToAll(const std::vector< type > &send_data, const std::vector< int_type > &send_cnt, const std::vector< int_type > &send_disp, const std::vector< int_type > &recv_cnt, const std::vector< int_type > &recv_disp) const
void recv(type *buf, int &length, int send, bool get_length, int tag=0) const
This function receives an MPI message with a data array from another processor.
static std::array< int, 2 > version()
Return the MPI version number { major, minor }.
static constexpr bool has_MPI
void sendBytes(const void *buf, int N_bytes, int recv, int tag=0) const
This function sends an MPI message with an array of bytes (MPI_BYTES) to receiving_proc_number.
static void wait(Request2 request)
Wait for a communication to finish.
std::vector< type > allToAll(const std::vector< type > &send) const
type bcast(const type &value, int root) const
Broadcast.
void setGather(std::set< type > &set) const
Request IrecvBytes(void *buf, int N_bytes, int send_proc, int tag) const
This function receives an MPI message with an array of max size number_bytes (MPI_BYTES) from any pro...
AMP_MPI & operator=(AMP_MPI &&rhs)
static std::vector< int > waitSome(int count, Request2 *request)
Wait for some communications to finish.
std::vector< type > allGather(const std::vector< type > &x) const
static std::string getNodeName()
Get the node name.
std::vector< type > gather(const std::vector< type > &x, int root) const
static void balanceProcesses(const AMP_MPI &comm=AMP_MPI(AMP::AMP_MPI::commWorld), int method=1, const std::vector< int > &procs=std::vector< int >(), int N_min=1, int N_max=-1)
Load balance the processes within a node.
void allToAll(int n, const type *send_data, type *recv_data) const
void send(const type &data, int recv, int tag=0) const
This function sends an MPI message with an array to another processor.
void sumScan(const type *x, type *y, int n) const
Scan Sum Reduce.
void allGather(const type &x_in, type *x_out) const
int_ptr d_ranks
The ranks of the comm in the global comm.
static atomic_int N_MPI_Comm_destroyed
Number of MPI_Comm objects destroyed over time.
std::vector< int > commRanks(const std::vector< int > &ranks) const
Send a list of processor ids to communicate.
bool operator<(const AMP_MPI &) const
Overload operator <.
void minReduce(type *x, int n, int *rank) const
Min Reduce.
static constexpr uint64_t hashWorld
std::vector< int > globalRanks() const
Return the global ranks for the comm.
AMP_MPI(const AMP_MPI &comm)
Constructor from existing communicator.
type maxReduce(const type &value) const
Max Reduce.
bool operator>(const AMP_MPI &) const
Overload operator >
void serializeStart() const
Start a serial region.
static double time()
Elapsed time.
void recv(type *buf, int length, int send, int tag=0) const
This function receives an MPI message with a data array from another processor. This call must be pai...
static int getNumberOfProcessors()
Function to return the number of processors available.
int calcAllToAllDisp(const std::vector< int > &send_cnt, std::vector< int > &send_disp, std::vector< int > &recv_cnt, std::vector< int > &recv_disp) const
void maxReduce(const type *x, type *y, int n, int *rank) const
Max Reduce.
int_ptr d_currentTag
The current tag.
void recvBytes(void *buf, int N_bytes, int send, int tag=0) const
This function receives an MPI message with an array of max size number_bytes (MPI_BYTES) from any pro...
std::tuple< int, int, int > Iprobe(int source=-1, int tag=-1) const
Nonblocking test for a message.
std::tuple< int, int, int > probe(int source=-1, int tag=-1) const
Blocking test for a message.
void mapGather(std::map< KEY, DATA > &map) const
int newTag() const
Return a new tag.
void sumReduce(const type *x, type *y, int n=1) const
Sum Reduce.
std::vector< type > allGather(const type &x) const
void minReduce(type *x, int n) const
Min Reduce.
AMP_MPI()
Empty constructor.
void sumReduce(type *x, int n=1) const
Sum Reduce.
volatile std::atomic_int64_t atomic_int
static AMP_MPI intersect(const AMP_MPI &comm1, const AMP_MPI &comm2)
Create a communicator from the intersection of two communicators.
Request Irecv(type *buf, int length, int send_proc, int tag) const
This function receives an MPI message with a data array from another processor using a non-blocking c...
Request Irecv(type &data, int send_proc, int tag) const
This function receives an MPI message with a data array from another processor using a non-blocking c...
const Comm & getCommunicator() const
static void waitAll(int count, const Request *request)
Wait for all communications to finish.
void sleepBarrier(int ms=10) const
Perform a global barrier putting idle processors to sleep.
type sumScan(const type &x) const
Scan Sum Reduce.
Request Isend(const type &data, int recv_proc, int tag) const
This function sends an MPI message with an array to another processor using a non-blocking call....
void maxScan(const type *x, type *y, int n) const
Scan Max Reduce.
void maxReduce(const type *x, type *y, int n) const
Max Reduce.
static void changeProfileLevel(int level)
Change the level of the internal timers.
bool operator>=(const AMP_MPI &) const
Overload operator >=.
static constexpr uint64_t hashMPI
bool d_isNull
Is the communicator NULL.
void bcast(type *value, int n, int root) const
Broadcast.
~AMP_MPI()
Empty destructor.
Request IsendBytes(const void *buf, int N_bytes, int recv_proc, int tag) const
This function sends an MPI message with an array of bytes (MPI_BYTES) to receiving_proc_number using ...
type minReduce(const type &value) const
Min Reduce.
rand_ptr d_rand
Internal random number generator.
static bool MPI_Active()
Check if MPI is active.
void serializeStop() const
Stop a serial region.
static void start_MPI(int &argc, char *argv[], int profile_level=0)
Start MPI.
void reset()
Reset the object.
AMP_MPI dup(bool manage=true) const
Duplicate an existing communicator.
std::mt19937_64 * getRand() const
atomic_ptr d_count
How many objects share the communicator.
bool anyReduce(const bool value) const
Boolean any reduce.
void allReduce(std::vector< bool > &value) const
Boolean all reduce.
static size_t MPI_Comm_created()
Return the total number of MPI_Comm objects that have been created.
type maxScan(const type &x) const
Scan Max Reduce.
type minScan(const type &x) const
Scan Min Reduce.
uint64_t hash() const
Return a unique hash id for the comm.
int calcAllToAllDisp(const int *send_cnt, int *send_disp, int *recv_cnt, int *recv_disp) const
void setCallAbortInSerialInsteadOfExit(bool flag=true)
type sumReduce(const type &value) const
Sum Reduce.
int allToAll(const type *send_data, const int send_cnt[], const int send_disp[], type *recv_data, int *recv_cnt=nullptr, int *recv_disp=nullptr, bool known_recv=false) const
bool operator<=(const AMP_MPI &) const
Overload operator <=.
static std::string info()
Return details about MPI.
type recv(int send, int tag=0) const
This function receives an MPI message with a data array from another processor. This call must be pai...
int compare(const AMP_MPI &) const
Compare to another communicator.
static void waitAll(int count, Request2 *request)
Wait for all communications to finish.
static void stop_MPI()
Stop MPI.
std::vector< type > gather(const type &x, int root) const
void send(const type *buf, int length, int recv, int tag=0) const
This function sends an MPI message with an array to another processor.
static constexpr uint64_t hashSelf
int allGather(const type *send_data, int send_cnt, type *recv_data, int *recv_cnt=nullptr, int *recv_disp=nullptr, bool known_recv=false) const
size_t rand() const
Generate a random number.
void minReduce(const type *x, type *y, int n, int *rank) const
Min Reduce.
AMP_MPI & operator=(const AMP_MPI &comm)
Assignment operator.
static double tick()
Timer resolution.
uint64_t hashRanks() const
Return a hash global ranks.
static std::vector< int > waitSome(int count, const Request *request)
Wait for some communications to finish.
AMP_MPI splitByNode(int key=-1, bool manage=true) const
Split an existing communicator by node.
static atomic_int N_MPI_Comm_created
Number of MPI_Comm objects created over time.
int d_size
The size of the communicator.
static void setProcessAffinity(const std::vector< int > &procs)
Function to set the affinity of the current process.
Request Isend(const type *buf, int length, int recv_proc, int tag) const
This function sends an MPI message with an array to another processor using a non-blocking call....
std::atomic_int *volatile atomic_ptr
static ThreadSupport queryThreadSupport()
Query the level of thread support.
static int waitAny(int count, const Request *request)
Wait for any communication to finish.
bool d_manage
Do we want to manage this communicator.
void maxReduce(type *x, int n, int *rank) const
Max Reduce.
bool operator!=(const AMP_MPI &) const
Overload operator !=.
static constexpr uint64_t hashNull
bool allReduce(const bool value) const
Boolean all reduce.
std::mt19937_64 *volatile rand_ptr
static int waitAny(int count, Request2 *request)
Wait for any communication to finish.
int d_rank
The rank of the communicator.
void anyReduce(std::vector< bool > &value) const
Boolean any reduce.
#define DISABLE_WARNINGS
Suppress all warnings.
#define ENABLE_WARNINGS
Re-enable warnings.
AMP_MPI getComm(const TYPE &obj)
Return the underlying MPI class for the object.