Advanced Multi-Physics (AMP)
On-Line Documentation
AMP_MPI.h
Go to the documentation of this file.
1// This file includes a wrapper class for MPI functions
2#ifndef included_AMP_MPI
3#define included_AMP_MPI
4
5
6#include <any>
7#include <array>
8#include <atomic>
9#include <map>
10#include <memory>
11#include <random>
12#include <set>
13#include <string>
14#include <tuple>
15#include <vector>
16
17
18// Add the definitions for the TPLs that are used
19#include "AMP/AMP_TPLs.h"
20
21// Include MPI if we are building with MPI
22#include "AMP/utils/UtilityMacros.h"
23#ifdef AMP_USE_MPI
25 #include "mpi.h"
27#endif
28
29// Define extra comm_world, comm_self, and comm_null ids
30#define AMP_COMM_NULL AMP::AMP_MPI::commNull
31#define AMP_COMM_SELF AMP::AMP_MPI::commSelf
32#define AMP_COMM_WORLD AMP::AMP_MPI::commWorld
33
34
35// Define SAMRAI MPI object
36#ifdef AMP_USE_SAMRAI
37namespace SAMRAI::tbox {
38class SAMRAI_MPI;
39}
40#endif
41
42
43namespace AMP {
44
45
62class alignas( 8 ) AMP_MPI final
63{
64public:
66
67#ifdef AMP_USE_MPI
68 typedef MPI_Comm Comm;
69 typedef MPI_Datatype Datatype;
70 typedef MPI_Request Request2;
71 constexpr static bool has_MPI = true;
72#else
73 typedef uint32_t Comm;
74 typedef uint32_t Datatype;
75 typedef uint32_t Request2;
76 constexpr static bool has_MPI = false;
77#endif
78 static Comm commNull;
79 static Comm commSelf;
81
82 constexpr static uint64_t hashNull = 0xcc6bc5507c132516;
83 constexpr static uint64_t hashSelf = 0x070b9699a107fe57;
84 constexpr static uint64_t hashWorld = 0x3d5fdf58e4df5a94;
85 constexpr static uint64_t hashMPI = 0x641118b35a0d87cd;
86
87 class Request final
88 {
89 public:
90 Request( Request2 request = Request2(), std::any data = std::any() );
92 operator Request2() const { return d_data->first; }
93 Request2 *get() { return &d_data->first; }
94
95 private:
96 std::shared_ptr<std::pair<Request2, std::any>> d_data;
97 };
98
99
100public: // Constructors
106
107
110
111
124 AMP_MPI( Comm comm, bool manage = false );
125
126
133 AMP_MPI( const AMP_MPI &comm );
134
135
140 AMP_MPI( AMP_MPI &&rhs );
141
142
148 AMP_MPI &operator=( const AMP_MPI &comm );
149
150
156
157
162 void reset();
163
164
165public: // Member functions
171 static std::string getNodeName();
172
173
176
177
179 static std::vector<int> getProcessAffinity();
180
181
183 static void setProcessAffinity( const std::vector<int> &procs );
184
185
211 static void balanceProcesses( const AMP_MPI &comm = AMP_MPI( AMP_COMM_WORLD ),
212 int method = 1,
213 const std::vector<int> &procs = std::vector<int>(),
214 int N_min = 1,
215 int N_max = -1 );
216
217
220
221
226 size_t rand() const;
227
228
247 AMP_MPI split( int color, int key = -1, bool manage = true ) const;
248
249
265 AMP_MPI splitByNode( int key = -1, bool manage = true ) const;
266
267
278 AMP_MPI dup( bool manage = true ) const;
279
280
294 static AMP_MPI intersect( const AMP_MPI &comm1, const AMP_MPI &comm2 );
295
296
300 bool isNull() const;
301
302
308 std::vector<int> globalRanks() const;
309
310
319 uint64_t hash() const;
320
321
328 uint64_t hashRanks() const;
329
330
339 const Comm &getCommunicator() const;
340
341
348 bool operator==( const AMP_MPI & ) const;
349
350
357 bool operator!=( const AMP_MPI & ) const;
358
359
371 bool operator<( const AMP_MPI & ) const;
372
373
384 bool operator<=( const AMP_MPI & ) const;
385
386
398 bool operator>( const AMP_MPI & ) const;
399
400
412 bool operator>=( const AMP_MPI & ) const;
413
414
423 int compare( const AMP_MPI & ) const;
424
425
430 int getRank() const;
431
432
436 int getSize() const;
437
438
442 int maxTag() const;
443
444
451 int newTag() const;
452
453
461 void abort() const;
462
463
470 void setCallAbortInSerialInsteadOfExit( bool flag = true );
471
472
479 bool allReduce( const bool value ) const;
480
481
488 bool anyReduce( const bool value ) const;
489
490
497 void allReduce( std::vector<bool> &value ) const;
498
499
506 void anyReduce( std::vector<bool> &value ) const;
507
508
515 template<class type>
516 type sumReduce( const type &value ) const;
517
518
527 template<class type>
528 void sumReduce( type *x, int n = 1 ) const;
529
530
540 template<class type>
541 void sumReduce( const type *x, type *y, int n = 1 ) const;
542
543
550 template<class type>
551 type minReduce( const type &value ) const;
552
553
562 template<class type>
563 void minReduce( type *x, int n ) const;
564
565
575 template<class type>
576 void minReduce( const type *x, type *y, int n ) const;
577
588 template<class type>
589 void minReduce( type *x, int n, int *rank ) const;
590
591
603 template<class type>
604 void minReduce( const type *x, type *y, int n, int *rank ) const;
605
606
613 template<class type>
614 type maxReduce( const type &value ) const;
615
616
629 template<class type>
630 void maxReduce( type *x, int n ) const;
631
632
646 template<class type>
647 void maxReduce( const type *x, type *y, int n ) const;
648
663 template<class type>
664 void maxReduce( type *x, int n, int *rank ) const;
665
666
682 template<class type>
683 void maxReduce( const type *x, type *y, int n, int *rank ) const;
684
685
692 template<class type>
693 type sumScan( const type &x ) const;
694
695
704 template<class type>
705 void sumScan( const type *x, type *y, int n ) const;
706
707
714 template<class type>
715 type minScan( const type &x ) const;
716
717
726 template<class type>
727 void minScan( const type *x, type *y, int n ) const;
728
729
736 template<class type>
737 type maxScan( const type &x ) const;
738
739
748 template<class type>
749 void maxScan( const type *x, type *y, int n ) const;
750
751
758 template<class type>
759 type bcast( const type &value, int root ) const;
760
761
769 template<class type>
770 void bcast( type *value, int n, int root ) const;
771
772
776 void barrier() const;
777
778
787 void sleepBarrier( int ms = 10 ) const;
788
789
805 template<class type>
806 void send( const type &data, int recv, int tag = 0 ) const;
807
808
825 template<class type>
826 void send( const type *buf, int length, int recv, int tag = 0 ) const;
827
828
842 void sendBytes( const void *buf, int N_bytes, int recv, int tag = 0 ) const;
843
844
856 template<class type>
857 Request Isend( const type &data, int recv_proc, int tag ) const;
858
859
872 template<class type>
873 Request Isend( const type *buf, int length, int recv_proc, int tag ) const;
874
875
888 Request IsendBytes( const void *buf, int N_bytes, int recv_proc, int tag ) const;
889
890
899 template<class type>
900 type recv( int send, int tag = 0 ) const;
901
902
913 template<class type>
914 void recv( type *buf, int length, int send, int tag = 0 ) const;
915
916
936 template<class type>
937 void recv( type *buf, int &length, int send, bool get_length, int tag = 0 ) const;
938
939
953 void recvBytes( void *buf, int N_bytes, int send, int tag = 0 ) const;
954
955
965 template<class type>
966 Request Irecv( type &data, int send_proc, int tag ) const;
967
968
979 template<class type>
980 Request Irecv( type *buf, int length, int send_proc, int tag ) const;
981
982
995 Request IrecvBytes( void *buf, int N_bytes, int send_proc, int tag ) const;
996
997
1010 template<class type>
1011 void sendrecv( const type *sendbuf,
1012 int sendcount,
1013 int dest,
1014 int sendtag,
1015 type *recvbuf,
1016 int recvcount,
1017 int source,
1018 int recvtag ) const;
1019
1020
1026 template<class type>
1027 std::vector<type> allGather( const type &x ) const;
1028
1029
1035 template<class type>
1036 std::vector<type> allGather( const std::vector<type> &x ) const;
1037
1038
1047 template<class type>
1048 void allGather( const type &x_in, type *x_out ) const;
1049
1050
1075 template<class type>
1076 int allGather( const type *send_data,
1077 int send_cnt,
1078 type *recv_data,
1079 int *recv_cnt = nullptr,
1080 int *recv_disp = nullptr,
1081 bool known_recv = false ) const;
1082
1083
1088 template<class type>
1089 void setGather( std::set<type> &set ) const;
1090
1091
1097 template<class KEY, class DATA>
1098 void mapGather( std::map<KEY, DATA> &map ) const;
1099
1100
1107 template<class type>
1108 std::vector<type> gather( const type &x, int root ) const;
1109
1110
1117 template<class type>
1118 std::vector<type> gather( const std::vector<type> &x, int root ) const;
1119
1120
1133 template<class type>
1134 void gather( const type *send_data,
1135 int send_cnt,
1136 type *recv_data,
1137 const int *recv_cnt,
1138 const int *recv_disp,
1139 int root ) const;
1140
1152 template<class type>
1153 void allToAll( int n, const type *send_data, type *recv_data ) const;
1154
1162 template<class type>
1163 std::vector<type> allToAll( const std::vector<type> &send ) const;
1164
1165
1194 template<class type>
1195 int allToAll( const type *send_data,
1196 const int send_cnt[],
1197 const int send_disp[],
1198 type *recv_data,
1199 int *recv_cnt = nullptr,
1200 int *recv_disp = nullptr,
1201 bool known_recv = false ) const;
1202
1203
1224 template<class type, class int_type>
1225 std::vector<type> allToAll( const std::vector<type> &send_data,
1226 const std::vector<int_type> &send_cnt,
1227 const std::vector<int_type> &send_disp,
1228 const std::vector<int_type> &recv_cnt,
1229 const std::vector<int_type> &recv_disp ) const;
1230
1231
1244 int
1245 calcAllToAllDisp( const int *send_cnt, int *send_disp, int *recv_cnt, int *recv_disp ) const;
1246
1247
1260 int calcAllToAllDisp( const std::vector<int> &send_cnt,
1261 std::vector<int> &send_disp,
1262 std::vector<int> &recv_cnt,
1263 std::vector<int> &recv_disp ) const;
1264
1265
1276 std::vector<int> commRanks( const std::vector<int> &ranks ) const;
1277
1278
1285 static void wait( const Request &request );
1286
1287
1294 static void wait( Request2 request );
1295
1296
1306 static int waitAny( int count, const Request *request );
1307
1308
1318 static int waitAny( int count, Request2 *request );
1319
1320
1329 static void waitAll( int count, const Request *request );
1330
1338 static void waitAll( int count, Request2 *request );
1339
1340
1349 static std::vector<int> waitSome( int count, const Request *request );
1350
1351
1361 static std::vector<int> waitSome( int count, Request2 *request );
1362
1363
1374 std::tuple<int, int, int> Iprobe( int source = -1, int tag = -1 ) const;
1375
1376
1386 std::tuple<int, int, int> probe( int source = -1, int tag = -1 ) const;
1387
1388
1396 void serializeStart() const;
1397
1398
1403 void serializeStop() const;
1404
1405
1414 static double time();
1415
1416
1421 static double tick();
1422
1423
1429 static void changeProfileLevel( int level ) { profile_level = level; }
1430
1431
1433 static size_t MPI_Comm_created();
1434
1436 static size_t MPI_Comm_destroyed();
1437
1439 static std::string info();
1440
1442 static std::array<int, 2> version();
1443
1445 static bool MPI_Active();
1446
1448 static void start_MPI( int &argc, char *argv[], int profile_level = 0 );
1449
1451 static void stop_MPI();
1452
1453
1454#ifdef AMP_USE_SAMRAI
1455public: // SAMRAI interfaces
1457 AMP_MPI( const SAMRAI::tbox::SAMRAI_MPI &comm );
1458
1460 operator SAMRAI::tbox::SAMRAI_MPI() const;
1461#endif
1462
1463
1464private: // Helper functions
1465 std::mt19937_64 *getRand() const;
1466
1467private: // data members
1468 using atomic_ptr = std::atomic_int *volatile;
1469 using atomic_int = volatile std::atomic_int64_t;
1470 using int_ptr = int *volatile;
1471 using rand_ptr = std::mt19937_64 *volatile;
1472
1474 bool d_isNull = true;
1475 bool d_manage = false;
1476 bool d_call_abort = true;
1477 int d_rank = 0;
1478 int d_size = 1;
1479 uint64_t d_hash = hashNull;
1481 mutable int_ptr d_ranks = nullptr;
1483 mutable rand_ptr d_rand = nullptr;
1484 static short profile_level;
1485 static int d_maxTag;
1488};
1489
1490
1492template<class TYPE>
1493AMP_MPI getComm( const TYPE &obj );
1494
1495
1496} // namespace AMP
1497
1498
1499#endif
#define AMP_COMM_WORLD
Definition AMP_MPI.h:32
Request2 * get()
Definition AMP_MPI.h:93
std::shared_ptr< std::pair< Request2, std::any > > d_data
Definition AMP_MPI.h:96
Request(Request2 request=Request2(), std::any data=std::any())
Provides C++ wrapper around MPI routines.
Definition AMP_MPI.h:63
static Comm commNull
Definition AMP_MPI.h:78
AMP_MPI(Comm comm, bool manage=false)
Constructor from existing MPI communicator.
int getRank() const
void minScan(const type *x, type *y, int n) const
Scan Min Reduce.
static int d_maxTag
The maximum valid tag.
Definition AMP_MPI.h:1485
void gather(const type *send_data, int send_cnt, type *recv_data, const int *recv_cnt, const int *recv_disp, int root) const
static size_t MPI_Comm_destroyed()
Return the total number of MPI_Comm objects that have been destroyed.
static std::vector< int > getProcessAffinity()
Function to return the affinity of the current process.
void sendrecv(const type *sendbuf, int sendcount, int dest, int sendtag, type *recvbuf, int recvcount, int source, int recvtag) const
This function sends and receives data using a blocking call.
static void wait(const Request &request)
Wait for a communication to finish.
void minReduce(const type *x, type *y, int n) const
Min Reduce.
bool isNull() const
AMP_MPI split(int color, int key=-1, bool manage=true) const
Split an existing communicator.
bool d_call_abort
Do we want to call MPI_abort instead of exit.
Definition AMP_MPI.h:1476
uint64_t d_hash
A unique hash for the comm (consistent across comm)
Definition AMP_MPI.h:1479
void maxReduce(type *x, int n) const
Max Reduce.
bool operator==(const AMP_MPI &) const
Overload operator ==.
static short profile_level
The level for the profiles of MPI.
Definition AMP_MPI.h:1484
Comm d_comm
The internal MPI communicator.
Definition AMP_MPI.h:1473
std::vector< type > allToAll(const std::vector< type > &send_data, const std::vector< int_type > &send_cnt, const std::vector< int_type > &send_disp, const std::vector< int_type > &recv_cnt, const std::vector< int_type > &recv_disp) const
void recv(type *buf, int &length, int send, bool get_length, int tag=0) const
This function receives an MPI message with a data array from another processor.
static std::array< int, 2 > version()
Return the MPI version number { major, minor }.
static constexpr bool has_MPI
Definition AMP_MPI.h:71
int maxTag() const
MPI_Comm Comm
Definition AMP_MPI.h:68
void sendBytes(const void *buf, int N_bytes, int recv, int tag=0) const
This function sends an MPI message with an array of bytes (MPI_BYTES) to receiving_proc_number.
static void wait(Request2 request)
Wait for a communication to finish.
std::vector< type > allToAll(const std::vector< type > &send) const
type bcast(const type &value, int root) const
Broadcast.
void setGather(std::set< type > &set) const
Request IrecvBytes(void *buf, int N_bytes, int send_proc, int tag) const
This function receives an MPI message with an array of max size number_bytes (MPI_BYTES) from any processor.
AMP_MPI & operator=(AMP_MPI &&rhs)
static std::vector< int > waitSome(int count, Request2 *request)
Wait for some communications to finish.
std::vector< type > allGather(const std::vector< type > &x) const
static Comm commWorld
Definition AMP_MPI.h:80
static std::string getNodeName()
Get the node name.
std::vector< type > gather(const std::vector< type > &x, int root) const
static void balanceProcesses(const AMP_MPI &comm=AMP_MPI(AMP::AMP_MPI::commWorld), int method=1, const std::vector< int > &procs=std::vector< int >(), int N_min=1, int N_max=-1)
Load balance the processes within a node.
void allToAll(int n, const type *send_data, type *recv_data) const
void send(const type &data, int recv, int tag=0) const
This function sends an MPI message with an array to another processor.
void sumScan(const type *x, type *y, int n) const
Scan Sum Reduce.
void allGather(const type &x_in, type *x_out) const
int_ptr d_ranks
The ranks of the comm in the global comm.
Definition AMP_MPI.h:1481
void abort() const
static atomic_int N_MPI_Comm_destroyed
Number of MPI_Comm objects destroyed over time.
Definition AMP_MPI.h:1487
std::vector< int > commRanks(const std::vector< int > &ranks) const
Send a list of processor ids to communicate.
bool operator<(const AMP_MPI &) const
Overload operator <.
void minReduce(type *x, int n, int *rank) const
Min Reduce.
static constexpr uint64_t hashWorld
Definition AMP_MPI.h:84
std::vector< int > globalRanks() const
Return the global ranks for the comm.
MPI_Datatype Datatype
Definition AMP_MPI.h:69
AMP_MPI(const AMP_MPI &comm)
Constructor from existing communicator.
type maxReduce(const type &value) const
Max Reduce.
bool operator>(const AMP_MPI &) const
Overload operator >
void serializeStart() const
Start a serial region.
static double time()
Elapsed time.
void recv(type *buf, int length, int send, int tag=0) const
This function receives an MPI message with a data array from another processor. This call must be paired with an associated send.
void barrier() const
int getSize() const
static int getNumberOfProcessors()
Function to return the number of processors available.
int calcAllToAllDisp(const std::vector< int > &send_cnt, std::vector< int > &send_disp, std::vector< int > &recv_cnt, std::vector< int > &recv_disp) const
void maxReduce(const type *x, type *y, int n, int *rank) const
Max Reduce.
static Comm commSelf
Definition AMP_MPI.h:79
int_ptr d_currentTag
The current tag.
Definition AMP_MPI.h:1480
void recvBytes(void *buf, int N_bytes, int send, int tag=0) const
This function receives an MPI message with an array of max size number_bytes (MPI_BYTES) from any processor.
std::tuple< int, int, int > Iprobe(int source=-1, int tag=-1) const
Nonblocking test for a message.
std::tuple< int, int, int > probe(int source=-1, int tag=-1) const
Blocking test for a message.
void mapGather(std::map< KEY, DATA > &map) const
int newTag() const
Return a new tag.
void sumReduce(const type *x, type *y, int n=1) const
Sum Reduce.
std::vector< type > allGather(const type &x) const
void minReduce(type *x, int n) const
Min Reduce.
AMP_MPI()
Empty constructor.
void sumReduce(type *x, int n=1) const
Sum Reduce.
volatile std::atomic_int64_t atomic_int
Definition AMP_MPI.h:1469
static AMP_MPI intersect(const AMP_MPI &comm1, const AMP_MPI &comm2)
Create a communicator from the intersection of two communicators.
Request Irecv(type *buf, int length, int send_proc, int tag) const
This function receives an MPI message with a data array from another processor using a non-blocking c...
Request Irecv(type &data, int send_proc, int tag) const
This function receives an MPI message with a data array from another processor using a non-blocking c...
const Comm & getCommunicator() const
static void waitAll(int count, const Request *request)
Wait for all communications to finish.
void sleepBarrier(int ms=10) const
Perform a global barrier putting idle processors to sleep.
type sumScan(const type &x) const
Scan Sum Reduce.
MPI_Request Request2
Definition AMP_MPI.h:70
Request Isend(const type &data, int recv_proc, int tag) const
This function sends an MPI message with an array to another processor using a non-blocking call.
void maxScan(const type *x, type *y, int n) const
Scan Max Reduce.
void maxReduce(const type *x, type *y, int n) const
Max Reduce.
static void changeProfileLevel(int level)
Change the level of the internal timers.
Definition AMP_MPI.h:1429
bool operator>=(const AMP_MPI &) const
Overload operator >=.
static constexpr uint64_t hashMPI
Definition AMP_MPI.h:85
bool d_isNull
Is the communicator NULL.
Definition AMP_MPI.h:1474
void bcast(type *value, int n, int root) const
Broadcast.
~AMP_MPI()
Empty destructor.
Request IsendBytes(const void *buf, int N_bytes, int recv_proc, int tag) const
This function sends an MPI message with an array of bytes (MPI_BYTES) to receiving_proc_number using a non-blocking call.
type minReduce(const type &value) const
Min Reduce.
rand_ptr d_rand
Internal random number generator.
Definition AMP_MPI.h:1483
static bool MPI_Active()
Check if MPI is active.
void serializeStop() const
Stop a serial region.
static void start_MPI(int &argc, char *argv[], int profile_level=0)
Start MPI.
void reset()
Reset the object.
AMP_MPI dup(bool manage=true) const
Duplicate an existing communicator.
std::mt19937_64 * getRand() const
atomic_ptr d_count
How many objects share the communicator.
Definition AMP_MPI.h:1482
bool anyReduce(const bool value) const
Boolean any reduce.
void allReduce(std::vector< bool > &value) const
Boolean all reduce.
static size_t MPI_Comm_created()
Return the total number of MPI_Comm objects that have been created.
AMP_MPI(AMP_MPI &&rhs)
type maxScan(const type &x) const
Scan Max Reduce.
type minScan(const type &x) const
Scan Min Reduce.
uint64_t hash() const
Return a unique hash id for the comm.
int calcAllToAllDisp(const int *send_cnt, int *send_disp, int *recv_cnt, int *recv_disp) const
void setCallAbortInSerialInsteadOfExit(bool flag=true)
type sumReduce(const type &value) const
Sum Reduce.
int allToAll(const type *send_data, const int send_cnt[], const int send_disp[], type *recv_data, int *recv_cnt=nullptr, int *recv_disp=nullptr, bool known_recv=false) const
bool operator<=(const AMP_MPI &) const
Overload operator <=.
static std::string info()
Return details about MPI.
type recv(int send, int tag=0) const
This function receives an MPI message with a data array from another processor. This call must be paired with an associated send.
int compare(const AMP_MPI &) const
Compare to another communicator.
int *volatile int_ptr
Definition AMP_MPI.h:1470
static void waitAll(int count, Request2 *request)
Wait for all communications to finish.
static void stop_MPI()
Stop MPI.
std::vector< type > gather(const type &x, int root) const
void send(const type *buf, int length, int recv, int tag=0) const
This function sends an MPI message with an array to another processor.
static constexpr uint64_t hashSelf
Definition AMP_MPI.h:83
int allGather(const type *send_data, int send_cnt, type *recv_data, int *recv_cnt=nullptr, int *recv_disp=nullptr, bool known_recv=false) const
size_t rand() const
Generate a random number.
void minReduce(const type *x, type *y, int n, int *rank) const
Min Reduce.
AMP_MPI & operator=(const AMP_MPI &comm)
Assignment operator.
static double tick()
Timer resolution.
uint64_t hashRanks() const
Return a hash of the global ranks.
static std::vector< int > waitSome(int count, const Request *request)
Wait for some communications to finish.
AMP_MPI splitByNode(int key=-1, bool manage=true) const
Split an existing communicator by node.
static atomic_int N_MPI_Comm_created
Number of MPI_Comm objects created over time.
Definition AMP_MPI.h:1486
int d_size
The size of the communicator.
Definition AMP_MPI.h:1478
static void setProcessAffinity(const std::vector< int > &procs)
Function to set the affinity of the current process.
Request Isend(const type *buf, int length, int recv_proc, int tag) const
This function sends an MPI message with an array to another processor using a non-blocking call.
std::atomic_int *volatile atomic_ptr
Definition AMP_MPI.h:1468
static ThreadSupport queryThreadSupport()
Query the level of thread support.
static int waitAny(int count, const Request *request)
Wait for any communication to finish.
bool d_manage
Do we want to manage this communicator.
Definition AMP_MPI.h:1475
void maxReduce(type *x, int n, int *rank) const
Max Reduce.
bool operator!=(const AMP_MPI &) const
Overload operator !=.
static constexpr uint64_t hashNull
Definition AMP_MPI.h:82
bool allReduce(const bool value) const
Boolean all reduce.
std::mt19937_64 *volatile rand_ptr
Definition AMP_MPI.h:1471
static int waitAny(int count, Request2 *request)
Wait for any communication to finish.
int d_rank
The rank of the communicator.
Definition AMP_MPI.h:1477
void anyReduce(std::vector< bool > &value) const
Boolean any reduce.
#define DISABLE_WARNINGS
Suppress all warnings.
#define ENABLE_WARNINGS
Re-enable warnings.
AMP_MPI getComm(const TYPE &obj)
Return the underlying MPI class for the object.



Advanced Multi-Physics (AMP)
Oak Ridge National Laboratory
Idaho National Laboratory
Los Alamos National Laboratory
This page automatically produced from the
source code by doxygen
Last updated: Tue Mar 10 2026 13:06:41.
Comments on this page