Advanced Multi-Physics (AMP)
On-Line Documentation
CSRMatrixCommunicator.h
Go to the documentation of this file.
1#ifndef included_AMP_CSRMatrixCommunicator_h
2#define included_AMP_CSRMatrixCommunicator_h
3
4#include "AMP/AMP_TPLs.h"
5#include "AMP/matrices/CSRConfig.h"
6#include "AMP/matrices/data/CSRLocalMatrixData.h"
7#include "AMP/utils/AMP_MPI.h"
8#include "AMP/utils/Memory.h"
9#include "AMP/vectors/CommunicationList.h"
10
11#include <map>
12#include <memory>
13#include <set>
14#include <vector>
15
16namespace AMP::LinearAlgebra {
17
// Communicates CSRLocalMatrixData blocks between MPI ranks: each rank posts
// per-destination matrices via sendMatrices() and assembles per-source matrices
// via recvMatrices(). Destinations/sources are derived from a CommunicationList,
// and non-GPU-aware-MPI device builds transparently stage data through a
// host-config clone of this communicator (d_migrate_comm).
//
// NOTE(review): this text is a doxygen extraction and several original source
// lines were dropped (original lines 19, 25, 28, 30, 32, 38, 41, 54, 69, 76,
// 92-95, 105-108). Gap locations are flagged with NOTE(review) comments below;
// restore from the real header before compiling.
18template<typename Config>
// NOTE(review): extraction dropped original line 19 here — the
// `class CSRMatrixCommunicator` declaration itself.
20{
21public:
    // Index/scalar types supplied by the Config policy.
22 using gidx_t = typename Config::gidx_t;
23 using lidx_t = typename Config::lidx_t;
24 using scalar_t = typename Config::scalar_t;
26 using allocator_type = typename Config::allocator_type;
    // The configured allocator must be a void allocator; it is rebound to the
    // concrete element types (gidx_t/lidx_t/scalar_t) below.
27 static_assert( std::is_same_v<typename allocator_type::value_type, void> );
    // NOTE(review): the three lines below lost their `using ... =` prefixes in
    // extraction; per the documented members they are gidxAllocator_t,
    // lidxAllocator_t and scalarAllocator_t respectively.
29 typename std::allocator_traits<allocator_type>::template rebind_alloc<gidx_t>;
31 typename std::allocator_traits<allocator_type>::template rebind_alloc<lidx_t>;
33 typename std::allocator_traits<allocator_type>::template rebind_alloc<scalar_t>;
34
35 // create a host config for cases where DtoH migration is required
36 using ConfigHost = typename Config::template set_alloc<alloc::host>::type;
37
    // Default constructor: no communicator attached; tags set to the invalid
    // sentinel -1 so an unconfigured instance is detectable.
    // NOTE(review): extraction dropped original line 38 (the constructor
    // signature) and line 41 (one member initializer between d_num_sources and
    // d_tag_test — presumably d_comm or a similar member; confirm in the real
    // header).
39 : d_send_called( false ),
40 d_num_sources( 0 ),
42 d_tag_test( -1 ),
43 d_tag_row( -1 ),
44 d_tag_col( -1 ),
45 d_tag_coeff( -1 )
46 {
47 }
48
    // Main constructor: binds to the comm_list's communicator and reserves
    // fresh MPI tags for each message stream (test/row/col/coeff).
    // flip_sendrecv swaps the send/receive roles taken from the
    // CommunicationList (see the size queries below).
49 CSRMatrixCommunicator( std::shared_ptr<CommunicationList> comm_list,
50 const bool flip_sendrecv = false )
51 : d_comm( comm_list->getComm() ),
52 d_send_called( false ),
53 d_num_sources( 0 ),
    // NOTE(review): extraction dropped original line 54 — one member
    // initializer between d_num_sources and d_tag_test; confirm which.
55 d_tag_test( d_comm.newTag() ),
56 d_tag_row( d_comm.newTag() ),
57 d_tag_col( d_comm.newTag() ),
58 d_tag_coeff( d_comm.newTag() )
59 {
    // Ranks we are allowed to send to = ranks with a nonzero send size
    // (receive size if roles are flipped).
60 auto send_sizes = !flip_sendrecv ? comm_list->getSendSizes() : comm_list->getReceiveSizes();
61 for ( int n = 0; n < d_comm.getSize(); ++n ) {
62 if ( send_sizes[n] > 0 ) {
63 d_allowed_dest.push_back( n );
64 }
65 }
    // Ranks we expect to receive from, symmetrically.
66 auto recv_sizes = !flip_sendrecv ? comm_list->getReceiveSizes() : comm_list->getSendSizes();
67 for ( int n = 0; n < d_comm.getSize(); ++n ) {
68 if ( recv_sizes[n] > 0 ) {
    // NOTE(review): extraction dropped original line 69 — the loop body that
    // records this source rank (presumably incrementing d_num_sources or
    // similar); confirm in the real header.
70 }
71 }
72
73 // This combination will require migration to/from host for send/recv
74 // create an internal host version of this communicator
75 if ( MIGRATE_DEV ) {
    // NOTE(review): extraction dropped original line 76 — per the documented
    // members this is the assignment `d_migrate_comm =`.
77 std::make_shared<CSRMatrixCommunicator<ConfigHost>>( comm_list, flip_sendrecv );
78 }
79 }
80
    // Post the given per-destination-rank matrices for sending, and receive
    // the per-source-rank matrices covering the given row/column ownership
    // range. Defined out of line.
81 void sendMatrices( const std::map<int, std::shared_ptr<localmatrixdata_t>> &matrices );
82 std::map<int, std::shared_ptr<localmatrixdata_t>>
83 recvMatrices( gidx_t first_row, gidx_t last_row, gidx_t first_col, gidx_t last_col );
84
85protected:
    // Host staging helpers used when MIGRATE_DEV is true: copy device-resident
    // matrices to/from host-config matrices around the actual MPI transfer.
86 void migrateToHost( const std::map<int, std::shared_ptr<localmatrixdata_t>> &matrices );
87 std::map<int, std::shared_ptr<localmatrixdata_t>> migrateFromHost(
88 const std::map<int, std::shared_ptr<CSRLocalMatrixData<ConfigHost>>> &matrices );
89
    // Determine how many ranks will send to this rank for the given send set.
90 void countSources( const std::map<int, std::shared_ptr<localmatrixdata_t>> &matrices );
91
    // NOTE(review): extraction dropped original lines 92-95 here — private
    // member declarations (presumably d_comm, d_send_called, d_num_sources per
    // the constructor initializer lists); confirm in the real header.
    // Ranks this instance is permitted to send to (nonzero comm-list sizes).
96 std::vector<int> d_allowed_dest;
97
    // Outstanding non-blocking send requests from sendMatrices().
98 std::vector<AMP_MPI::Request> d_send_requests;
99
100 // matrix migration support
101 std::map<int, std::shared_ptr<CSRLocalMatrixData<ConfigHost>>> d_send_mat_migrate;
102 std::shared_ptr<CSRMatrixCommunicator<ConfigHost>> d_migrate_comm;
103
104 // tags for each type of message to send/recv
    // NOTE(review): extraction dropped original lines 105-108 here — the
    // declarations of d_tag_test, d_tag_row, d_tag_col and d_tag_coeff
    // initialized by both constructors above; confirm in the real header.
109
110 // flag if device matrices need migration before/after comms
111#if defined( AMP_GPU_AWARE_MPI )
112 // have gpu-aware mpi, so migration never needed
113 static constexpr bool MIGRATE_DEV = false;
114#elif defined( AMP_USE_DEVICE )
115 // do not have gpu-aware mpi, only need migration if
116 // matrices live on device
117 static constexpr bool MIGRATE_DEV = std::is_same_v<allocator_type, AMP::DeviceAllocator<void>>;
118#else
119 // not a device build, so migration irrelevant
120 static constexpr bool MIGRATE_DEV = false;
121#endif
122};
123} // namespace AMP::LinearAlgebra
124
125#endif
Provides C++ wrapper around MPI routines.
Definition AMP_MPI.h:63
int getSize() const
typename std::allocator_traits< allocator_type >::template rebind_alloc< scalar_t > scalarAllocator_t
typename std::allocator_traits< allocator_type >::template rebind_alloc< lidx_t > lidxAllocator_t
typename std::allocator_traits< allocator_type >::template rebind_alloc< gidx_t > gidxAllocator_t
std::shared_ptr< CSRMatrixCommunicator< ConfigHost > > d_migrate_comm
std::map< int, std::shared_ptr< CSRLocalMatrixData< ConfigHost > > > d_send_mat_migrate
std::vector< AMP_MPI::Request > d_send_requests
typename Config::allocator_type allocator_type
std::map< int, std::shared_ptr< localmatrixdata_t > > migrateFromHost(const std::map< int, std::shared_ptr< CSRLocalMatrixData< ConfigHost > > > &matrices)
void countSources(const std::map< int, std::shared_ptr< localmatrixdata_t > > &matrices)
typename Config::template set_alloc< alloc::host >::type ConfigHost
CSRMatrixCommunicator(std::shared_ptr< CommunicationList > comm_list, const bool flip_sendrecv=false)
std::map< int, std::shared_ptr< localmatrixdata_t > > recvMatrices(gidx_t first_row, gidx_t last_row, gidx_t first_col, gidx_t last_col)
void sendMatrices(const std::map< int, std::shared_ptr< localmatrixdata_t > > &matrices)
void migrateToHost(const std::map< int, std::shared_ptr< localmatrixdata_t > > &matrices)
AMP_MPI getComm(const TYPE &obj)
Return the underlying MPI class for the object.



Advanced Multi-Physics (AMP)
Oak Ridge National Laboratory
Idaho National Laboratory
Los Alamos National Laboratory
This page automatically produced from the
source code by doxygen
Last updated: Tue Mar 10 2026 13:06:40.
Comments on this page