All-to-all exchange of a single integer.
This is a wrapper around the sparse alltoall implementation and overcomes the lack of non-blocking collectives on some systems. A usage sketch follows after the interface and source listing below.
Type | Intent | Optional | Attributes | Name | Description
---|---|---|---|---|---
integer | intent(in) | | | targets(:) | List of target ranks to send an integer to.
integer | intent(in) | | | send_buffer(:) | Data to send to the respective target ranks. This array has to have the same ordering as targets.
integer | intent(out) | | allocatable | sources(:) | List of ranks we received data from (source ranks). The array will be allocated with a size according to the number of processes that send a request to this process.
integer | intent(out) | | allocatable | recv_buffer(:) | Received data from the sources. The array has the same size and ordering as the sources array.
integer | intent(in) | | | comm | MPI Communicator to use for this data exchange.
integer | intent(in) | optional | | tag | Tag to use in the communications. Defaults to 22.
```fortran
subroutine tem_comm_alltoall_int( targets, send_buffer, &
  &                               sources, recv_buffer, &
  &                               comm, tag              )
  !> List of target ranks to send an integer to.
  integer, intent(in) :: targets(:)
  !> Data to send to the respective target ranks. This array has to have the
  !! same ordering as targets.
  integer, intent(in) :: send_buffer(:)
  !> List of ranks we received data from (source ranks).
  !! The array will be allocated with a size according to the number of
  !! processes that send a request to this process.
  integer, intent(out), allocatable :: sources(:)
  !> Received data from the sources. The array has the same size and ordering
  !! as the sources array.
  integer, intent(out), allocatable :: recv_buffer(:)
  !> MPI Communicator to use for this data exchange.
  integer, intent(in) :: comm
  !> Tag to use in the communications. Defaults to 22.
  integer, intent(in), optional :: tag

  integer :: nProcs
  integer :: nSources
  integer :: iProc, iSource
  integer :: iError
  integer, allocatable :: buf(:)

  if (use_sparse_alltoall) then

    call tem_sparse_alltoall_int( targets     = targets,     &
      &                           send_buffer = send_buffer, &
      &                           sources     = sources,     &
      &                           recv_buffer = recv_buffer, &
      &                           comm        = comm,        &
      &                           tag         = tag          )

  else

    call MPI_Comm_size(comm, nProcs, iError)

    allocate(buf(0:nProcs-1))
    buf = 0
    buf(targets(:)) = send_buffer
    call MPI_Alltoall( MPI_IN_PLACE, 1, MPI_INTEGER, &
      &                buf, 1, MPI_INTEGER, comm, iError )
    nSources = count(buf /= 0)
    allocate(sources(nSources))
    allocate(recv_buffer(nSources))
    recv_buffer = 0
    iSource = 1
    do iProc=0,nProcs-1
      if (buf(iProc) /= 0) then
        sources(iSource) = iProc
        recv_buffer(iSource) = buf(iProc)
        iSource = iSource + 1
      end if
    end do

    deallocate(buf)

  end if

end subroutine tem_comm_alltoall_int
```
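A minimal usage sketch, assuming an initialized MPI environment and that the routine is provided by tem_comm_module (adjust the use statement to wherever tem_comm_alltoall_int lives in your build): each rank sends one integer to its right neighbour and collects whatever arrives, without knowing its senders in advance.

```fortran
! Hypothetical example: every rank sends the value myRank+1 to the next
! rank in the communicator; sources/recv_buffer are allocated by the
! wrapper according to how many ranks actually sent something here.
program alltoall_int_example
  use mpi
  use tem_comm_module, only: tem_comm_alltoall_int  ! assumed module name
  implicit none

  integer :: iError, myRank, nProcs
  integer :: targets(1), send_buffer(1)
  integer, allocatable :: sources(:), recv_buffer(:)

  call MPI_Init(iError)
  call MPI_Comm_rank(MPI_COMM_WORLD, myRank, iError)
  call MPI_Comm_size(MPI_COMM_WORLD, nProcs, iError)

  ! Send a nonzero value to the right neighbour (rank+1 modulo nProcs).
  targets(1)     = mod(myRank + 1, nProcs)
  send_buffer(1) = myRank + 1

  call tem_comm_alltoall_int( targets     = targets,        &
    &                         send_buffer = send_buffer,    &
    &                         sources     = sources,        &
    &                         recv_buffer = recv_buffer,    &
    &                         comm        = MPI_COMM_WORLD  )

  ! sources now lists the ranks that sent data to this process and
  ! recv_buffer holds the corresponding values, in the same order.
  write(*,*) 'Rank', myRank, 'received', recv_buffer, 'from', sources

  call MPI_Finalize(iError)
end program alltoall_int_example
```

The number of targets may differ from rank to rank; the routine determines the source ranks and received values on each process, so no prior knowledge of the senders is required.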