This subroutine reads a mesh in treelm format from disk; the directory to read from is taken from the global mesh description (me%global%dirname).
If the complete mesh is small enough to fit into the IO buffer, it is read by a single process and the resulting data is distributed to all other processes via a broadcast; otherwise each process reads its own portion collectively with MPI I/O.
Type | Intent | Optional | Attributes | Name | Description
---|---|---|---|---|---
type(treelmesh_type) | intent(inout) | | :: | me | Structure to load the mesh to
integer | intent(in) | | :: | nParts | Number of partitions the mesh is partitioned into (= number of MPI processes in comm)
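A minimal calling sketch is given below. It is a hypothetical driver, not part of the library: the module name treelmesh_module is an assumption, and the setup of mesh%global, mesh%nElems and mesh%elemOffset (normally filled from the mesh header before the element list is read) is only indicated by a placeholder comment.

program load_mesh_example
  use mpi
  ! Assumed module name exposing treelmesh_type and load_treelmesh
  use treelmesh_module, only: treelmesh_type, load_treelmesh
  implicit none

  type(treelmesh_type) :: mesh
  integer :: nParts, iError

  call MPI_Init(iError)
  ! One partition per MPI process in the mesh communicator
  call MPI_Comm_size(MPI_COMM_WORLD, nParts, iError)

  ! ... mesh%global (comm, dirname, nElems, nProperties, myPart) as well as
  !     mesh%nElems and mesh%elemOffset have to be set before this call,
  !     typically by reading the mesh header ...

  call load_treelmesh(me = mesh, nParts = nParts)

  call MPI_Finalize(iError)
end program load_mesh_example

The source of load_treelmesh follows.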
subroutine load_treelmesh(me, nParts )
! -------------------------------------------------------------------- !
!> Structure to load the mesh to
type(treelmesh_type), intent(inout) :: me
!> Directory containing the mesh information
! character(len=*), intent(in) :: dirname
!> Partition to use on the calling process (= MPI Rank in comm)
! integer, intent(in) :: myPart
!> Number of partitions, the mesh is partitioned into (= Number of MPI
!! processes in comm).
integer, intent(in) :: nParts
! -------------------------------------------------------------------- !
logical :: ex
integer :: iElem
integer :: iProp
integer :: fh, etype, ftype
integer :: iostatus( MPI_STATUS_SIZE )
integer :: iError
integer :: file_rec_len
integer :: typesize
character(len=300) :: ElemFileName
character(len=4) :: EndianSuffix
integer(kind=long_k), allocatable :: buffer(:)
integer(kind=long_k), allocatable :: globbuffer(:)
integer(kind=MPI_OFFSET_KIND) :: displacement
! -------------------------------------------------------------------- !
EndianSuffix = tem_create_EndianSuffix()
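! The element list file name carries a suffix indicating the byte order
! (endianness) the mesh was written with.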
ElemFileName = trim(me%global%dirname)//'elemlist'//EndianSuffix
write(logUnit(1), *) 'Load mesh from file: '//trim(ElemFileName)
if (associated(me%property)) deallocate(me%property)
allocate(me%Property(me%global%nProperties))
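! Part_First/Part_Last will hold the first and last treeID of every
! partition (filled by the allgather calls further down).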
allocate(me%Part_First(nParts))
allocate(me%Part_Last(nParts))
allocate(me%treeID(me%nElems))
if (me%global%myPart == 0) then
! Only one process should check the file existence
inquire(file=trim(ElemFileName), exist=ex)
if (.not. ex) then
write(logUnit(1),*) 'ERROR: File ' // trim(ElemFileName) &
& // ' not found!'
write(logUnit(1),*) 'Aborting.'
call tem_abort()
end if
end if
allocate(me%ElemPropertyBits(me%nElems))
allocate(buffer(me%nElems*2))
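! Two long integers are stored per element: its treeID and its property bits.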
if (me%global%nElems*2 > io_buffer_size) then
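! The complete element data would exceed the single-process IO buffer,
! so every process reads its own portion collectively via MPI I/O.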
write(logUnit(1),*) 'using MPI to read'
! Create a contiguous type to describe the vector per element
call MPI_Type_contiguous( 2, long_k_mpi, etype, iError )
call check_mpi_error(iError,'type etype in load_treelmesh')
call MPI_Type_commit( etype, iError )
call check_mpi_error(iError,'commit etype in load_treelmesh')
call MPI_Type_size(etype, typesize, iError )
call check_mpi_error(iError,'typesize in load_treelmesh')
! Create a MPI Subarray as ftype for file view
call MPI_Type_contiguous( me%nElems, etype, ftype, iError )
call check_mpi_error(iError,'type ftype in load_treelmesh')
call MPI_Type_commit( ftype, iError )
call check_mpi_error(iError,'commit ftype in load_treelmesh')
! Calculate displacement for file view
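! (the byte offset of this partition's first element: elemOffset elements
!  of typesize bytes each are skipped)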
displacement = me%elemOffset * typesize * 1_MPI_OFFSET_KIND
! Open the binary file for MPI I/O (read only) and set the view
! for each process on the file
call MPI_File_open( me%global%comm, trim(ElemFileName), &
& MPI_MODE_RDONLY, MPI_INFO_NULL, fh, iError )
call check_mpi_error(iError,'file_open in load_treelmesh')
call MPI_File_set_view( fh, displacement, etype, ftype, "native", &
& MPI_INFO_NULL, iError )
call check_mpi_error(iError,'set_view in load_treelmesh')
! Read data from the file
call MPI_File_read_all( fh, buffer, me%nElems, etype, iostatus, iError )
call check_mpi_error(iError,'read_all in load_treelmesh')
!Free the MPI_Datatypes which were created and close the file
call MPI_Type_free (etype, iError)
call check_mpi_error(iError,'free etype in load_treelmesh')
call MPI_Type_free (ftype, iError)
call check_mpi_error(iError,'free ftype in load_treelmesh')
call MPI_File_close(fh, iError)
call check_mpi_error(iError,'close file in load_treelmesh')
! END IO-part
else
! The mesh is so small, it probably is better to read it on one process
! and distribute the resulting data via the network.
write(logUnit(1),*) 'using one process to read and broadcast'
allocate( globbuffer(me%global%nElems*2) )
if (me%global%myPart == 0) then ! single proc read
inquire(iolength = file_rec_len) globbuffer
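! The iolength of the complete buffer gives the record length needed to
! read the whole element list as a single direct access record.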
call tem_open( newunit = fh, &
& file = trim(ElemFileName), &
& recl = file_rec_len, &
& action = 'read', &
& access = 'direct', &
& form = 'unformatted' )
read(fh, rec=1) globbuffer
close(fh)
end if ! single proc read
call MPI_Bcast( globbuffer, int(2*me%global%nElems), long_k_mpi, 0, &
& me%global%comm, iError )
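! Pick this partition's slice (nElems elements starting after elemOffset)
! out of the globally read buffer.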
buffer = globbuffer(me%elemOffset*2+1:(me%ElemOffset+me%nElems)*2)
deallocate(globbuffer)
end if ! END IO-part
! Fill the tree with what was read in buffer from disk
do iElem = 1, me%nElems
me%treeID(iElem) = buffer( (iElem-1)*2 + 1 )
me%ElemPropertyBits(iElem) = buffer( (iElem-1)*2 + 2 )
end do
deallocate( buffer )
! clear the prp_sendHalo property since the distribution of elements
! might have changed
do iElem=1,me%nElems
me%ElemPropertyBits(iElem) = ibclr( me%ElemPropertyBits(iElem), &
& prp_sendHalo )
end do
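! Exchange the first and last treeID of every partition, so each process
! knows the bounding treeIDs of all other partitions.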
call MPI_allgather( me%treeID(1), 1, long_k_mpi, &
& me%Part_First, 1, long_k_mpi, &
& me%global%comm, iError )
call MPI_allgather( me%treeID(me%nElems), 1, long_k_mpi, &
& me%Part_Last, 1, long_k_mpi, &
& me%global%comm, iError )
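! For each property, collect the local elements flagged in the bit field
! into the corresponding property description.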
do iProp=1,me%global%nProperties
call gather_Property( Property = me%Property(iProp), &
& Header = me%global%Property(iProp), &
& BitField = me%ElemPropertyBits, &
& comm = me%global%comm )
end do
write(logUnit(1),*) 'Done, reading the mesh!'
end subroutine load_treelmesh