Unverified Commit 4e2b5add authored by Graham, Aaron, committed by GitHub

Add gatherv generic to MPI environment (#327)

* Add gatherv generic to MPI environment

* Fix typo when MPI is undefined

* Fix serial version of testParallelEnv
parent 4d51ffd3
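
This change adds a gatherv generic to MPI_EnvType that wraps MPI_Gatherv for 1D arrays of single-precision (SSK) and double-precision (SDK) reals. Unlike the fixed-count gather wrappers, each rank may contribute a different number of values; on the root, recvbuf returns the concatenated data in rank order and recvcounts(i) holds the count contributed by rank i-1, while on all other ranks both arrays are returned unallocated. A minimal usage sketch follows; it is not taken from the diff, the routine name demoGatherv is hypothetical, and it assumes an already-initialized MPI_EnvType and the SIK/SDK kind parameters from the package's IntrType module:

  !Hypothetical illustration of the new generic; not a routine from this PR
  SUBROUTINE demoGatherv(myPE)
    USE IntrType
    USE ParallelEnv
    IMPLICIT NONE
    TYPE(MPI_EnvType),INTENT(IN) :: myPE
    REAL(SDK) :: sendbuf(2)
    REAL(SDK),ALLOCATABLE :: recvbuf(:)
    INTEGER(SIK),ALLOCATABLE :: recvcounts(:)

    !Each rank sends 2 values here, but the counts may differ in general
    sendbuf=[REAL(myPE%rank,SDK),REAL(myPE%rank+1,SDK)]
    !Gather to the default root (rank 0); recvbuf and recvcounts come back
    !allocated on the root only and unallocated on all other ranks
    CALL myPE%gatherv(sendbuf,recvbuf,recvcounts)
    IF(myPE%rank == 0) WRITE(*,*) 'gathered',SIZE(recvbuf),'values'
  ENDSUBROUTINE demoGatherv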
@@ -184,12 +184,12 @@ TYPE,EXTENDS(ParEnvType) :: MPI_EnvType
send_REAL1_MPI_Env_type, &
send_INT1_MPI_Env_type
!> @copybrief ParallelEnv::gather_SIK0_MPI_Env_type
!> @copydetails ParallelEnv::gather_SIK0_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: gather_SIK0_MPI_Env_type
!> @copybrief ParallelEnv::gather_SIK1_MPI_Env_type
!> @copydetails ParallelEnv::gather_SIK1_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: gather_SIK1_MPI_Env_type
!> @copybrief ParallelEnv::gather_SNK0_MPI_Env_type
!> @copydetails ParallelEnv::gather_SNK0_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: gather_SNK0_MPI_Env_type
!> @copybrief ParallelEnv::gather_SNK1_MPI_Env_type
!> @copydetails ParallelEnv::gather_SNK1_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: gather_SNK1_MPI_Env_type
!> @copybrief ParallelEnv::gather_SLK0_MPI_Env_type
!> @copydetails ParallelEnv::gather_SLK0_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: gather_SLK0_MPI_Env_type
@@ -203,13 +203,21 @@ TYPE,EXTENDS(ParEnvType) :: MPI_EnvType
!> @copydetails ParallelEnv::gather_str2D_MPI_ENV_type
PROCEDURE,PASS,PRIVATE :: gather_str2D_MPI_ENV_type
!>
GENERIC :: gather => gather_SIK0_MPI_Env_type, &
GENERIC :: gather => gather_SNK0_MPI_Env_type, &
gather_SNK1_MPI_Env_type, &
gather_SLK0_MPI_Env_type, &
gather_SLK1_MPI_Env_type, &
gather_SIK1_MPI_Env_type, &
gather_str1D_MPI_ENV_type, &
gather_str2D_MPI_ENV_type
!> @copybrief ParallelEnv::gatherv_SSK1_MPI_Env_type
!> @copydetails ParallelEnv::gatherv_SSK1_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: gatherv_SSK1_MPI_Env_type
!> @copybrief ParallelEnv::gatherv_SDK1_MPI_Env_type
!> @copydetails ParallelEnv::gatherv_SDK1_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: gatherv_SDK1_MPI_Env_type
!> Generic interface for MPI_gatherv
GENERIC :: gatherv => gatherv_SSK1_MPI_Env_type, &
gatherv_SDK1_MPI_Env_type
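!> Note: the generic resolves on the real kind of the buffers, so a single
!> CALL env%gatherv(...) dispatches to the SSK or SDK routine automatically.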
!> @copybrief ParallelEnv::scatter_SLK0_MPI_Env_type
!> @copydetails ParallelEnv::scatter_SLK0_MPI_Env_type
PROCEDURE,PASS,PRIVATE :: scatter_SLK0_MPI_Env_type
@@ -997,10 +1005,10 @@ ENDSUBROUTINE recv_INT1_MPI_Env_type
!> @param recvbuf the data which is to be received
!> @param root the rank of the root process
!>
SUBROUTINE gather_SIK0_MPI_Env_type(myPE,sendbuf,recvbuf,root)
SUBROUTINE gather_SNK0_MPI_Env_type(myPE,sendbuf,recvbuf,root)
CLASS(MPI_EnvType),INTENT(IN) :: myPE
INTEGER(SIK),INTENT(IN) :: sendbuf
INTEGER(SIK),INTENT(INOUT) :: recvbuf(:)
INTEGER(SNK),INTENT(IN) :: sendbuf
INTEGER(SNK),INTENT(INOUT) :: recvbuf(:)
INTEGER(SIK),INTENT(IN),OPTIONAL :: root
INTEGER(SIK) :: rank
rank=0
@@ -1014,74 +1022,102 @@ SUBROUTINE gather_SIK0_MPI_Env_type(myPE,sendbuf,recvbuf,root)
#else
recvbuf(1)=sendbuf
#endif
ENDSUBROUTINE gather_SIK0_MPI_Env_type
ENDSUBROUTINE gather_SNK0_MPI_Env_type
!
!-------------------------------------------------------------------------------
!> @brief Wrapper that emulates MPI_Gather for a non-contiguous array of Strings
!> @brief Wrapper routine that calls MPI_Gather for an SNK array
!> @param myPE parallel environment where the communication originates
!> @param sendbuf the data which is to be sent
!> @param recvbuf the data which is to be received
!> @param root the rank of the root process
!>
SUBROUTINE gather_str2D_MPI_ENV_type(myPE,sendbuf,root)
SUBROUTINE gather_SNK1_MPI_Env_type(myPE,sendbuf,recvbuf,root)
CLASS(MPI_EnvType),INTENT(IN) :: myPE
TYPE(StringType),INTENT(INOUT) :: sendbuf(:,:)
INTEGER(SNK),INTENT(IN) :: sendbuf(:)
INTEGER(SNK),INTENT(INOUT) :: recvbuf(:,:)
INTEGER(SIK),INTENT(IN),OPTIONAL :: root
INTEGER(SIK) :: rank,count
#ifndef HAVE_MPI
INTEGER(SIK) :: i,j,n
#endif
rank=0
IF(PRESENT(root)) rank=root
REQUIRE(0 <= rank)
REQUIRE(rank < myPE%nproc)
count=SIZE(sendbuf)
REQUIRE(SIZE(recvbuf) == myPE%nproc*count)
#ifdef HAVE_MPI
INTEGER(SIK) :: rank,i,j,iEntry,maxChars
INTEGER(SIK),ALLOCATABLE :: charProc(:,:)
CHARACTER(LEN=:),ALLOCATABLE :: chars
!32-bit integer
CALL MPI_Gather(sendbuf,count,MPI_INTEGER,recvbuf,count, &
MPI_INTEGER,rank,myPE%comm,mpierr)
#else
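!Serial fallback: copy sendbuf into recvbuf element by element,
!filling recvbuf in column-major order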
DO n=1,count
i=MOD(n-1,SIZE(recvbuf,DIM=1))+1
j=(n-1)/SIZE(recvbuf,DIM=1)+1
recvbuf(i,j)=sendbuf(n)
ENDDO
#endif
ENDSUBROUTINE gather_SNK1_MPI_Env_type
!
!-------------------------------------------------------------------------------
!> @brief Wrapper routine that calls MPI_Gather
!> @param myPE parallel environment where the communication originates
!> @param sendbuf the data which is to be sent
!> @param recvbuf the data which is to be received
!> @param root the rank of the root process
!>
SUBROUTINE gather_SLK0_MPI_Env_type(myPE,sendbuf,recvbuf,root)
CLASS(MPI_EnvType),INTENT(IN) :: myPE
INTEGER(SLK),INTENT(IN) :: sendbuf
INTEGER(SLK),INTENT(INOUT) :: recvbuf(:)
INTEGER(SIK),INTENT(IN),OPTIONAL :: root
INTEGER(SIK) :: rank
rank=0
IF(PRESENT(root)) rank=root
REQUIRE(0 <= rank)
REQUIRE(rank < myPE%nproc)
IF(myPE%nproc == 0) RETURN
! Need to know the maximum length
maxChars = MAXVAL(LEN(sendbuf(:,:)))
CALL myPE%allReduceMaxI_scalar(maxChars)
!Need to know which process is responsible for which string
ALLOCATE(charProc(UBOUND(sendbuf,DIM=1),UBOUND(sendbuf,DIM=2)))
charProc = -HUGE(rank)
iEntry = 0
DO j=1,UBOUND(charProc,DIM=2)
DO i=1,UBOUND(charProc,DIM=1)
iEntry = iEntry + 1
IF(LEN(sendbuf(i,j)) > 0) charProc(i,j) = myPE%rank
ENDDO
REQUIRE(SIZE(recvbuf) == myPE%nproc)
#ifdef HAVE_MPI
CALL MPI_Gather(sendbuf,1,MPI_INTEGER8,recvbuf,1,MPI_INTEGER8, &
rank,myPE%comm,mpierr)
#else
recvbuf(1)=sendbuf
#endif
ENDSUBROUTINE gather_SLK0_MPI_Env_type
!
!-------------------------------------------------------------------------------
!> @brief Wrapper routine that calls MPI_Gather
!> @param myPE parallel environment where the communication originates
!> @param sendbuf the data which is to be sent
!> @param recvbuf the data which is to be received
!> @param root the rank of the root process
!>
SUBROUTINE gather_SLK1_MPI_Env_type(myPE,sendbuf,recvbuf,root)
CLASS(MPI_EnvType),INTENT(IN) :: myPE
INTEGER(SLK),INTENT(IN) :: sendbuf(:)
INTEGER(SLK),INTENT(INOUT) :: recvbuf(:,:)
INTEGER(SIK),INTENT(IN),OPTIONAL :: root
INTEGER(SIK) :: rank,count
#ifndef HAVE_MPI
INTEGER(SIK) :: i,j,n
#endif
rank=0
IF(PRESENT(root)) rank=root
REQUIRE(0 <= rank)
REQUIRE(rank < myPE%nproc)
count=SIZE(sendbuf)
REQUIRE(SIZE(recvbuf) == myPE%nproc*count)
#ifdef HAVE_MPI
CALL MPI_Gather(sendbuf,count,MPI_INTEGER8,recvbuf,count, &
MPI_INTEGER8,rank,myPE%comm,mpierr)
#else
DO n=1,count
i=MOD(n-1,SIZE(recvbuf,DIM=1))+1
j=(n-1)/SIZE(recvbuf,DIM=1)+1
recvbuf(i,j)=sendbuf(n)
ENDDO
!Ensure we get an updated rank for each string if possible
!Entries that are empty on all processes keep a rank of -HUGE and are skipped
CALL myPE%allReduceMaxI(SIZE(charProc),charProc)
!Set up individual sends and receives for each string
IF(myPE%rank /= rank) THEN
iEntry = 0
DO j=1,UBOUND(sendbuf,DIM=2)
DO i=1,UBOUND(sendbuf,DIM=1)
iEntry = iEntry + 1
IF(charProc(i,j) == myPE%rank) THEN
CALL myPE%send(CHAR(sendbuf(i,j)),rank,iEntry)
ENDIF
ENDDO
ENDDO
ELSE
ALLOCATE(CHARACTER(maxChars) :: chars)
iEntry = 0
DO j=1,UBOUND(sendbuf,DIM=2)
DO i=1,UBOUND(sendbuf,DIM=1)
iEntry = iEntry + 1
IF(charProc(i,j) /= rank .AND. charProc(i,j) >= 0) THEN
chars = REPEAT(" ",maxChars)
CALL myPE%recv(chars,charProc(i,j),iEntry)
sendbuf(i,j) = TRIM(chars)
ENDIF
ENDDO
ENDDO
ENDIF
#endif
ENDSUBROUTINE gather_str2D_MPI_ENV_type
ENDSUBROUTINE gather_SLK1_MPI_Env_type
!
!-------------------------------------------------------------------------------
!> @brief Wrapper that emulates MPI_Gather for a non-contiguous array of Strings
@@ -1139,105 +1175,165 @@ SUBROUTINE gather_str1D_MPI_ENV_type(myPE,sendbuf,root)
ENDSUBROUTINE gather_str1D_MPI_ENV_type
!
!-------------------------------------------------------------------------------
!> @brief Wrapper routine that calls MPI_Gather for an SIK array
!> @brief Wrapper that emulates MPI_Gather for a non-contiguous array of Strings
!> @param myPE parallel environment where the communication originates
!> @param sendbuf the data which is to be sent
!> @param recvbuf the data which is to be received
!> @param root the rank of the root process
!>
SUBROUTINE gather_SIK1_MPI_Env_type(myPE,sendbuf,recvbuf,root)
SUBROUTINE gather_str2D_MPI_ENV_type(myPE,sendbuf,root)
CLASS(MPI_EnvType),INTENT(IN) :: myPE
INTEGER(SIK),INTENT(IN) :: sendbuf(:)
INTEGER(SIK),INTENT(INOUT) :: recvbuf(:,:)
TYPE(StringType),INTENT(INOUT) :: sendbuf(:,:)
INTEGER(SIK),INTENT(IN),OPTIONAL :: root
INTEGER(SIK) :: rank,count
#ifndef HAVE_MPI
INTEGER(SIK) :: i,j,n
#endif
#ifdef HAVE_MPI
INTEGER(SIK) :: rank,i,j,iEntry,maxChars
INTEGER(SIK),ALLOCATABLE :: charProc(:,:)
CHARACTER(LEN=:),ALLOCATABLE :: chars
rank=0
IF(PRESENT(root)) rank=root
REQUIRE(0 <= rank)
REQUIRE(rank < myPE%nproc)
count=SIZE(sendbuf)
REQUIRE(SIZE(recvbuf) == myPE%nproc*count)
#ifdef HAVE_MPI
#ifdef DBLINT
!64-bit integer
CALL MPI_Gather(sendbuf,count,MPI_INTEGER8,recvbuf,count, &
MPI_INTEGER8,rank,myPE%comm,mpierr)
#else
!32-bit integer
CALL MPI_Gather(sendbuf,count,MPI_INTEGER,recvbuf,count, &
MPI_INTEGER,rank,myPE%comm,mpierr)
#endif
#else
DO n=1,count
i=MOD(n-1,SIZE(recvbuf,DIM=1))+1
j=(n-1)/SIZE(recvbuf,DIM=1)+1
recvbuf(i,j)=sendbuf(n)
IF(myPE%nproc == 0) RETURN
! Need to know the maximum length
maxChars = MAXVAL(LEN(sendbuf(:,:)))
CALL myPE%allReduceMaxI_scalar(maxChars)
!Need to know which process is responsible for which string
ALLOCATE(charProc(UBOUND(sendbuf,DIM=1),UBOUND(sendbuf,DIM=2)))
charProc = -HUGE(rank)
iEntry = 0
DO j=1,UBOUND(charProc,DIM=2)
DO i=1,UBOUND(charProc,DIM=1)
iEntry = iEntry + 1
IF(LEN(sendbuf(i,j)) > 0) charProc(i,j) = myPE%rank
ENDDO
ENDDO
!Ensure we get an updated rank for each string if possible
!Entries that are empty on all processes keep a rank of -HUGE and are skipped
CALL myPE%allReduceMaxI(SIZE(charProc),charProc)
!Set up individual sends and receives for each string
IF(myPE%rank /= rank) THEN
iEntry = 0
DO j=1,UBOUND(sendbuf,DIM=2)
DO i=1,UBOUND(sendbuf,DIM=1)
iEntry = iEntry + 1
IF(charProc(i,j) == myPE%rank) THEN
CALL myPE%send(CHAR(sendbuf(i,j)),rank,iEntry)
ENDIF
ENDDO
ENDDO
ELSE
ALLOCATE(CHARACTER(maxChars) :: chars)
iEntry = 0
DO j=1,UBOUND(sendbuf,DIM=2)
DO i=1,UBOUND(sendbuf,DIM=1)
iEntry = iEntry + 1
IF(charProc(i,j) /= rank .AND. charProc(i,j) >= 0) THEN
chars = REPEAT(" ",maxChars)
CALL myPE%recv(chars,charProc(i,j),iEntry)
sendbuf(i,j) = TRIM(chars)
ENDIF
ENDDO
ENDDO
ENDIF
#endif
ENDSUBROUTINE gather_SIK1_MPI_Env_type
ENDSUBROUTINE gather_str2D_MPI_ENV_type
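!
!In summary, the emulation above replaces a single MPI_Gather with
!point-to-point messages, because an array of StringTypes is not contiguous
!in memory:
!  (1) an all-reduce max finds the longest string so the root can size its
!      receive buffer;
!  (2) a second all-reduce max over charProc tags each (i,j) entry with the
!      highest rank holding a nonempty string there; entries empty on every
!      rank keep -HUGE and are skipped;
!  (3) each owning rank sends its strings to the root with the linear index
!      iEntry as the message tag, and the root receives them in place into
!      sendbuf.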
!
!-------------------------------------------------------------------------------
!> @brief Wrapper routine that calls MPI_Gather
!> @param myPE parallel environment where the communication originates
!> @param sendbuf the data which is to be sent
!> @param recvbuf the data which is to be received
!> @param root the rank of the root process
!> @brief Wrapper routine for MPI_Gatherv for a 1D array of SSK reals
!> @param this the MPI environment
!> @param sendbuf the data to be sent by this process
!> @param recvbuf the data received on the root process
!> @param recvcounts the number of values received by the root process from each process
!> @param root the process to gather all the data to; optional, defaults to 0
!>
SUBROUTINE gather_SLK0_MPI_Env_type(myPE,sendbuf,recvbuf,root)
CLASS(MPI_EnvType),INTENT(IN) :: myPE
INTEGER(SLK),INTENT(IN) :: sendbuf
INTEGER(SLK),INTENT(INOUT) :: recvbuf(:)
SUBROUTINE gatherv_SSK1_MPI_Env_type(this,sendbuf,recvbuf,recvcounts,root)
CLASS(MPI_EnvType),INTENT(IN) :: this
REAL(SSK),INTENT(IN) :: sendbuf(:)
REAL(SSK),INTENT(OUT),ALLOCATABLE :: recvbuf(:)
INTEGER(SIK),INTENT(OUT),ALLOCATABLE :: recvcounts(:)
INTEGER(SIK),INTENT(IN),OPTIONAL :: root
INTEGER(SIK) :: rank
!
INTEGER(SIK) :: rank,i
INTEGER(SIK),ALLOCATABLE :: displs(:)
rank=0
IF(PRESENT(root)) rank=root
REQUIRE(0 <= rank)
REQUIRE(rank < myPE%nproc)
REQUIRE(SIZE(recvbuf) == myPE%nproc)
REQUIRE(rank >= 0)
REQUIRE(rank < this%nproc)
ALLOCATE(recvcounts(this%nproc))
CALL this%gather(SIZE(sendbuf),recvcounts,rank)
ALLOCATE(displs(this%nproc))
IF(this%rank == rank) THEN
displs(1)=0
DO i=2,this%nproc
displs(i)=displs(i-1)+recvcounts(i-1)
ENDDO !i
ENDIF
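!MPI_Gather defines recvcounts only on the root, so SUM(recvcounts) is
!only meaningful there; the buffers allocated on other ranks are
!deallocated again before returning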
ALLOCATE(recvbuf(SUM(recvcounts)))
#ifdef HAVE_MPI
CALL MPI_Gather(sendbuf,1,MPI_INTEGER8,recvbuf,1,MPI_INTEGER8, &
rank,myPE%comm,mpierr)
CALL MPI_Gatherv(sendbuf,SIZE(sendbuf),MPI_REAL,recvbuf,recvcounts, &
displs,MPI_REAL,rank,this%comm,mpierr)
#else
recvbuf(1)=sendbuf
recvbuf=sendbuf
#endif
ENDSUBROUTINE gather_SLK0_MPI_Env_type
IF(this%rank /= rank) THEN
DEALLOCATE(recvbuf)
DEALLOCATE(recvcounts)
ENDIF
ENDSUBROUTINE gatherv_SSK1_MPI_Env_type
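!
!Worked illustration of the displacement logic above, using the same
!two-process case as the %gatherv test in testParallelEnv below: with the
!ranks sending 3 and 2 values,
!  recvcounts = [3, 2]
!  displs     = [0, 3]
!so rank 0's data lands in recvbuf(1:3) and rank 1's in recvbuf(4:5).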
!
!-------------------------------------------------------------------------------
!> @brief Wrapper routine that calls MPI_Gather
!> @param myPE parallel environment where the communication originates
!> @param sendbuf the data which is to be sent
!> @param recvbuf the data which is to be received
!> @param root the rank of the root process
!> @brief Wrapper routine for MPI_Gatherv for a 1D array of SDK reals
!> @param this the MPI environment
!> @param sendbuf the data to be sent by this process
!> @param recvbuf the data received on the root process
!> @param recvcounts the number of values received by the root process from each process
!> @param root the process to gather all the data to; optional, defaults to 0
!>
SUBROUTINE gather_SLK1_MPI_Env_type(myPE,sendbuf,recvbuf,root)
CLASS(MPI_EnvType),INTENT(IN) :: myPE
INTEGER(SLK),INTENT(IN) :: sendbuf(:)
INTEGER(SLK),INTENT(INOUT) :: recvbuf(:,:)
SUBROUTINE gatherv_SDK1_MPI_Env_type(this,sendbuf,recvbuf,recvcounts,root)
CLASS(MPI_EnvType),INTENT(IN) :: this
REAL(SDK),INTENT(IN) :: sendbuf(:)
REAL(SDK),INTENT(OUT),ALLOCATABLE :: recvbuf(:)
INTEGER(SIK),INTENT(OUT),ALLOCATABLE :: recvcounts(:)
INTEGER(SIK),INTENT(IN),OPTIONAL :: root
INTEGER(SIK) :: rank,count
#ifndef HAVE_MPI
INTEGER(SIK) :: i,j,n
#endif
!
INTEGER(SIK) :: rank,i
INTEGER(SIK),ALLOCATABLE :: displs(:)
rank=0
IF(PRESENT(root)) rank=root
REQUIRE(0 <= rank)
REQUIRE(rank < myPE%nproc)
count=SIZE(sendbuf)
REQUIRE(SIZE(recvbuf) == myPE%nproc*count)
REQUIRE(rank >= 0)
REQUIRE(rank < this%nproc)
ALLOCATE(recvcounts(this%nproc))
CALL this%gather(SIZE(sendbuf),recvcounts,rank)
ALLOCATE(displs(this%nproc))
IF(this%rank == rank) THEN
displs(1)=0
DO i=2,this%nproc
displs(i)=displs(i-1)+recvcounts(i-1)
ENDDO !i
ENDIF
ALLOCATE(recvbuf(SUM(recvcounts)))
#ifdef HAVE_MPI
CALL MPI_Gather(sendbuf,count,MPI_INTEGER8,recvbuf,count, &
MPI_INTEGER8,rank,myPE%comm,mpierr)
CALL MPI_Gatherv(sendbuf,SIZE(sendbuf),MPI_REAL8,recvbuf,recvcounts, &
displs,MPI_REAL8,rank,this%comm,mpierr)
#else
DO n=1,count
i=MOD(n-1,SIZE(recvbuf,DIM=1))+1
j=(n-1)/SIZE(recvbuf,DIM=1)+1
recvbuf(i,j)=sendbuf(n)
ENDDO
recvbuf=sendbuf
#endif
ENDSUBROUTINE gather_SLK1_MPI_Env_type
IF(this%rank /= rank) THEN
DEALLOCATE(recvbuf)
DEALLOCATE(recvcounts)
ENDIF
ENDSUBROUTINE gatherv_SDK1_MPI_Env_type
!
!-------------------------------------------------------------------------------
!> @brief Wrapper routine that calls MPI_Scatter
......
@@ -115,8 +115,10 @@ SUBROUTINE testMPIEnv()
INTEGER(SLK) :: sbuf(2)
INTEGER(SIK) :: sbuf_SIK(2),sbuf0_SIK
REAL(SRK) :: sbuf_SRK(2),sbuf0_SRK
REAL(SSK),ALLOCATABLE :: sendSSK1(:),recvSSK1(:)
REAL(SDK),ALLOCATABLE :: sendSDK1(:),recvSDK1(:)
INTEGER(SIK),ALLOCATABLE :: ranks_SIK(:),ranks2_SIK(:,:)
INTEGER(SIK),ALLOCATABLE :: testIDX(:),testWGT(:)
INTEGER(SIK),ALLOCATABLE :: testIDX(:),testWGT(:),recvcounts(:)
INTEGER(SLK),ALLOCATABLE :: ranks(:),ranks2(:,:)
LOGICAL(SBK) :: bool,bool1d(10),bool2d(2,5),bool3d(2,5,2),bool4d(2,5,2,5)
TYPE(MPI_EnvType) :: testMPI,testMPI2
@@ -294,6 +296,111 @@ SUBROUTINE testMPIEnv()
ENDIF
ENDDO
COMPONENT_TEST('%gatherv')
IF(testMPI%nproc > 1) THEN
IF(testMPI%rank == 0) THEN
sendSSK1 = [1.0_SSK, 2.0_SSK, 3.0_SSK]
ELSE
sendSSK1 = [4.0_SSK, 5.0_SSK]
ENDIF
CALL testMPI%gatherv(sendSSK1,recvSSK1,recvcounts)
IF(testMPI%rank == 0) THEN
ASSERT_EQ(SIZE(recvSSK1),5,'SIZE receive')
ASSERT_APPROXEQ(recvSSK1(1),1.0_SSK,'receive(1)')
ASSERT_APPROXEQ(recvSSK1(2),2.0_SSK,'receive(2)')
ASSERT_APPROXEQ(recvSSK1(3),3.0_SSK,'receive(3)')
ASSERT_APPROXEQ(recvSSK1(4),4.0_SSK,'receive(4)')
ASSERT_APPROXEQ(recvSSK1(5),5.0_SSK,'receive(5)')
ASSERT_EQ(recvcounts(1),3,'recvcounts(1)')
ASSERT_EQ(recvcounts(2),2,'recvcounts(2)')
DEALLOCATE(recvSSK1)
DEALLOCATE(recvcounts)
ELSE
ASSERT(.NOT.ALLOCATED(recvSSK1),'non-root receive')
ASSERT(.NOT.ALLOCATED(recvcounts),'non-root recvcounts')
ENDIF
CALL testMPI%gatherv(sendSSK1,recvSSK1,recvcounts,1)
IF(testMPI%rank == 1) THEN
ASSERT_EQ(SIZE(recvSSK1),5,'SIZE receive')
ASSERT_APPROXEQ(recvSSK1(1),1.0_SSK,'receive(1)')
ASSERT_APPROXEQ(recvSSK1(2),2.0_SSK,'receive(2)')
ASSERT_APPROXEQ(recvSSK1(3),3.0_SSK,'receive(3)')
ASSERT_APPROXEQ(recvSSK1(4),4.0_SSK,'receive(4)')
ASSERT_APPROXEQ(recvSSK1(5),5.0_SSK,'receive(5)')
ASSERT_EQ(recvcounts(1),3,'recvcounts(1)')
ASSERT_EQ(recvcounts(2),2,'recvcounts(2)')
DEALLOCATE(recvSSK1)
DEALLOCATE(recvcounts)
ELSE
ASSERT(.NOT.ALLOCATED(recvSSK1),'non-root receive')
ASSERT(.NOT.ALLOCATED(recvcounts),'non-root recvcounts')
ENDIF
ELSE
sendSSK1 = [1.0_SSK, 2.0_SSK, 3.0_SSK, 4.0_SSK, 5.0_SSK]
CALL testMPI%gatherv(sendSSK1,recvSSK1,recvcounts)
ASSERT_EQ(SIZE(recvSSK1),5,'SIZE receive')
ASSERT_APPROXEQ(recvSSK1(1),1.0_SSK,'receive(1)')
ASSERT_APPROXEQ(recvSSK1(2),2.0_SSK,'receive(2)')
ASSERT_APPROXEQ(recvSSK1(3),3.0_SSK,'receive(3)')
ASSERT_APPROXEQ(recvSSK1(4),4.0_SSK,'receive(4)')
ASSERT_APPROXEQ(recvSSK1(5),5.0_SSK,'receive(5)')
ASSERT_EQ(recvcounts(1),5,'recvcounts(1)')
DEALLOCATE(recvSSK1)
DEALLOCATE(recvcounts)
ENDIF
IF(testMPI%nproc > 1) THEN
IF(testMPI%rank == 0) THEN
sendSDK1 = [1.0_SDK, 2.0_SDK, 3.0_SDK]
ELSE
sendSDK1 = [4.0_SDK, 5.0_SDK]
ENDIF
CALL testMPI%gatherv(sendSDK1,recvSDK1,recvcounts)
IF(testMPI%rank == 0) THEN
ASSERT_EQ(SIZE(recvSDK1),5,'SIZE receive')
ASSERT_APPROXEQ(recvSDK1(1),1.0_SDK,'receive(1)')
ASSERT_APPROXEQ(recvSDK1(2),2.0_SDK,'receive(2)')
ASSERT_APPROXEQ(recvSDK1(3),3.0_SDK,'receive(3)')
ASSERT_APPROXEQ(recvSDK1(4),4.0_SDK,'receive(4)')
ASSERT_APPROXEQ(recvSDK1(5),5.0_SDK,'receive(5)')
ASSERT_EQ(recvcounts(1),3,'recvcounts(1)')
ASSERT_EQ(recvcounts(2),2,'recvcounts(2)')
DEALLOCATE(recvSDK1)
DEALLOCATE(recvcounts)
ELSE
ASSERT(.NOT.ALLOCATED(recvSDK1),'non-root receive')
ASSERT(.NOT.ALLOCATED(recvcounts),'non-root recvcounts')
ENDIF
CALL testMPI%gatherv(sendSDK1,recvSDK1,recvcounts,1)
IF(testMPI%rank == 1) THEN
ASSERT_EQ(SIZE(recvSDK1),5,'SIZE receive')
ASSERT_APPROXEQ(recvSDK1(1),1.0_SDK,'receive(1)')
ASSERT_APPROXEQ(recvSDK1(2),2.0_SDK,'receive(2)')
ASSERT_APPROXEQ(recvSDK1(3),3.0_SDK,'receive(3)')
ASSERT_APPROXEQ(recvSDK1(4),4.0_SDK,'receive(4)')
ASSERT_APPROXEQ(recvSDK1(5),5.0_SDK,'receive(5)')
ASSERT_EQ(recvcounts(1),3,'recvcounts(1)')
ASSERT_EQ(recvcounts(2),2,'recvcounts(2)')
DEALLOCATE(recvSDK1)
DEALLOCATE(recvcounts)
ELSE
ASSERT(.NOT.ALLOCATED(recvSDK1),'non-root receive')
ASSERT(.NOT.ALLOCATED(recvcounts),'non-root recvcounts')
ENDIF
ELSE
sendSDK1 = [1.0_SDK, 2.0_SDK, 3.0_SDK, 4.0_SDK, 5.0_SDK]
CALL testMPI%gatherv(sendSDK1,recvSDK1,recvcounts)
ASSERT_EQ(SIZE(recvSDK1),5,'SIZE receive')
ASSERT_APPROXEQ(recvSDK1(1),1.0_SDK,'receive(1)')
ASSERT_APPROXEQ(recvSDK1(2),2.0_SDK,'receive(2)')
ASSERT_APPROXEQ(recvSDK1(3),3.0_SDK,'receive(3)')
ASSERT_APPROXEQ(recvSDK1(4),4.0_SDK,'receive(4)')
ASSERT_APPROXEQ(recvSDK1(5),5.0_SDK,'receive(5)')
ASSERT_EQ(recvcounts(1),5,'recvcounts(1)')
DEALLOCATE(recvSDK1)
DEALLOCATE(recvcounts)
ENDIF
COMPONENT_TEST('%send/%recv')
sbuf_SIK = 0
IF(testMPI%nproc > 1) THEN
......