#include "PACKAGES_CONFIG.h"
#include "CPP_EEOPTIONS.h"

C--   File global_sum_singlecpu.F: Routines that perform global sum
C                                  on a single CPU
C--    Contents
C--    o GLOBAL_SUM_SINGLECPU_RL
C--    o GLOBAL_SUM_SINGLECPU_RS <- not yet coded

C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|
CBOP
C     !ROUTINE: GLOBAL_SUM_SINGLECPU_RL

C     !INTERFACE:
      SUBROUTINE GLOBAL_SUM_SINGLECPU_RL(
     I                       phiLocal,
     O                       sumPhi,
     I                       oLi, oLj, myThid )
      IMPLICIT NONE
C     !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE GLOBAL\_SUM\_SINGLECPU\_RL
C     | o Handle sum for _RL data.
C     *==========================================================*
C     | Global sum of a 2-D array, independent of tiling, since
C     | the sum is performed on a single CPU, in REAL*8 arithmetic.
C     *==========================================================*

C     !USES:
C     == Global data ==
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"
#include "GLOBAL_SUM.h"
#ifdef ALLOW_EXCH2
#include "W2_EXCH2_SIZE.h"
#include "W2_EXCH2_TOPOLOGY.h"
#endif
#include "EEBUFF_SCPU.h"
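C     Note: the shared buffers used below (sharedLocBuf_r8,
C     xy_buffer_r8, phiGSR8) are declared in the headers above.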

C     !INPUT/OUTPUT PARAMETERS:
C     == Routine arguments ==
C     phiLocal :: local input array; only the interior points
C                 (1:sNx,1:sNy of each tile) enter the sum.
C     sumPhi   :: Result of sum.
C     oLi, oLj :: overlap size of input array in I & J direction.
C     myThid   :: My thread id.
      INTEGER oLi, oLj
      _RL    phiLocal(1-oLi:sNx+oLi,1-oLj:sNy+oLj,nSx,nSy)
      _RL     sumPhi
      INTEGER myThid

C     !LOCAL VARIABLES:
C     == Local variables ==
C- type declaration of sumAll:
C         needs to have the same length as MPI_DOUBLE_PRECISION
      LOGICAL useExch2GlobLayOut, zeroBuff
      INTEGER xSize, ySize
      INTEGER i, j, ij
      INTEGER bi, bj
      Real*8  sumAll
#ifdef ALLOW_USE_MPI
      INTEGER pId, idest, itag
      INTEGER istatus(MPI_STATUS_SIZE), ierr
#endif /* ALLOW_USE_MPI */
CEOP

#ifdef ALLOW_EXCH2
      zeroBuff = .TRUE.
      useExch2GlobLayOut = .TRUE.
      xSize = exch2_global_Nx
      ySize = exch2_global_Ny
#else /* ALLOW_EXCH2 */
      zeroBuff = .FALSE.
      useExch2GlobLayOut = .FALSE.
      xSize = Nx
      ySize = Ny
#endif /* ALLOW_EXCH2 */
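C     Note: with EXCH2 the global map can contain unused points;
C     zeroBuff requests that the gather buffer be cleared first so
C     that such points cannot contribute to the sum.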
#ifdef ALLOW_USE_MPI
      idest = 0
      itag  = 0
#endif

C--   Copy (converting to real*8) into shared buffer:
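C     (each thread copies only its own range of tiles bi,bj)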
      DO bj = myByLo(myThid), myByHi(myThid)
       DO bi = myBxLo(myThid), myBxHi(myThid)
        DO j=1,sNy
         DO i=1,sNx
           sharedLocBuf_r8(i,j,bi,bj) = phiLocal(i,j,bi,bj)
         ENDDO
        ENDDO
       ENDDO
      ENDDO

C--   Master thread does the communications and the global sum
C--   Master thread cannot start until everyone is ready:
      CALL BAR2( myThid )
      _BEGIN_MASTER( myThid )

C--   Gather local arrays
      CALL GATHER_2D_R8(
     O                       xy_buffer_r8,
     I                       sharedLocBuf_r8,
     I                       xSize, ySize,
     I                       useExch2GlobLayOut, zeroBuff, myThid )
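C     (on process 0, xy_buffer_r8 now holds the full xSize*ySize field)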

      IF ( myProcId.EQ.0 ) THEN

C--   Process 0 sums the global array
        sumAll = 0. _d 0
        DO ij=1,xSize*ySize
          sumAll = sumAll + xy_buffer_r8(ij)
        ENDDO

#ifdef ALLOW_USE_MPI
C--   Process 0 sends result to all other processes
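C     (a manual broadcast: one point-to-point send per receiving rank)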
        IF ( usingMPI ) THEN
         DO pId = 1, (nPx*nPy)-1
          CALL MPI_SEND (sumAll, 1, MPI_DOUBLE_PRECISION,
     &                   pId, itag, MPI_COMM_MODEL, ierr)
         ENDDO
        ENDIF

      ELSEIF ( usingMPI ) THEN

C--   All processes except 0 receive result from process 0
          CALL MPI_RECV (sumAll, 1, MPI_DOUBLE_PRECISION,
     &         idest, itag, MPI_COMM_MODEL, istatus, ierr)

#endif /* ALLOW_USE_MPI */

      ENDIF
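C     (the master thread of every process now holds sumAll)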

C--   Write solution to shared buffer (all threads can see it)
      phiGSR8(1,0) = sumAll

      _END_MASTER( myThid )
C--   Everyone waits for the Master thread to be ready
      CALL BAR2( myThid )

C--   set result for every thread
      sumPhi = phiGSR8(1,0)

      RETURN
      END
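
C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|

C--   Illustrative usage sketch (not part of the original file): fills
C     a tiled 2-D field that has no overlap regions (oLi = oLj = 0)
C     with ones and takes its global sum, which should then equal the
C     total number of active grid points.  The subroutine and variable
C     names below are hypothetical.
      SUBROUTINE GLOBAL_SUM_SCPU_DEMO( myThid )
      IMPLICIT NONE
#include "SIZE.h"
#include "EEPARAMS.h"
C     myThid :: My thread id.
      INTEGER myThid
      INTEGER i, j, bi, bj
      _RL myField(1:sNx,1:sNy,nSx,nSy)
      _RL globalSum
C--   Fill this thread's tiles with ones
      DO bj = myByLo(myThid), myByHi(myThid)
       DO bi = myBxLo(myThid), myBxHi(myThid)
        DO j=1,sNy
         DO i=1,sNx
           myField(i,j,bi,bj) = 1. _d 0
         ENDDO
        ENDDO
       ENDDO
      ENDDO
C--   No overlap regions in myField, hence oLi = oLj = 0
      CALL GLOBAL_SUM_SINGLECPU_RL(
     I                       myField,
     O                       globalSum,
     I                       0, 0, myThid )
      RETURN
      END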