mpp_domains.F90
1 !***********************************************************************
2 !* GNU Lesser General Public License
3 !*
4 !* This file is part of the GFDL Flexible Modeling System (FMS).
5 !*
6 !* FMS is free software: you can redistribute it and/or modify it under
7 !* the terms of the GNU Lesser General Public License as published by
8 !* the Free Software Foundation, either version 3 of the License, or (at
9 !* your option) any later version.
10 !*
11 !* FMS is distributed in the hope that it will be useful, but WITHOUT
12 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 !* for more details.
15 !*
16 !* You should have received a copy of the GNU Lesser General Public
17 !* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
18 !***********************************************************************
19 !-----------------------------------------------------------------------
20 !> @defgroup mpp_domains_mod mpp_domains_mod
21 !> @ingroup mpp
22 !> @brief Domain decomposition and domain update for message-passing codes
23 !> @author V. Balaji SGI/GFDL Princeton University
24 !!
25 !> A set of simple calls for domain
26 !! decomposition and domain updates on rectilinear grids. It requires the
27 !! module mpp.F90, upon which it is built.\n
28 !! Scalable implementations of finite-difference codes are generally
29 !! based on decomposing the model domain into subdomains that are
30 !! distributed among processors. These domains will then be obliged to
31 !! exchange data at their boundaries if data dependencies are merely
32 !! nearest-neighbour, or may need to acquire information from the global
33 !! domain if there are extended data dependencies, as in the spectral
34 !! transform. The domain decomposition is a key operation in the
35 !! development of parallel codes.\n
36 !!\n
37 !! mpp_domains_mod provides a domain decomposition and domain
38 !! update API for rectilinear grids, built on top of the mpp_mod API for message passing.
39 !! Features of mpp_domains_mod include:\n
40 !!\n
41 !! Simple, minimal API, with free access to the underlying API for more complicated operations.\n
42 !!\n
43 !! Designed for typical use in climate/weather CFD codes.\n
44 !!
45 !> @par Domains
46 !! It is assumed that domain decomposition will mainly be in 2
47 !! horizontal dimensions, which will in general be the two
48 !! fastest-varying indices. There is a separate implementation of 1D
49 !! decomposition on the fastest-varying index, and 1D decomposition on
50 !! the second index, treated as a special case of 2D decomposition, is
51 !! also possible. We define domain as the grid associated with a <I>task</I>.
52 !! We define the compute domain as the set of gridpoints that are
53 !! computed by a task, and the data domain as the set of points
54 !! that are required by the task for the calculation. There can in
55 !! general be more than 1 task per PE, though often
56 !! the number of domains is the same as the processor count. We define
57 !! the global domain as the global computational domain of the
58 !! entire model (i.e., the same as the computational domain if run on a
59 !! single processor). 2D domains are defined using a derived type domain2D,
60 !! constructed as follows (see comments in code for more details).
61 !!
62 !! type, public :: domain_axis_spec
63 !! private
64 !! integer :: begin, end, size, max_size
65 !! logical :: is_global
66 !! end type domain_axis_spec
67 !!
68 !! type, public :: domain1D
69 !! private
70 !! type(domain_axis_spec) :: compute, data, global, active
71 !! logical :: mustputb, mustgetb, mustputf, mustgetf, folded
72 !! type(domain1D), pointer, dimension(:) :: list
73 !! integer :: pe ! pe to which the domain is assigned
74 !! integer :: pos
75 !! end type domain1D
76 !!
77 !! type, public :: domain2D
78 !! private
79 !! type(domain1D) :: x
80 !! type(domain1D) :: y
81 !! type(domain2D), pointer, dimension(:) :: list
82 !! integer :: pe ! PE to which this domain is assigned
83 !! integer :: pos
84 !! end type domain2D
85 !!
86 !! type(domain1D), public :: NULL_DOMAIN1D
87 !! type(domain2D), public :: NULL_DOMAIN2D
88 
89 !> @addtogroup mpp_domains_mod
90 !> @{
91 
92 module mpp_domains_mod
93 
94 #if defined(use_libMPI)
95  use mpi
96 #endif
97 
98  use mpp_parameter_mod, only : mpp_debug, mpp_verbose, mpp_domain_time
99  use mpp_parameter_mod, only : global_data_domain, cyclic_global_domain, global, cyclic
100  use mpp_parameter_mod, only : agrid, bgrid_sw, bgrid_ne, cgrid_ne, cgrid_sw, dgrid_ne, dgrid_sw
101  use mpp_parameter_mod, only : fold_west_edge, fold_east_edge, fold_south_edge, fold_north_edge
102  use mpp_parameter_mod, only : wupdate, eupdate, supdate, nupdate, xupdate, yupdate
103  use mpp_parameter_mod, only : non_bitwise_exact_sum, bitwise_exact_sum, mpp_domain_time
104  use mpp_parameter_mod, only : center, corner, scalar_pair, scalar_bit, bitwise_efp_sum
105  use mpp_parameter_mod, only : north, north_east, east, south_east
106  use mpp_parameter_mod, only : south, south_west, west, north_west
107  use mpp_parameter_mod, only : max_domain_fields, null_pe, domain_id_base
108  use mpp_parameter_mod, only : zero, ninety, minus_ninety, one_hundred_eighty, max_tiles
109  use mpp_parameter_mod, only : event_send, event_recv, root_global
110  use mpp_parameter_mod, only : nonblock_update_tag, edgeonly, edgeupdate
111  use mpp_parameter_mod, only : nonsymedge, nonsymedgeupdate
112  use mpp_data_mod, only : mpp_domains_stack, ptr_domains_stack
113  use mpp_data_mod, only : mpp_domains_stack_nonblock, ptr_domains_stack_nonblock
114  use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_npes, mpp_error, fatal, warning, note
116  use mpp_mod, only : mpp_clock_id, mpp_clock_begin, mpp_clock_end
117  use mpp_mod, only : mpp_max, mpp_min, mpp_sum, mpp_get_current_pelist, mpp_broadcast
118  use mpp_mod, only : mpp_sum_ad
119  use mpp_mod, only : mpp_sync, mpp_init, lowercase
120  use mpp_mod, only : input_nml_file, mpp_alltoall
121  use mpp_mod, only : mpp_type, mpp_byte
122  use mpp_mod, only : mpp_type_create, mpp_type_free
123  use mpp_mod, only : comm_tag_1, comm_tag_2, comm_tag_3, comm_tag_4
124  use mpp_mod, only : mpp_declare_pelist, mpp_set_current_pelist
125  use mpp_memutils_mod, only : mpp_memuse_begin, mpp_memuse_end
126  use mpp_efp_mod, only : mpp_reproducing_sum
127  use platform_mod
128  implicit none
129  private
130 
131  !--- public parameters imported from mpp_domains_parameter_mod
132  public :: global_data_domain, cyclic_global_domain, bgrid_ne, bgrid_sw, cgrid_ne, cgrid_sw, agrid
133  public :: dgrid_ne, dgrid_sw, fold_west_edge, fold_east_edge, fold_south_edge, fold_north_edge
134  public :: wupdate, eupdate, supdate, nupdate, xupdate, yupdate
135  public :: non_bitwise_exact_sum, bitwise_exact_sum, mpp_domain_time, bitwise_efp_sum
136  public :: center, corner, scalar_pair
137  public :: north, north_east, east, south_east
138  public :: south, south_west, west, north_west
139  public :: zero, ninety, minus_ninety, one_hundred_eighty
140  public :: edgeupdate, nonsymedgeupdate
141 
142  !--- public data imported from mpp_data_mod
143  public :: null_domain1d, null_domain2d
144 
147 
148  !--- public interface from mpp_domains_util.h
151  public :: mpp_get_layout, mpp_get_pelist, operator(.EQ.), operator(.NE.)
157  public :: mpp_get_tile_list
166  public :: mpp_clear_group_update
168  public :: mpp_get_global_domains
170 
171  !--- public interface from mpp_domains_reduce.h
174  !--- public interface from mpp_domains_misc.h
182  public :: mpp_get_boundary
183  public :: mpp_update_domains_ad
184  public :: mpp_get_boundary_ad
186  !--- public interface from mpp_domains_define.h
191 
192  !--- public interface for unstruct domain
193  public :: mpp_define_unstruct_domain, domainug, mpp_get_ug_io_domain
194  public :: mpp_get_ug_domain_npes, mpp_get_ug_compute_domain, mpp_get_ug_domain_tile_id
195  public :: mpp_get_ug_domain_pelist, mpp_get_ug_domain_grid_index
196  public :: mpp_get_ug_domain_ntiles, mpp_get_ug_global_domain
197  public :: mpp_global_field_ug, mpp_get_ug_domain_tile_list, mpp_get_ug_compute_domains
198  public :: mpp_define_null_ug_domain, null_domainug, mpp_get_ug_domains_index
199  public :: mpp_get_ug_sg_domain, mpp_get_ug_domain_tile_pe_inf
200 
201  !--- public interface from mpp_define_domains.inc
203  public :: mpp_shift_nest_domains
204  public :: mpp_get_nest_coarse_domain, mpp_get_nest_fine_domain
205  public :: mpp_is_nest_coarse, mpp_is_nest_fine
206  public :: mpp_get_nest_pelist, mpp_get_nest_npes
207  public :: mpp_get_nest_fine_pelist, mpp_get_nest_fine_npes
208 
209 !----------
210 !ug support
211  public :: mpp_domain_ug_is_tile_root_pe
212  public :: mpp_deallocate_domainug
213  public :: mpp_get_io_domain_ug_layout
214 !----------
215 
216  integer, parameter :: NAME_LENGTH = 64
217  integer, parameter :: MAXLIST = 100
218  integer, parameter :: MAXOVERLAP = 200
219  integer, parameter :: FIELD_S = 0
220  integer, parameter :: FIELD_X = 1
221  integer, parameter :: FIELD_Y = 2
222 
223  !> @}
224 
225  ! data types used by mpp_domains_mod
226 
227  !> @brief Private type for axis specification data for an unstructured grid
228  !> @ingroup mpp_domains_mod
229  type :: unstruct_axis_spec
230  private
231  integer :: begin, end, size, max_size
232  integer :: begin_index, end_index
233  end type unstruct_axis_spec
234 
235  !> Private type for axis specification data for an unstructured domain
236  !> @ingroup mpp_domains_mod
237  type :: unstruct_domain_spec
238  private
239  type(unstruct_axis_spec) :: compute
240  integer :: pe
241  integer :: pos
242  integer :: tile_id
243  end type unstruct_domain_spec
244 
245  !> Private type
246  !> @ingroup mpp_domains_mod
247  type :: unstruct_overlap_type
248  private
249  integer :: count = 0
250  integer :: pe
251  integer, pointer :: i(:)=>null()
252  integer, pointer :: j(:)=>null()
253  end type unstruct_overlap_type
254 
255  !> Private type
256  !> @ingroup mpp_domains_mod
257  type :: unstruct_pass_type
258  private
259  integer :: nsend, nrecv
260  type(unstruct_overlap_type), pointer :: recv(:)=>null()
261  type(unstruct_overlap_type), pointer :: send(:)=>null()
262  end type unstruct_pass_type
263 
264  !> Domain information for managing data on unstructured grids
265  !> @ingroup mpp_domains_mod
266  type :: domainug
267  private
268  type(unstruct_axis_spec) :: compute, global !< axis specifications
269  type(unstruct_domain_spec), pointer :: list(:)=>null() !<
270  type(domainug), pointer :: io_domain=>null() !<
271  type(unstruct_pass_type) :: sg2ug
272  type(unstruct_pass_type) :: ug2sg
273  integer, pointer :: grid_index(:) => null() !< index of grid on current pe
274  type(domain2d), pointer :: sg_domain => null()
275  integer :: pe
276  integer :: pos
277  integer :: ntiles
278  integer :: tile_id
279  integer :: tile_root_pe
280  integer :: tile_npes
281  integer :: npes_io_group
282  integer(i4_kind) :: io_layout
283  end type domainug
284 
285  !> Used to specify index limits along an axis of a domain
286  !> @ingroup mpp_domains_mod
287  type :: domain_axis_spec
288  private
289  integer :: begin !< start of domain axis
290  integer :: end !< end of domain axis
291  integer :: size !< size of domain axis
292  integer :: max_size !< max size in set
293  logical :: is_global !< .true. if domain axis extent covers global domain
294  end type domain_axis_spec
295 
296  !> A private type used to specify index limits for a domain decomposition
297  !> @ingroup mpp_domains_mod
298  type :: domain1d_spec
299  private
300  type(domain_axis_spec) :: compute
301  type(domain_axis_spec) :: global
302  integer :: pos
303  end type domain1d_spec
304 
305  !> @brief Private type to specify multiple index limits and pe information for a 2D domain
306  !> @ingroup mpp_domains_mod
307  type :: domain2d_spec
308  private
309  type(domain1d_spec), pointer :: x(:) => null() !< x-direction domain decomposition
310  type(domain1d_spec), pointer :: y(:) => null() !< y-direction domain decomposition
311  integer, pointer :: tile_id(:) => null() !< tile id of each tile
312  integer :: pe !< PE to which this domain is assigned
313  integer :: pos !< position of this PE within link list
314  integer :: tile_root_pe !< root pe of tile.
315  end type domain2d_spec
316 
317  !> Type for overlapping data
318  !> @ingroup mpp_domains_mod
319  type :: overlap_type
320  private
321  integer :: count = 0 !< number of overlapping
322  integer :: pe
323  integer :: start_pos !< start position in the buffer
324  integer :: totsize !< all message size
325  integer , pointer :: msgsize(:) => null() !< overlapping msgsize to be sent or received
326  integer, pointer :: tileme(:) => null() !< my tile id for this overlap
327  integer, pointer :: tilenbr(:) => null() !< neighbor tile id for this overlap
328  integer, pointer :: is(:) => null() !< starting i-index
329  integer, pointer :: ie(:) => null() !< ending i-index
330  integer, pointer :: js(:) => null() !< starting j-index
331  integer, pointer :: je(:) => null() !< ending j-index
332  integer, pointer :: dir(:) => null() !< direction ( value 1,2,3,4 = E,S,W,N)
333  integer, pointer :: rotation(:) => null() !< rotation angle.
334  integer, pointer :: index(:) => null() !< for refinement
335  logical, pointer :: from_contact(:) => null() !< indicate if the overlap is computed from
336  !! define_contact_overlap
337  end type overlap_type
338 
339  !> Private type for overlap specifications
340  !> @ingroup mpp_domains_mod
341  type :: overlapspec
342  private
343  integer :: whalo, ehalo, shalo, nhalo !< halo size
344  integer :: xbegin, xend, ybegin, yend
345  integer :: nsend, nrecv
346  integer :: sendsize, recvsize
347  type(overlap_type), pointer :: send(:) => null()
348  type(overlap_type), pointer :: recv(:) => null()
349  type(overlapspec), pointer :: next => null()
350  end type overlapspec
351 
352  !> @brief Upper and lower x and y bounds for a tile
353  !> @ingroup mpp_domains_mod
354  type :: tile_type
355  integer :: xbegin, xend, ybegin, yend
356  end type tile_type
357 
358  !> The domain2D type contains all the necessary information to
359  !! define the global, compute and data domains of each task, as well as the PE
360  !! associated with the task. The PEs from which remote data may be
361  !! acquired to update the data domain are also contained in a linked list of neighbours.
362  !!
363 !! Domain types of higher rank can be constructed from type domain1D;
364 !! typically we only need 1D and 2D, but could need higher (e.g. 3D LES).
365 !! Some elements are repeated below if they are needed once per domain, not once per axis.
366  !> @ingroup mpp_domains_mod
367  TYPE :: domain2d
368  private
369  character(len=NAME_LENGTH) :: name='unnamed' !< name of the domain; default is "unnamed"
370  integer(i8_kind) :: id
371  integer :: pe !< PE to which this domain is assigned
372  integer :: fold
373  integer :: pos !< position of this PE within link list
374  logical :: symmetry !< indicate the domain is symmetric or non-symmetric.
375  integer :: whalo, ehalo !< halo size in x-direction
376  integer :: shalo, nhalo !< halo size in y-direction
377  integer :: ntiles !< number of tiles within mosaic
378  integer :: comm_id !< MPI communicator for the mosaic
379  integer :: tile_comm_id !< MPI communicator for this tile of domain
380  integer :: max_ntile_pe !< maximum number of tiles on any pe in the pelist.
381  integer :: ncontacts !< number of contact regions within the mosaic.
382  logical :: rotated_ninety !< indicate if any contact rotate NINETY or MINUS_NINETY
383  logical :: initialized=.false. !< indicate if the overlapping is computed or not.
384  integer :: tile_root_pe !< root pe of current tile.
385  integer :: io_layout(2) !< io_layout, will be set through mpp_define_io_domain
386  !! default = domain layout
387  integer, pointer :: pearray(:,:) => null() !< pe of each layout position
388  integer, pointer :: tile_id(:) => null() !< tile id of each tile on current processor
389  integer, pointer :: tile_id_all(:)=> null() !< tile id of all the tiles of domain
390  type(domain1d), pointer :: x(:) => null() !< x-direction domain decomposition
391  type(domain1d), pointer :: y(:) => null() !< y-direction domain decomposition
392  type(domain2d_spec),pointer :: list(:) => null() !< domain decomposition on pe list
393  type(tile_type), pointer :: tilelist(:) => null() !< store tile information
394  type(overlapspec), pointer :: check_c => null() !< send and recv information for boundary
395  !! consistency check of C-cell
396  type(overlapspec), pointer :: check_e => null() !< send and recv information for boundary
397  !! consistency check of E-cell
398  type(overlapspec), pointer :: check_n => null() !< send and recv information for boundary
399  !! consistency check of N-cell
400  type(overlapspec), pointer :: bound_c => null() !< send information for getting boundary
401  !! value for symmetry domain.
402  type(overlapspec), pointer :: bound_e => null() !< send information for getting boundary
403  !! value for symmetry domain.
404  type(overlapspec), pointer :: bound_n => null() !< send information for getting boundary
405  !! value for symmetry domain.
406  type(overlapspec), pointer :: update_t => null() !< send and recv information for halo update of T-cell.
407  type(overlapspec), pointer :: update_e => null() !< send and recv information for halo update of E-cell.
408  type(overlapspec), pointer :: update_c => null() !< send and recv information for halo update of C-cell.
409  type(overlapspec), pointer :: update_n => null() !< send and recv information for halo update of N-cell.
410  type(domain2d), pointer :: io_domain => null() !< domain for IO, will be set through calling
411  !! mpp_set_io_domain ( this will be changed).
412  END TYPE domain2d
413 
414  !> Type used to represent the contact between tiles.
415  !> @note This type will only be used in mpp_domains_define.inc
416  !> @ingroup mpp_domains_mod
417  type, private :: contact_type
418  private
419  integer :: ncontact !< number of neighbor tile.
420  integer, pointer :: tile(:) =>null() !< neighbor tile
421  integer, pointer :: align1(:)=>null(), align2(:)=>null() !< alignment of me and neighbor
422  real, pointer :: refine1(:)=>null(), refine2(:)=>null() !
423  integer, pointer :: is1(:)=>null(), ie1(:)=>null() !< i-index of current tile representing contact
424  integer, pointer :: js1(:)=>null(), je1(:)=>null() !< j-index of current tile representing contact
425  integer, pointer :: is2(:)=>null(), ie2(:)=>null() !< i-index of neighbor tile representing contact
426  integer, pointer :: js2(:)=>null(), je2(:)=>null() !< j-index of neighbor tile representing contact
427  end type contact_type
428 
429  !> index bounds for use in @ref nestSpec
430  !> @ingroup mpp_domains_mod
431  type :: index_type
432  integer :: is_me, ie_me, js_me, je_me
433  integer :: is_you, ie_you, js_you, je_you
434  end type index_type
435 
436  !> Used to specify bounds and index information for nested tiles as a linked list
437  !> @ingroup mpp_domains_mod
438  type :: nestspec
439  private
440  integer :: xbegin, xend, ybegin, yend
441  integer :: xbegin_c, xend_c, ybegin_c, yend_c
442  integer :: xbegin_f, xend_f, ybegin_f, yend_f
443  integer :: xsize_c, ysize_c
444  type(index_type) :: west, east, south, north, center
445  integer :: nsend, nrecv
446  integer :: extra_halo
447  type(overlap_type), pointer :: send(:) => null()
448  type(overlap_type), pointer :: recv(:) => null()
449  type(nestSpec), pointer :: next => null()
450 
451  end type nestspec
452 
453  !> @brief domain with nested fine and coarse tiles
454  !> @ingroup mpp_domains_mod
455  type :: nest_domain_type
456  character(len=NAME_LENGTH) :: name
457  integer :: num_level
458  integer, pointer :: nest_level(:) => null() !< Added for moving nest functionality
459  type(nest_level_type), pointer :: nest(:) => null()
460  integer :: num_nest
461  integer, pointer :: tile_fine(:), tile_coarse(:)
462  integer, pointer :: istart_fine(:), iend_fine(:), jstart_fine(:), jend_fine(:)
463  integer, pointer :: istart_coarse(:), iend_coarse(:), jstart_coarse(:), jend_coarse(:)
464  end type nest_domain_type
465 
466  !> Private type to hold data for each level of nesting
467  !> @ingroup mpp_domains_mod
468  type :: nest_level_type
469  private
470  logical :: on_level
471  logical :: is_fine, is_coarse
472  integer :: num_nest
473  integer :: my_num_nest
474  integer, pointer :: my_nest_id(:)
475  integer, pointer :: tile_fine(:), tile_coarse(:)
476  integer, pointer :: istart_fine(:), iend_fine(:), jstart_fine(:), jend_fine(:)
477  integer, pointer :: istart_coarse(:), iend_coarse(:), jstart_coarse(:), jend_coarse(:)
478  integer :: x_refine, y_refine
479  logical :: is_fine_pe, is_coarse_pe
480  integer, pointer :: pelist(:) => null()
481  integer, pointer :: pelist_fine(:) => null()
482  integer, pointer :: pelist_coarse(:) => null()
483  type(nestSpec), pointer :: C2F_T => null()
484  type(nestSpec), pointer :: C2F_C => null()
485  type(nestSpec), pointer :: C2F_E => null()
486  type(nestSpec), pointer :: C2F_N => null()
487  type(nestSpec), pointer :: F2C_T => null()
488  type(nestSpec), pointer :: F2C_C => null()
489  type(nestSpec), pointer :: F2C_E => null()
490  type(nestSpec), pointer :: F2C_N => null()
491  type(domain2d), pointer :: domain_fine => null()
492  type(domain2d), pointer :: domain_coarse => null()
493  end type nest_level_type
494 
495 
496  !> Used for sending domain data between pe's
497  !> @ingroup mpp_domains_mod
498  type :: domaincommunicator2d
499  private
500  logical :: initialized=.false.
501  integer(i8_kind) :: id=-9999
502  integer(i8_kind) :: l_addr =-9999
503  integer(i8_kind) :: l_addrx =-9999
504  integer(i8_kind) :: l_addry =-9999
505  type(domain2D), pointer :: domain =>null()
506  type(domain2D), pointer :: domain_in =>null()
507  type(domain2D), pointer :: domain_out =>null()
508  type(overlapSpec), pointer :: send(:,:,:,:) => null()
509  type(overlapSpec), pointer :: recv(:,:,:,:) => null()
510  integer, dimension(:,:), allocatable :: sendis
511  integer, dimension(:,:), allocatable :: sendie
512  integer, dimension(:,:), allocatable :: sendjs
513  integer, dimension(:,:), allocatable :: sendje
514  integer, dimension(:,:), allocatable :: recvis
515  integer, dimension(:,:), allocatable :: recvie
516  integer, dimension(:,:), allocatable :: recvjs
517  integer, dimension(:,:), allocatable :: recvje
518  logical, dimension(:), allocatable :: S_do_buf
519  logical, dimension(:), allocatable :: R_do_buf
520  integer, dimension(:), allocatable :: cto_pe
521  integer, dimension(:), allocatable :: cfrom_pe
522  integer, dimension(:), allocatable :: S_msize
523  integer, dimension(:), allocatable :: R_msize
524  integer :: Slist_size=0, rlist_size=0
525  integer :: isize=0, jsize=0, ke=0
526  integer :: isize_in=0, jsize_in=0
527  integer :: isize_out=0, jsize_out=0
528  integer :: isize_max=0, jsize_max=0
529  integer :: gf_ioff=0, gf_joff=0
530  ! Remote data
531  integer, dimension(:) , allocatable :: isizeR
532  integer, dimension(:) , allocatable :: jsizeR
533  integer, dimension(:,:), allocatable :: sendisR
534  integer, dimension(:,:), allocatable :: sendjsR
535  integer(i8_kind), dimension(:), allocatable :: rem_addr
536  integer(i8_kind), dimension(:), allocatable :: rem_addrx
537  integer(i8_kind), dimension(:), allocatable :: rem_addry
538  integer(i8_kind), dimension(:,:), allocatable :: rem_addrl
539  integer(i8_kind), dimension(:,:), allocatable :: rem_addrlx
540  integer(i8_kind), dimension(:,:), allocatable :: rem_addrly
541  integer :: position !< data location. T, E, C, or N.
542  end type domaincommunicator2d
543 
544  integer, parameter :: max_request = 100
545 
546  !> Used for nonblocking data transfer
547  !> @ingroup mpp_domains_mod
548  type :: nonblock_type
549  integer :: recv_pos
550  integer :: send_pos
551  integer :: recv_msgsize
552  integer :: send_msgsize
553  integer :: update_flags
554  integer :: update_position
555  integer :: update_gridtype
556  integer :: update_whalo
557  integer :: update_ehalo
558  integer :: update_shalo
559  integer :: update_nhalo
560  integer :: request_send_count
561  integer :: request_recv_count
562  integer, dimension(MAX_REQUEST) :: request_send
563  integer, dimension(MAX_REQUEST) :: request_recv
564  integer, dimension(MAX_REQUEST) :: size_recv
565  integer, dimension(MAX_REQUEST) :: type_recv
566  integer, dimension(MAX_REQUEST) :: buffer_pos_send
567  integer, dimension(MAX_REQUEST) :: buffer_pos_recv
568  integer(i8_kind) :: field_addrs(MAX_DOMAIN_FIELDS)
569  integer(i8_kind) :: field_addrs2(MAX_DOMAIN_FIELDS)
570  integer :: nfields
571  end type nonblock_type
572 
573  !> used for updates on a group
574  !> @ingroup mpp_domains_mod
575  type :: mpp_group_update_type
576  private
577  logical :: initialized = .false.
578  logical :: k_loop_inside = .true.
579  logical :: nonsym_edge = .false.
580  integer :: nscalar = 0
581  integer :: nvector = 0
582  integer :: flags_s=0, flags_v=0
583  integer :: whalo_s=0, ehalo_s=0, shalo_s=0, nhalo_s=0
584  integer :: isize_s=0, jsize_s=0, ksize_s=1
585  integer :: whalo_v=0, ehalo_v=0, shalo_v=0, nhalo_v=0
586  integer :: isize_x=0, jsize_x=0, ksize_v=1
587  integer :: isize_y=0, jsize_y=0
588  integer :: position=0, gridtype=0
589  logical :: recv_s(8), recv_x(8), recv_y(8)
590  integer :: is_s=0, ie_s=0, js_s=0, je_s=0
591  integer :: is_x=0, ie_x=0, js_x=0, je_x=0
592  integer :: is_y=0, ie_y=0, js_y=0, je_y=0
593  integer :: nrecv=0, nsend=0
594  integer :: npack=0, nunpack=0
595  integer :: reset_index_s = 0
596  integer :: reset_index_v = 0
597  integer :: tot_msgsize = 0
598  integer :: from_pe(MAXOVERLAP)
599  integer :: to_pe(MAXOVERLAP)
600  integer :: recv_size(MAXOVERLAP)
601  integer :: send_size(MAXOVERLAP)
602  integer :: buffer_pos_recv(MAXOVERLAP)
603  integer :: buffer_pos_send(MAXOVERLAP)
604  integer :: pack_type(MAXOVERLAP)
605  integer :: pack_buffer_pos(MAXOVERLAP)
606  integer :: pack_rotation(MAXOVERLAP)
607  integer :: pack_size(MAXOVERLAP)
608  integer :: pack_is(MAXOVERLAP)
609  integer :: pack_ie(MAXOVERLAP)
610  integer :: pack_js(MAXOVERLAP)
611  integer :: pack_je(MAXOVERLAP)
612  integer :: unpack_type(MAXOVERLAP)
613  integer :: unpack_buffer_pos(MAXOVERLAP)
614  integer :: unpack_rotation(MAXOVERLAP)
615  integer :: unpack_size(MAXOVERLAP)
616  integer :: unpack_is(MAXOVERLAP)
617  integer :: unpack_ie(MAXOVERLAP)
618  integer :: unpack_js(MAXOVERLAP)
619  integer :: unpack_je(MAXOVERLAP)
620  integer(i8_kind) :: addrs_s(MAX_DOMAIN_FIELDS)
621  integer(i8_kind) :: addrs_x(MAX_DOMAIN_FIELDS)
622  integer(i8_kind) :: addrs_y(MAX_DOMAIN_FIELDS)
623  integer :: buffer_start_pos = -1
624  integer :: request_send(MAX_REQUEST)
625  integer :: request_recv(MAX_REQUEST)
626  integer :: type_recv(MAX_REQUEST)
627  end type mpp_group_update_type
628 
629  !> One dimensional domain used to manage shared data access between pes
630  !> @ingroup mpp_domains_mod
631  type :: domain1d
632  private
633  type(domain_axis_spec) :: compute !< index limits for compute domain
634  type(domain_axis_spec) :: domain_data !< index limits for data domain
635  type(domain_axis_spec) :: global !< index limits for global domain
636  type(domain_axis_spec) :: memory !< index limits for memory domain
637  logical :: cyclic !< true if domain is cyclic
638  type(domain1d), pointer :: list(:) =>null() !< list of each pe's domains
639  integer :: pe !<PE to which this domain is assigned
640  integer :: pos !< position of this PE within link list, i.e domain%list(pos)%pe = pe
641  integer :: goffset !< needed for global sum
642  integer :: loffset !< needed for global sum
643  end type domain1d
644 
645 !#######################################################################
646 
647 !> @addtogroup mpp_domains_mod
648 !> @{
649 !***********************************************************************
650 !
651 ! module variables
652 !
653 !***********************************************************************
654  integer :: pe
655  logical :: module_is_initialized = .false.
656  logical :: debug = .false.
657  logical :: verbose=.false.
658  logical :: mosaic_defined = .false.
659  integer :: mpp_domains_stack_size=0
660  integer :: mpp_domains_stack_hwm=0
661  type(domain1d),save :: null_domain1d
662  type(domain2d),save :: null_domain2d
663  type(domainug),save :: null_domainug
664  integer :: current_id_update = 0
665  integer :: num_update = 0
666  integer :: num_nonblock_group_update = 0
667  integer :: nonblock_buffer_pos = 0
668  integer :: nonblock_group_buffer_pos = 0
669  logical :: start_update = .true.
670  logical :: complete_update = .false.
671  type(nonblock_type), allocatable :: nonblock_data(:)
672  integer, parameter :: max_nonblock_update = 100
673 
674  integer :: group_update_buffer_pos = 0
675  logical :: complete_group_update_on = .false.
676  !-------- The following variables are used in mpp_domains_comm.h
677 
678  integer, parameter :: max_addrs=512
679  integer(i8_kind),dimension(MAX_ADDRS),save :: addrs_sorted=-9999 !< list of sorted local addresses
680  integer, dimension(-1:MAX_ADDRS),save :: addrs_idx=-9999 !< index of address associated with d_comm
681  integer, save :: a_sort_len=0 !< length sorted memory list
682  integer, save :: n_addrs=0 !< number of memory addresses used
683 
684  integer(i8_kind), parameter :: addr2_base = 65536_i8_kind !< = 0x0000000000010000
685  integer, parameter :: max_addrs2=128
686  integer(i8_kind),dimension(MAX_ADDRS2),save :: addrs2_sorted=-9999 !< list of sorted local addresses
687  integer, dimension(-1:MAX_ADDRS2),save :: addrs2_idx=-9999 !< index of addr2 associated with d_comm
688  integer, save :: a2_sort_len=0 !< length sorted memory list
689  integer, save :: n_addrs2=0 !< number of memory addresses used
690 
691  integer, parameter :: max_dom_ids=128
692  integer(i8_kind),dimension(MAX_DOM_IDS),save :: ids_sorted=-9999 !< list of sorted domain identifiers
693  integer, dimension(-1:MAX_DOM_IDS),save :: ids_idx=-9999 !< index of d_comm associated with sorted addresses
694  integer, save :: i_sort_len=0 !< length sorted domain ids list
695  integer, save :: n_ids=0 !< number of domain ids used
696  !!(=i_sort_len; domain ids are never removed)
697 
698  integer, parameter :: max_fields=1024
699  integer(i8_kind), dimension(MAX_FIELDS),save :: dckey_sorted=-9999 !< list of sorted local addresses
700  ! Not sure why static d_comm fails during deallocation of derived type members; allocatable works
701  ! type(DomainCommunicator2D),dimension(MAX_FIELDS),save,target :: d_comm !< domain communicators
702  type(domaincommunicator2d),dimension(:),allocatable,save,target :: d_comm !< domain communicators
703  integer, dimension(-1:MAX_FIELDS),save :: d_comm_idx=-9999 !< index of
704  !! d_comm associated with sorted addresses
705  integer, save :: dc_sort_len=0 !< length sorted comm keys
706 !! (=num active communicators)
707  integer, save :: n_comm=0 !< number of communicators used
708 
709  ! integer(i8_kind), parameter :: GT_BASE=2**8
710  integer(i8_kind), parameter :: gt_base = 256_i8_kind !0x0000000000000100
711 
712  ! integer(i8_kind), parameter :: KE_BASE=2**48
713  integer(i8_kind), parameter :: ke_base = 281474976710656_i8_kind !0x0001000000000000
714 
715  integer(i8_kind) :: domain_cnt=0
716 
717  !--- the following variables are used in mpp_domains_misc.h
718  logical :: domain_clocks_on=.false.
719  integer :: send_clock=0, recv_clock=0, unpk_clock=0
720  integer :: wait_clock=0, pack_clock=0
721  integer :: send_pack_clock_nonblock=0, recv_clock_nonblock=0, unpk_clock_nonblock=0
722  integer :: wait_clock_nonblock=0
723  integer :: nest_send_clock=0, nest_recv_clock=0, nest_unpk_clock=0
724  integer :: nest_wait_clock=0, nest_pack_clock=0
725  integer :: group_recv_clock=0, group_send_clock=0, group_pack_clock=0, group_unpk_clock=0, group_wait_clock=0
726  integer :: nonblock_group_recv_clock=0, nonblock_group_send_clock=0, nonblock_group_pack_clock=0
727  integer :: nonblock_group_unpk_clock=0, nonblock_group_wait_clock=0
728 
729 !> namelist interface
730  character(len=32) :: debug_update_domain = "none" !< When debug_update_domain = none, no debug will be done.
731  !! When debug_update_domain is set to fatal,
732  !! the run will be exited with a fatal error message.
733  !! When debug_update_domain is set to warning,
734  !! the run will output a warning message.
735  !! When debug_update_domain is set to note,
736  !! the run will output a note message.
737  logical :: debug_message_passing = .false. !< Will check the consistency on the boundary between
738  !! processor/tile when updating domain for symmetric domain and
739  !! check the consistency on the north folded edge.
740  integer :: nthread_control_loop = 8 !< Determines the loop order for packing and unpacking.
741  !! When the number of threads is greater than nthread_control_loop,
742  !! the k-loop will be moved outside and combined with the number
743  !! of packs and unpacks. When the number of threads is
744  !! less than or equal to nthread_control_loop, the k-loop
745  !! is moved inside, but still outside of, the j,i loops.
746  logical :: efp_sum_overflow_check = .false. !< If .true., always do overflow_check
747  !! when doing EFP bitwise mpp_global_sum.
748  logical :: use_alltoallw = .false.
749  namelist /mpp_domains_nml/ debug_update_domain, domain_clocks_on, debug_message_passing, nthread_control_loop, &
750  efp_sum_overflow_check, use_alltoallw
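 !  An illustrative &mpp_domains_nml entry (a sketch only; the values shown are arbitrary
 !  choices for demonstration, not recommended defaults):
 !
 !    &mpp_domains_nml
 !      debug_update_domain   = 'warning'
 !      domain_clocks_on      = .true.
 !      debug_message_passing = .false.
 !      nthread_control_loop  = 4
 !    /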
751 
752  !***********************************************************************
753 
754  integer, parameter :: no_check = -1
755  integer :: debug_update_level = no_check
756 !> @}
757 !***********************************************************************
758 !
759 ! public interface from mpp_domains_define.h
760 !
761 !***********************************************************************
762  !> @brief Retrieve layout associated with a domain decomposition.
763  !> Given a global 2D domain and the number of divisions in the
764  !! decomposition ndivs (usually the PE count unless some
765  !! domains are \e masked) this call returns a 2D domain layout.
766  !! By default, mpp_define_layout will attempt to divide the
767  !! 2D index space into domains that maintain the aspect ratio of the
768  !! global domain. If this cannot be done, the algorithm favours domains
769  !! that are longer in \e x than \e y, a preference that could improve vector performance.
770  !! <br>Example usage:
771  !! @code{.F90}call mpp_define_layout( global_indices, ndivs, layout )@endcode
772  !> @ingroup mpp_domains_mod
773  interface mpp_define_layout
774  module procedure mpp_define_layout2d
775  end interface
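 ! A hedged usage sketch in user code (illustrative, not part of this module): divide an
 ! assumed 360x180 global grid among all PEs. The subroutine name and grid size are assumptions.
 !
 !   subroutine layout_example( layout )
 !     use mpp_mod,         only : mpp_npes
 !     use mpp_domains_mod, only : mpp_define_layout
 !     integer, intent(out) :: layout(2)
 !     ! returns a 2D layout with layout(1)*layout(2) equal to mpp_npes()
 !     call mpp_define_layout( (/1,360,1,180/), mpp_npes(), layout )
 !   end subroutine layout_example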
776 
777  !> @brief Set up a domain decomposition.
778  !!
779  !> There are two forms for the \e mpp_define_domains call. The 2D version is generally
780  !! to be used but is built by repeated calls to the 1D version, also provided.
781  !!
782  !! <br>Example usage:
783  !!
784  !! call mpp_define_domains( global_indices, ndivs, domain, &
785  !! pelist, flags, halo, extent, maskmap )
786  !! call mpp_define_domains( global_indices, layout, domain, pelist, &
787  !! xflags, yflags, xhalo, yhalo, &
788  !! xextent, yextent, maskmap, name )
789  !!
790  !! @param global_indices Defines the global domain.
791  !! @param ndivs The number of domain divisions required.
792  !! @param [inout] domain Holds the resulting domain decomposition.
793  !! @param pelist List of PEs to which the domains are to be assigned.
794  !! @param flags An optional flag to pass additional information
795  !! about the desired domain topology. Useful flags in a 1D decomposition
796  !! include <TT>GLOBAL_DATA_DOMAIN</TT> and
797  !! <TT>CYCLIC_GLOBAL_DOMAIN</TT>. Flags are integers: multiple flags may
798  !! be added together. The flag values are public parameters available by
799  !! use association.
800  !! @param halo Width of the halo.
801  !! @param extent Normally <TT>mpp_define_domains</TT> attempts
802  !! an even division of the global domain across <TT>ndivs</TT>
803  !! domains. The <TT>extent</TT> array can be used by the user to pass a
804  !! custom domain division. The <TT>extent</TT> array has <TT>ndivs</TT>
805  !! elements and holds the compute domain widths, which should add up to
806  !! cover the global domain exactly.
807  !! @param maskmap Some divisions may be masked
808  !! (<TT>maskmap=.FALSE.</TT>) to exclude them from the computation (e.g
809  !! for ocean model domains that are all land). The <TT>maskmap</TT> array
810  !! is dimensioned <TT>ndivs</TT> and contains <TT>.TRUE.</TT> values for
811  !! any domain that must be <I>included</I> in the computation (default
812  !! all). The <TT>pelist</TT> array length should match the number of
813  !! domains included in the computation.
814  !!
815  !! <br>Example usage:
816  !! @code{.F90}
817  !! call mpp_define_domains( (/1,100/), 10, domain, &
818  !! flags=GLOBAL_DATA_DOMAIN+CYCLIC_GLOBAL_DOMAIN, halo=2 )
819  !! @endcode
820  !!
821  !! defines 10 compute domains spanning the range [1,100] of the global
822  !! domain. The compute domains are non-overlapping blocks of 10. All the data
823  !! domains are global, and with a halo of 2 span the range [-1:102]. And
824  !! since the global domain has been declared to be cyclic,
825  !! domain(9)%next => domain(0) and domain(0)%prev =>
826  !! domain(9). A field is allocated on the data domain, and computations proceed on
827  !! the compute domain. A call to mpp_update_domains would fill in the values
828  !! in the halo region:<br>
829  !!<br>
830  !! @code{.F90}
831  !! call mpp_get_data_domain( domain, isd, ied ) !returns -1 and 102
832  !! call mpp_get_compute_domain( domain, is, ie ) !returns (1,10) on PE 0 ...
833  !! allocate( a(isd:ied) )
834  !! do i = is,ie
835 !! a(i) = <perform computations>
836  !! end do
837  !! call mpp_update_domains( a, domain )
838  !! @endcode
839  !!<br>
840 !! The call to mpp_update_domains fills in the regions outside
841  !! the compute domain. Since the global domain is cyclic, the values at
842  !! \e i=(-1,0) are the same as at \e i=(99,100); and \e i=(101,102)
843  !! are the same as \e i=(1,2).
844  !!
845  !! The 2D version is just an extension of this syntax to two dimensions.
846  !!
847  !! The 2D version of the above should generally be used in
848  !! codes, including 1D-decomposed ones, if there is a possibility of
849  !! future evolution toward 2D decomposition. The arguments are similar to
850  !! the 1D case, except that now we have optional arguments
851  !! flags, halo, extent and maskmap along two axes.
852  !!
853  !! flags can now take an additional possible value to fold one or more edges.
854  !! This is done by using flags \e FOLD_WEST_EDGE, \e FOLD_EAST_EDGE, \e FOLD_SOUTH_EDGE or
855  !! \e FOLD_NORTH_EDGE. When a fold exists (e.g cylindrical domain),
856  !! vector fields reverse sign upon
857  !! crossing the fold. This parity reversal is performed only in the vector version of
858  !! mpp_update_domains. In addition, shift operations may need to be applied to vector fields on
859  !! staggered grids, also described in the vector interface to mpp_update_domains.
860  !!
861  !! <TT>name</TT> is the name associated with the decomposition,
862  !! e.g <TT>'Ocean model'</TT>. If this argument is present,
863  !! <TT>mpp_define_domains</TT> will print the domain decomposition
864  !! generated to <TT>stdlog</TT>.
865  !!
866  !! <br>Examples:
867  !! call mpp_define_domains( (/1,100,1,100/), (/2,2/), domain, xhalo=1 )
868  !! will create the following domain layout:<br>
869  !!<br>
870  !! | domain |domain(1)|domain(2) |domain(3) |domain(4) |
871  !! |--------------|---------|-----------|-----------|-------------|
872  !! |Compute domain|1,50,1,50|51,100,1,50|1,50,51,100|51,100,51,100|
873  !! |Data domain |0,51,1,50|50,101,1,50|0,51,51,100|50,101,51,100|
874  !!
875  !!
876  !! Again, we allocate arrays on the data domain, perform computations
877  !! on the compute domain, and call mpp_update_domains to update the halo region.
878  !!
879 !! If we wished to perform a 1D decomposition along Y on the same global domain,
880  !! we could use:
881  !!
882 !! call mpp_define_domains( (/1,100,1,100/), (/1,4/), domain, xhalo=1 )
883  !!
884  !! This will create the following domain layout:<br>
885  !!<br>
886  !! | domain |domain(1) |domain(2) |domain(3) |domain(4) |
887  !! |--------------|----------|-----------|-----------|------------|
888  !! |Compute domain|1,100,1,25|1,100,26,50|1,100,51,75|1,100,76,100|
889 !! |Data domain |0,101,1,25|0,101,26,50|0,101,51,75|0,101,76,100|
890  !> @ingroup mpp_domains_mod
891  interface mpp_define_domains
892  module procedure mpp_define_domains1d
893  module procedure mpp_define_domains2d
894  end interface
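 ! A hedged end-to-end sketch in user code (illustrative only; the grid size, halo width and
 ! variable names are assumptions): define a layout, decompose a 100x100 global grid with a
 ! halo of 2, and allocate a field on the data domain.
 !
 !   subroutine decompose_example( domain )
 !     use mpp_mod,         only : mpp_npes
 !     use mpp_domains_mod, only : mpp_define_layout, mpp_define_domains, domain2D
 !     use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain
 !     type(domain2D), intent(inout) :: domain
 !     integer :: layout(2), is, ie, js, je, isd, ied, jsd, jed
 !     real, allocatable :: a(:,:)
 !     call mpp_define_layout( (/1,100,1,100/), mpp_npes(), layout )
 !     call mpp_define_domains( (/1,100,1,100/), layout, domain, xhalo=2, yhalo=2, name='example' )
 !     call mpp_get_compute_domain( domain, is, ie, js, je )   ! points owned by this PE
 !     call mpp_get_data_domain( domain, isd, ied, jsd, jed )  ! compute domain plus halo
 !     allocate( a(isd:ied, jsd:jed) )
 !   end subroutine decompose_example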
895 
896  !> Defines a nullified 1D or 2D domain
897  !!
898  !> <br> Example usage:
899  !! @code{.F90}
900  !! call mpp_define_null_domain(domain)
901  !! @endcode
902  !> @ingroup mpp_domains_mod
903  interface mpp_define_null_domain
904  module procedure mpp_define_null_domain1d
905  module procedure mpp_define_null_domain2d
906  end interface
907 
908  !> Copy 1D or 2D domain
909  !> @param domain_in Input domain to get read
910  !> @param domain_out Output domain to get written to
911  !> @ingroup mpp_domains_mod
912  interface mpp_copy_domain
913  module procedure mpp_copy_domain1d
914  module procedure mpp_copy_domain2d
915  end interface mpp_copy_domain
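 ! A hedged usage sketch (illustrative): duplicate an existing decomposition, e.g. before
 ! modifying the copy, where both arguments are variables of type(domain2D):
 !
 !   call mpp_copy_domain( domain_in, domain_out )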
916  !> Deallocate given 1D or 2D domain
917  !> @param domain an allocated @ref domain1D or @ref domain2D
918  !> @ingroup mpp_domains_mod
919  interface mpp_deallocate_domain
920  module procedure mpp_deallocate_domain1d
921  module procedure mpp_deallocate_domain2d
922  end interface
923 
924  !> Modifies the extents (compute, data and global) of a given domain
925  !> @ingroup mpp_domains_mod
926  interface mpp_modify_domain
927  module procedure mpp_modify_domain1d
928  module procedure mpp_modify_domain2d
929  end interface
930 
931 
932 !***********************************************************************
933 !
934 ! public interface from mpp_domains_misc.h
935 !
936 !***********************************************************************
937 
938 !> Performs halo updates for a given domain.<br>
939 !!
940 !! Used to perform a halo update of a
941 !! domain-decomposed array on each PE. \e MPP_TYPE can be of type
942 !! complex, integer, logical or real of 4-byte or 8-byte kind; of rank up to 5.
943 !! The vector version (with two input data fields) is only present for real types.
944 !! For 2D domain updates, if there are halos present along both
945 !! x and y, we can choose to update one only, by specifying \e flags=XUPDATE or \e flags=YUPDATE.
946 !! In addition, one-sided updates can be performed by setting flags
947 !! to any combination of WUPDATE, EUPDATE, SUPDATE and NUPDATE
948 !! to update the west, east, north and south halos respectively.
949 !! Any combination of halos may be used by adding the requisite flags, e.g:
950 !! \e flags=XUPDATE+SUPDATE or \e flags=EUPDATE+WUPDATE+SUPDATE will update the east,
951 !! west and south halos.<br>
952 !!<br>
953 !! If a call to \e mpp_update_domains involves at least one E-W
954 !! halo and one N-S halo, the corners involved will also be updated, i.e,
955 !! in the example above, the SE and SW corners will be updated.<br>
956 !! If \e flags is not supplied, that is equivalent to \e flags=XUPDATE+YUPDATE.<br>
957 !!<br>
958 !! The vector version is passed the \e x and \e y components of a vector field in tandem,
959 !! and both are updated upon return. They are passed together to treat parity issues on various
960 !! grids. For example, on a cubic sphere projection, the \e x \e y components may be
961 !! interchanged when passing from an equatorial cube face to a polar face.
962 !! For grids with folds, vector components change sign on crossing the fold. Paired scalar
963 !! quantities can also be passed with the vector version if \e flags=SCALAR_PAIR, in which
964 !! case components are appropriately interchanged, but signs are not.<br>
965 !!<br>
966 !! Special treatment at boundaries such as folds is also required for
967 !! staggered grids. The following types of staggered grids are
968 !! recognized:<br>
969 !!<br>
970 !! 1) AGRID: values are at grid centers.<br>
971 !! 2) BGRID_NE: vector fields are at the NE vertex of a grid
972 !! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
973 !! actually at (i+&#189;,j+&#189;) with respect to the grid centers.<br>
974 !! 3) BGRID_SW: vector fields are at the SW vertex of a grid
975 !! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
976 !! actually at (i-&#189;,j-&#189;) with respect to the grid centers.<br>
977 !! 4) CGRID_NE: vector fields are at the N and E faces of a
978 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
979 !! are actually at (i+&#189;,j) and (i,j+&#189;) with respect to the
980 !! grid centers.<br>
981 !! 5) CGRID_SW: vector fields are at the S and W faces of a
982 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
983 !! are actually at (i-&#189;,j) and (i,j-&#189;) with respect to the
984 !! grid centers.<br>
985 !!<br>
986 !! The gridtypes listed above are all available by use association as
987 !! integer parameters. The scalar version of \e mpp_update_domains
988 !! assumes that the values of a scalar field are always at \e AGRID
989 !! locations, and no special boundary treatment is required. If vector
990 !! fields are at staggered locations, the optional argument
991 !! \e gridtype must be appropriately set for correct treatment at
992 !! boundaries.
993 !!<br>
994 !! It is safe to apply vector field updates to the appropriate arrays
995 !! irrespective of the domain topology: if the topology requires no
996 !! special treatment of vector fields, specifying \e gridtype will
997 !! do no harm.<br>
998 !!<br>
999 !! \e mpp_update_domains internally buffers the data being sent
1000 !! and received into single messages for efficiency. A tunable internal
1001 !! buffer area in memory is provided for this purpose by
1002 !! \e mpp_domains_mod. The size of this buffer area can be set by
1003 !! the user by calling
1004 !! \e mpp_domains_set_stack_size.
1005 !!
1006 !! Example usage:
1007 !! call mpp_update_domains( field, domain, flags )
1008 !! Update a 1D domain for the given field.
1009 !! call mpp_update_domains( fieldx, fieldy, domain, flags, gridtype )
1010 !! Update a 2D domain for the given fields.
1011 !> @ingroup mpp_domains_mod
1012  interface mpp_update_domains
1013  module procedure mpp_update_domain2d_r8_2d
1014  module procedure mpp_update_domain2d_r8_3d
1015  module procedure mpp_update_domain2d_r8_4d
1016  module procedure mpp_update_domain2d_r8_5d
1017  module procedure mpp_update_domain2d_r8_2dv
1018  module procedure mpp_update_domain2d_r8_3dv
1019  module procedure mpp_update_domain2d_r8_4dv
1020  module procedure mpp_update_domain2d_r8_5dv
1021 #ifdef OVERLOAD_C8
1022  module procedure mpp_update_domain2d_c8_2d
1023  module procedure mpp_update_domain2d_c8_3d
1024  module procedure mpp_update_domain2d_c8_4d
1025  module procedure mpp_update_domain2d_c8_5d
1026 #endif
1027  module procedure mpp_update_domain2d_i8_2d
1028  module procedure mpp_update_domain2d_i8_3d
1029  module procedure mpp_update_domain2d_i8_4d
1030  module procedure mpp_update_domain2d_i8_5d
1031  module procedure mpp_update_domain2d_r4_2d
1032  module procedure mpp_update_domain2d_r4_3d
1033  module procedure mpp_update_domain2d_r4_4d
1034  module procedure mpp_update_domain2d_r4_5d
1035  module procedure mpp_update_domain2d_r4_2dv
1036  module procedure mpp_update_domain2d_r4_3dv
1037  module procedure mpp_update_domain2d_r4_4dv
1038  module procedure mpp_update_domain2d_r4_5dv
1039 #ifdef OVERLOAD_C4
1040  module procedure mpp_update_domain2d_c4_2d
1041  module procedure mpp_update_domain2d_c4_3d
1042  module procedure mpp_update_domain2d_c4_4d
1043  module procedure mpp_update_domain2d_c4_5d
1044 #endif
1045  module procedure mpp_update_domain2d_i4_2d
1046  module procedure mpp_update_domain2d_i4_3d
1047  module procedure mpp_update_domain2d_i4_4d
1048  module procedure mpp_update_domain2d_i4_5d
1049  end interface
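 ! A hedged usage sketch (illustrative; the array names and the C-grid choice are assumptions):
 ! blocking halo updates for a scalar field and for a paired vector field.
 !
 !   ! scalar field allocated on the data domain, values computed on the compute domain
 !   call mpp_update_domains( a, domain )
 !   ! update only the east, west and south halos
 !   call mpp_update_domains( a, domain, flags=EUPDATE+WUPDATE+SUPDATE )
 !   ! vector components on a C-grid; both are passed so parity/rotation is handled
 !   call mpp_update_domains( u, v, domain, gridtype=CGRID_NE )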
1050 
1051 !> Interface to start halo updates
1052 !! \e mpp_start_update_domains is used to start a halo update of a
1053 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1054 !! \e complex, \e integer, \e logical or \e real;
1055 !! of 4-byte or 8-byte kind; of rank up to 5. The vector version (with
1056 !! two input data fields) is only present for \e real types.<br>
1057 !!<br>
1058 !! \e mpp_start_update_domains must be paired with
1059 !! \e mpp_complete_update_domains. In \e mpp_start_update_domains,
1060 !! a buffer will be pre-posted to receive (non-blocking) the
1061 !! data, and data on the computational domain will be packed and sent (non-blocking send)
1062 !! to the other processors. In \e mpp_complete_update_domains, the buffer will
1063 !! be unpacked to fill the halo, and mpp_sync_self will be called
1064 !! to ensure communication is safe at the last call of mpp_complete_update_domains.<br>
1065 !!<br>
1066 !! Each mpp_update_domains can be replaced by the combination of mpp_start_update_domains
1067 !! and mpp_complete_update_domains. The arguments in mpp_start_update_domains
1068 !! and mpp_complete_update_domains should be exactly the same as in
1069 !! the mpp_update_domains call being replaced, except that there is no optional argument "complete".
1070 !! The following are examples on how to replace mpp_update_domains with
1071 !! mpp_start_update_domains/mpp_complete_update_domains
1072 !!
1073 !> @par Example 1: Replace one scalar mpp_update_domains.<br>
1074 !!<br>
1075 !! Replace<br>
1076 !!<br>
1077 !! call mpp_update_domains(data, domain, flags=update_flags)<br>
1078 !!
1079 !! with<br>
1080 !!<br>
1081 !! id_update = mpp_start_update_domains(data, domain, flags=update_flags)<br>
1082 !! ...( doing some computation )<br>
1083 !! call mpp_complete_update_domains(id_update, data, domain, flags=update_flags)<br>
1084 !!
1085 !> @par Example 2: Replace group scalar mpp_update_domains<br>
1086 !!<br>
1087 !! Replace<br>
1088 !!<br>
1089 !! call mpp_update_domains(data_1, domain, flags=update_flags, complete=.false.)<br>
1090 !! .... ( other n-2 call mpp_update_domains with complete = .false. )<br>
1091 !! call mpp_update_domains(data_n, domain, flags=update_flags, complete=.true. )<br>
1092 !!<br>
1093 !! With<br>
1094 !!<br>
1095 !! id_up_1 = mpp_start_update_domains(data_1, domain, flags=update_flags)<br>
1096 !! .... ( other n-2 call mpp_start_update_domains )<br>
1097 !! id_up_n = mpp_start_update_domains(data_n, domain, flags=update_flags)<br>
1098 !!<br>
1099 !! ..... ( doing some computation )<br>
1100 !!<br>
1101 !! call mpp_complete_update_domains(id_up_1, data_1, domain, flags=update_flags)<br>
1102 !! .... ( other n-2 call mpp_complete_update_domains )<br>
1103 !! call mpp_complete_update_domains(id_up_n, data_n, domain, flags=update_flags)<br>
1104 !!
1105 !> @par Example 3: Replace group CGRID_NE vector, mpp_update_domains<br>
1106 !!<br>
1107 !! Replace<br>
1108 !!<br>
1109 !! call mpp_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE, complete=.false.)<br>
1110 !! .... ( other n-2 call mpp_update_domains with complete = .false. )<br>
1111 !! call mpp_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE, complete=.true. )<br>
1112 !!<br>
1113 !! with<br>
1114 !!<br>
1115 !! id_up_1 = mpp_start_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1116 !! .... ( other n-2 call mpp_start_update_domains )<br>
1117 !! id_up_n = mpp_start_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1118 !!<br>
1119 !! ..... ( doing some computation )<br>
1120 !!<br>
1121 !! call mpp_complete_update_domains(id_up_1, u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1122 !! .... ( other n-2 call mpp_complete_update_domains )<br>
1123 !! call mpp_complete_update_domains(id_up_n, u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1124 !!<br>
1125 !! For 2D domain updates, if there are halos present along both
1126 !! \e x and \e y, we can choose to update one only, by
1127 !! specifying \e flags=XUPDATE or \e flags=YUPDATE. In
1128 !! addition, one-sided updates can be performed by setting \e flags
1129 !! to any combination of \e WUPDATE, \e EUPDATE,
1130 !! \e SUPDATE and \e NUPDATE, to update the west, east, north
1131 !! and south halos respectively. Any combination of halos may be used by
1132 !! adding the requisite flags, e.g: \e flags=XUPDATE+SUPDATE or
1133 !! \e flags=EUPDATE+WUPDATE+SUPDATE will update the east, west and
1134 !! south halos.<br>
1135 !!<br>
1136 !! If a call to \e mpp_start_update_domains/mpp_complete_update_domains involves at least one E-W
1137 !! halo and one N-S halo, the corners involved will also be updated, i.e,
1138 !! in the example above, the SE and SW corners will be updated.<br>
1139 !!<br>
1140 !! If \e flags is not supplied, that is
1141 !! equivalent to \e flags=XUPDATE+YUPDATE.<br>
1142 !!<br>
1143 !! The vector version is passed the \e x and \e y
1144 !! components of a vector field in tandem, and both are updated upon
1145 !! return. They are passed together to treat parity issues on various
1146 !! grids. For example, on a cubic sphere projection, the \e x and
1147 !! \e y components may be interchanged when passing from an
1148 !! equatorial cube face to a polar face. For grids with folds, vector
1149 !! components change sign on crossing the fold. Paired scalar quantities
1150 !! can also be passed with the vector version if flags=SCALAR_PAIR, in which
1151 !! case components are appropriately interchanged, but signs are not.<br>
1152 !!<br>
1153 !! Special treatment at boundaries such as folds is also required for
1154 !! staggered grids. The following types of staggered grids are
1155 !! recognized:
1156 !!<br>
1157 !! 1) \e AGRID: values are at grid centers.<br>
1158 !! 2) \e BGRID_NE: vector fields are at the NE vertex of a grid
1159 !! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
1160 !! actually at (i+&#189;,j+&#189;) with respect to the grid centers.<br>
1161 !! 3) \e BGRID_SW: vector fields are at the SW vertex of a grid
1162 !! cell, i.e., the array elements \e u(i,j) and \e v(i,j) are
1163 !! actually at (i-&#189;,j-&#189;) with respect to the grid centers.<br>
1164 !! 4) \e CGRID_NE: vector fields are at the N and E faces of a
1165 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
1166 !! are actually at (i+&#189;,j) and (i,j+&#189;) with respect to the
1167 !! grid centers.<br>
1168 !! 5) \e CGRID_SW: vector fields are at the S and W faces of a
1169 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
1170 !! are actually at (i-&#189;,j) and (i,j-&#189;) with respect to the
1171 !! grid centers.<br>
1172 !!<br>
1173 !! The gridtypes listed above are all available by use association as
1174 !! integer parameters. If vector fields are at staggered locations, the
1175 !! optional argument \e gridtype must be appropriately set for
1176 !! correct treatment at boundaries.
1177 !!<br>
1178 !! It is safe to apply vector field updates to the appropriate arrays
1179 !! irrespective of the domain topology: if the topology requires no
1180 !! special treatment of vector fields, specifying \e gridtype will
1181 !! do no harm.<br>
1182 !!<br>
1183 !! \e mpp_start_update_domains/mpp_complete_update_domains internally
1184 !! buffers the data being sent and received into single messages for efficiency.
1185 !! A tunable internal buffer area in memory is provided for this purpose by
1186 !! \e mpp_domains_mod. The size of this buffer area can be set by
1187 !! the user by calling \e mpp_domains_set_stack_size.<br>
1188 !! Example usage:
1189 !!
1190 !! id_update = mpp_start_update_domains( field, domain, flags )
1191 !! call mpp_complete_update_domains( id_update, field, domain, flags )
1192 !> @ingroup mpp_domains_mod
1193  interface mpp_start_update_domains
1194  module procedure mpp_start_update_domain2d_r8_2d
1195  module procedure mpp_start_update_domain2d_r8_3d
1196  module procedure mpp_start_update_domain2d_r8_4d
1197  module procedure mpp_start_update_domain2d_r8_5d
1198  module procedure mpp_start_update_domain2d_r8_2dv
1199  module procedure mpp_start_update_domain2d_r8_3dv
1200  module procedure mpp_start_update_domain2d_r8_4dv
1201  module procedure mpp_start_update_domain2d_r8_5dv
1202 #ifdef OVERLOAD_C8
1203  module procedure mpp_start_update_domain2d_c8_2d
1204  module procedure mpp_start_update_domain2d_c8_3d
1205  module procedure mpp_start_update_domain2d_c8_4d
1206  module procedure mpp_start_update_domain2d_c8_5d
1207 #endif
1208  module procedure mpp_start_update_domain2d_i8_2d
1209  module procedure mpp_start_update_domain2d_i8_3d
1210  module procedure mpp_start_update_domain2d_i8_4d
1211  module procedure mpp_start_update_domain2d_i8_5d
1212  module procedure mpp_start_update_domain2d_r4_2d
1213  module procedure mpp_start_update_domain2d_r4_3d
1214  module procedure mpp_start_update_domain2d_r4_4d
1215  module procedure mpp_start_update_domain2d_r4_5d
1216  module procedure mpp_start_update_domain2d_r4_2dv
1217  module procedure mpp_start_update_domain2d_r4_3dv
1218  module procedure mpp_start_update_domain2d_r4_4dv
1219  module procedure mpp_start_update_domain2d_r4_5dv
1220 #ifdef OVERLOAD_C4
1221  module procedure mpp_start_update_domain2d_c4_2d
1222  module procedure mpp_start_update_domain2d_c4_3d
1223  module procedure mpp_start_update_domain2d_c4_4d
1224  module procedure mpp_start_update_domain2d_c4_5d
1225 #endif
1226  module procedure mpp_start_update_domain2d_i4_2d
1227  module procedure mpp_start_update_domain2d_i4_3d
1228  module procedure mpp_start_update_domain2d_i4_4d
1229  module procedure mpp_start_update_domain2d_i4_5d
1230  end interface
1231 
1232  !> Must be used after a call to @ref mpp_start_update_domains
1233  !! in order to complete a nonblocking domain update. See @ref mpp_start_update_domains
1234  !! for more info.
1235  !> @ingroup mpp_domains_mod
1236  interface mpp_complete_update_domains
1237  module procedure mpp_complete_update_domain2d_r8_2d
1238  module procedure mpp_complete_update_domain2d_r8_3d
1239  module procedure mpp_complete_update_domain2d_r8_4d
1240  module procedure mpp_complete_update_domain2d_r8_5d
1241  module procedure mpp_complete_update_domain2d_r8_2dv
1242  module procedure mpp_complete_update_domain2d_r8_3dv
1243  module procedure mpp_complete_update_domain2d_r8_4dv
1244  module procedure mpp_complete_update_domain2d_r8_5dv
1245 #ifdef OVERLOAD_C8
1246  module procedure mpp_complete_update_domain2d_c8_2d
1247  module procedure mpp_complete_update_domain2d_c8_3d
1248  module procedure mpp_complete_update_domain2d_c8_4d
1249  module procedure mpp_complete_update_domain2d_c8_5d
1250 #endif
1251  module procedure mpp_complete_update_domain2d_i8_2d
1252  module procedure mpp_complete_update_domain2d_i8_3d
1253  module procedure mpp_complete_update_domain2d_i8_4d
1254  module procedure mpp_complete_update_domain2d_i8_5d
1255  module procedure mpp_complete_update_domain2d_r4_2d
1256  module procedure mpp_complete_update_domain2d_r4_3d
1257  module procedure mpp_complete_update_domain2d_r4_4d
1258  module procedure mpp_complete_update_domain2d_r4_5d
1259  module procedure mpp_complete_update_domain2d_r4_2dv
1260  module procedure mpp_complete_update_domain2d_r4_3dv
1261  module procedure mpp_complete_update_domain2d_r4_4dv
1262  module procedure mpp_complete_update_domain2d_r4_5dv
1263 #ifdef OVERLOAD_C4
1264  module procedure mpp_complete_update_domain2d_c4_2d
1265  module procedure mpp_complete_update_domain2d_c4_3d
1266  module procedure mpp_complete_update_domain2d_c4_4d
1267  module procedure mpp_complete_update_domain2d_c4_5d
1268 #endif
1269  module procedure mpp_complete_update_domain2d_i4_2d
1270  module procedure mpp_complete_update_domain2d_i4_3d
1271  module procedure mpp_complete_update_domain2d_i4_4d
1272  module procedure mpp_complete_update_domain2d_i4_5d
1273  end interface
1274 
1275  !> Private interface used for non blocking updates
1276  !> @ingroup mpp_domains_mod
1277  interface mpp_start_do_update
1278  module procedure mpp_start_do_update_r8_3d
1279  module procedure mpp_start_do_update_r8_3dv
1280 #ifdef OVERLOAD_C8
1281  module procedure mpp_start_do_update_c8_3d
1282 #endif
1283  module procedure mpp_start_do_update_i8_3d
1284  module procedure mpp_start_do_update_r4_3d
1285  module procedure mpp_start_do_update_r4_3dv
1286 #ifdef OVERLOAD_C4
1287  module procedure mpp_start_do_update_c4_3d
1288 #endif
1289  module procedure mpp_start_do_update_i4_3d
1290  end interface
1291 
1292  !> Private interface used for non blocking updates
1293  !> @ingroup mpp_domains_mod
1294  interface mpp_complete_do_update
1295  module procedure mpp_complete_do_update_r8_3d
1296  module procedure mpp_complete_do_update_r8_3dv
1297 #ifdef OVERLOAD_C8
1298  module procedure mpp_complete_do_update_c8_3d
1299 #endif
1300  module procedure mpp_complete_do_update_i8_3d
1301  module procedure mpp_complete_do_update_r4_3d
1302  module procedure mpp_complete_do_update_r4_3dv
1303 #ifdef OVERLOAD_C4
1304  module procedure mpp_complete_do_update_c4_3d
1305 #endif
1306  module procedure mpp_complete_do_update_i4_3d
1307  end interface
1308 
1309  !> Constructor for the @ref mpp_group_update_type which is
1310  !! then used with @ref mpp_start_group_update
1311  !!
1312  !> @param[inout] group mpp_group_update_type to be populated for the group update
1313  !> @ingroup mpp_domains_mod
1314  interface mpp_create_group_update
1315  module procedure mpp_create_group_update_r4_2d
1316  module procedure mpp_create_group_update_r4_3d
1317  module procedure mpp_create_group_update_r4_4d
1318  module procedure mpp_create_group_update_r4_2dv
1319  module procedure mpp_create_group_update_r4_3dv
1320  module procedure mpp_create_group_update_r4_4dv
1321  module procedure mpp_create_group_update_r8_2d
1322  module procedure mpp_create_group_update_r8_3d
1323  module procedure mpp_create_group_update_r8_4d
1324  module procedure mpp_create_group_update_r8_2dv
1325  module procedure mpp_create_group_update_r8_3dv
1326  module procedure mpp_create_group_update_r8_4dv
1327  end interface mpp_create_group_update
1328 
1329  !> @ingroup mpp_domains_mod
1330  interface mpp_do_group_update
1331  module procedure mpp_do_group_update_r4
1332  module procedure mpp_do_group_update_r8
1333  end interface mpp_do_group_update
1334 
1335  !> Starts non-blocking group update
1336  !! Must be followed up with a call to @ref mpp_complete_group_update
1337  !! @ref mpp_group_update_type can be created with @ref mpp_create_group_update
1338  !!
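!! <br>A minimal sketch of the intended call sequence (the field names, the d_type argument,
!! and the gridtype choice are illustrative; see the individual interfaces for the full
!! argument lists):
!! @code{.F90}
!! type(mpp_group_update_type) :: group
!! call mpp_create_group_update( group, u, v, domain, gridtype=CGRID_NE )
!! call mpp_start_group_update( group, domain, d_type )
!! ! ... computation that does not touch the halos of the grouped fields ...
!! call mpp_complete_group_update( group, domain, d_type )
!! @endcode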
1339  !> @param[inout] type(mpp_group_update_type) group type created for group update
1340  !> @param[inout] type(domain2D) domain to update
1341  !> @ingroup mpp_domains_mod
1342  interface mpp_start_group_update
1343  module procedure mpp_start_group_update_r4
1344  module procedure mpp_start_group_update_r8
1345  end interface mpp_start_group_update
1346 
1347  !> Completes a pending non-blocking group update
1348  !! Must follow a call to @ref mpp_start_group_update
1349  !!
1350  !> @param[inout] type(mpp_group_update_type) group
1351  !> @param[inout] type(domain2D) domain
1352  !> @param[in] d_type data type
1353  !> @ingroup mpp_domains_mod
1354  interface mpp_complete_group_update
1355  module procedure mpp_complete_group_update_r4
1356  module procedure mpp_complete_group_update_r8
1357  end interface mpp_complete_group_update
1358 
1359  !> @ingroup mpp_domains_mod
1360  interface mpp_reset_group_update_field
1361  module procedure mpp_reset_group_update_field_r4_2d
1362  module procedure mpp_reset_group_update_field_r4_3d
1363  module procedure mpp_reset_group_update_field_r4_4d
1364  module procedure mpp_reset_group_update_field_r4_2dv
1365  module procedure mpp_reset_group_update_field_r4_3dv
1366  module procedure mpp_reset_group_update_field_r4_4dv
1367  module procedure mpp_reset_group_update_field_r8_2d
1368  module procedure mpp_reset_group_update_field_r8_3d
1369  module procedure mpp_reset_group_update_field_r8_4d
1370  module procedure mpp_reset_group_update_field_r8_2dv
1371  module procedure mpp_reset_group_update_field_r8_3dv
1372  module procedure mpp_reset_group_update_field_r8_4dv
1373  end interface mpp_reset_group_update_field
1374 
1375  !> Passes data from the coarse grid to fill buffers that are ready to be interpolated
1376  !! onto the fine grid.
1377  !! <br>Example usage:
1378  !!
1379  !! call mpp_update_nest_fine(field, nest_domain, wbuffer, ebuffer, sbuffer,
1380  !! nbuffer, nest_level, flags, complete, position, extra_halo, name,
1381  !! tile_count)
1382  !> @ingroup mpp_domains_mod
1383  interface mpp_update_nest_fine
1384  module procedure mpp_update_nest_fine_r8_2d
1385  module procedure mpp_update_nest_fine_r8_3d
1386  module procedure mpp_update_nest_fine_r8_4d
1387  module procedure mpp_update_nest_fine_r8_2dv
1388  module procedure mpp_update_nest_fine_r8_3dv
1389  module procedure mpp_update_nest_fine_r8_4dv
1390 #ifdef OVERLOAD_C8
1391  module procedure mpp_update_nest_fine_c8_2d
1392  module procedure mpp_update_nest_fine_c8_3d
1393  module procedure mpp_update_nest_fine_c8_4d
1394 #endif
1395  module procedure mpp_update_nest_fine_i8_2d
1396  module procedure mpp_update_nest_fine_i8_3d
1397  module procedure mpp_update_nest_fine_i8_4d
1398  module procedure mpp_update_nest_fine_r4_2d
1399  module procedure mpp_update_nest_fine_r4_3d
1400  module procedure mpp_update_nest_fine_r4_4d
1401  module procedure mpp_update_nest_fine_r4_2dv
1402  module procedure mpp_update_nest_fine_r4_3dv
1403  module procedure mpp_update_nest_fine_r4_4dv
1404 #ifdef OVERLOAD_C4
1405  module procedure mpp_update_nest_fine_c4_2d
1406  module procedure mpp_update_nest_fine_c4_3d
1407  module procedure mpp_update_nest_fine_c4_4d
1408 #endif
1409  module procedure mpp_update_nest_fine_i4_2d
1410  module procedure mpp_update_nest_fine_i4_3d
1411  module procedure mpp_update_nest_fine_i4_4d
1412  end interface
1413 
1414  !> @ingroup mpp_domains_mod
1415  interface mpp_do_update_nest_fine
1416  module procedure mpp_do_update_nest_fine_r8_3d
1417  module procedure mpp_do_update_nest_fine_r8_3dv
1418 #ifdef OVERLOAD_C8
1419  module procedure mpp_do_update_nest_fine_c8_3d
1420 #endif
1421  module procedure mpp_do_update_nest_fine_i8_3d
1422  module procedure mpp_do_update_nest_fine_r4_3d
1423  module procedure mpp_do_update_nest_fine_r4_3dv
1424 #ifdef OVERLOAD_C4
1425  module procedure mpp_do_update_nest_fine_c4_3d
1426 #endif
1427  module procedure mpp_do_update_nest_fine_i4_3d
1428  end interface
1429 
1430  !> Passes data from the fine grid to fill a buffer that is ready to be interpolated
1431  !! onto the coarse grid.
1432  !! <br>Example usage:
1433  !!
1434  !! call mpp_update_nest_coarse(field, nest_domain, field_out, nest_level, complete,
1435  !! position, name, tile_count)
1436  !> @ingroup mpp_domains_mod
1437  interface mpp_update_nest_coarse
1438  module procedure mpp_update_nest_coarse_r8_2d
1439  module procedure mpp_update_nest_coarse_r8_3d
1440  module procedure mpp_update_nest_coarse_r8_4d
1441  module procedure mpp_update_nest_coarse_r8_2dv
1442  module procedure mpp_update_nest_coarse_r8_3dv
1443  module procedure mpp_update_nest_coarse_r8_4dv
1444 #ifdef OVERLOAD_C8
1445  module procedure mpp_update_nest_coarse_c8_2d
1446  module procedure mpp_update_nest_coarse_c8_3d
1447  module procedure mpp_update_nest_coarse_c8_4d
1448 #endif
1449  module procedure mpp_update_nest_coarse_i8_2d
1450  module procedure mpp_update_nest_coarse_i8_3d
1451  module procedure mpp_update_nest_coarse_i8_4d
1452  module procedure mpp_update_nest_coarse_r4_2d
1453  module procedure mpp_update_nest_coarse_r4_3d
1454  module procedure mpp_update_nest_coarse_r4_4d
1455  module procedure mpp_update_nest_coarse_r4_2dv
1456  module procedure mpp_update_nest_coarse_r4_3dv
1457  module procedure mpp_update_nest_coarse_r4_4dv
1458 #ifdef OVERLOAD_C4
1459  module procedure mpp_update_nest_coarse_c4_2d
1460  module procedure mpp_update_nest_coarse_c4_3d
1461  module procedure mpp_update_nest_coarse_c4_4d
1462 #endif
1463  module procedure mpp_update_nest_coarse_i4_2d
1464  module procedure mpp_update_nest_coarse_i4_3d
1465  module procedure mpp_update_nest_coarse_i4_4d
1466  end interface
1467 
1468  !> @brief Used by @ref mpp_update_nest_coarse to perform domain updates
1469  !!
1470  !> @ingroup mpp_domains_mod
1471  interface mpp_do_update_nest_coarse
1472  module procedure mpp_do_update_nest_coarse_r8_3d
1473  module procedure mpp_do_update_nest_coarse_r8_3dv
1474 #ifdef OVERLOAD_C8
1475  module procedure mpp_do_update_nest_coarse_c8_3d
1476 #endif
1477  module procedure mpp_do_update_nest_coarse_i8_3d
1478  module procedure mpp_do_update_nest_coarse_r4_3d
1479  module procedure mpp_do_update_nest_coarse_r4_3dv
1480 #ifdef OVERLOAD_C4
1481  module procedure mpp_do_update_nest_coarse_c4_3d
1482 #endif
1483  module procedure mpp_do_update_nest_coarse_i4_3d
1484  end interface
1485 
1486  !> Get the index of the data passed from fine grid to coarse grid.
1487  !! <br>Example usage:
1488  !!
1489  !! call mpp_get_F2C_index(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse,
1490  !! is_fine, ie_fine, js_fine, je_fine, nest_level, position)
1491  !> @ingroup mpp_domains_mod
1492  interface mpp_get_F2C_index
1493  module procedure mpp_get_f2c_index_fine
1494  module procedure mpp_get_f2c_index_coarse
1495  end interface
1496 
1497  !> Broadcasts domain to every pe. Only useful outside the context of its own pelist
1498  !!
1499  !> <br>Example usage:
1500  !! call mpp_broadcast_domain(domain)
1501  !! call mpp_broadcast_domain(domain_in, domain_out)
1502  !! call mpp_broadcast_domain(domain, tile_coarse) ! nested domains
1503  !!
1504  !> @ingroup mpp_domains_mod
1505  interface mpp_broadcast_domain
1506  module procedure mpp_broadcast_domain_1
1507  module procedure mpp_broadcast_domain_2
1508  module procedure mpp_broadcast_domain_ug
1509  module procedure mpp_broadcast_domain_nest_fine
1510  module procedure mpp_broadcast_domain_nest_coarse
1511  end interface
1512 
1513 !--------------------------------------------------------------
1514 ! for adjoint update
1515 !--------------------------------------------------------------
1516  !> Similar to @ref mpp_update_domains, but for adjoint domain updates
1517  !> @ingroup mpp_domains_mod
1518  interface mpp_update_domains_ad
1519  module procedure mpp_update_domains_ad_2d_r8_2d
1520  module procedure mpp_update_domains_ad_2d_r8_3d
1521  module procedure mpp_update_domains_ad_2d_r8_4d
1522  module procedure mpp_update_domains_ad_2d_r8_5d
1523  module procedure mpp_update_domains_ad_2d_r8_2dv
1524  module procedure mpp_update_domains_ad_2d_r8_3dv
1525  module procedure mpp_update_domains_ad_2d_r8_4dv
1526  module procedure mpp_update_domains_ad_2d_r8_5dv
1527  module procedure mpp_update_domains_ad_2d_r4_2d
1528  module procedure mpp_update_domains_ad_2d_r4_3d
1529  module procedure mpp_update_domains_ad_2d_r4_4d
1530  module procedure mpp_update_domains_ad_2d_r4_5d
1531  module procedure mpp_update_domains_ad_2d_r4_2dv
1532  module procedure mpp_update_domains_ad_2d_r4_3dv
1533  module procedure mpp_update_domains_ad_2d_r4_4dv
1534  module procedure mpp_update_domains_ad_2d_r4_5dv
1535  end interface
1536 !
1537  !> Private interface used for @ref mpp_update_domains
1538  !> @ingroup mpp_domains_mod
1539  interface mpp_do_update
1540  module procedure mpp_do_update_r8_3d
1541  module procedure mpp_do_update_r8_3dv
1542 #ifdef OVERLOAD_C8
1543  module procedure mpp_do_update_c8_3d
1544 #endif
1545  module procedure mpp_do_update_i8_3d
1546  module procedure mpp_do_update_r4_3d
1547  module procedure mpp_do_update_r4_3dv
1548 #ifdef OVERLOAD_C4
1549  module procedure mpp_do_update_c4_3d
1550 #endif
1551  module procedure mpp_do_update_i4_3d
1552  end interface
1553  !> Private interface used to check the data domain of a 3D field whose computational domains have been computed
1554  !> @ingroup mpp_domains_mod
1555  interface mpp_do_check
1556  module procedure mpp_do_check_r8_3d
1557  module procedure mpp_do_check_r8_3dv
1558 #ifdef OVERLOAD_C8
1559  module procedure mpp_do_check_c8_3d
1560 #endif
1561  module procedure mpp_do_check_i8_3d
1562  module procedure mpp_do_check_r4_3d
1563  module procedure mpp_do_check_r4_3dv
1564 #ifdef OVERLOAD_C4
1565  module procedure mpp_do_check_c4_3d
1566 #endif
1567  module procedure mpp_do_check_i4_3d
1568  end interface
1569 
1570  !> Passes data from a structured grid to an unstructured grid
1571  !! <br>Example usage:
1572  !!
1573  !! call mpp_pass_SG_to_UG(domain, sg_data, ug_data)
1574  !> @ingroup mpp_domains_mod
1575  interface mpp_pass_SG_to_UG
1576  module procedure mpp_pass_sg_to_ug_r8_2d
1577  module procedure mpp_pass_sg_to_ug_r8_3d
1578  module procedure mpp_pass_sg_to_ug_r4_2d
1579  module procedure mpp_pass_sg_to_ug_r4_3d
1580  module procedure mpp_pass_sg_to_ug_i4_2d
1581  module procedure mpp_pass_sg_to_ug_i4_3d
1582  module procedure mpp_pass_sg_to_ug_l4_2d
1583  module procedure mpp_pass_sg_to_ug_l4_3d
1584  end interface
1585 
1586  !> Passes a data field from an unstructured grid to a structured grid
1587  !! <br>Example usage:
1588  !!
1589  !! call mpp_pass_UG_to_SG(UG_domain, field_UG, field_SG)
1590  !> @ingroup mpp_domains_mod
1591  interface mpp_pass_UG_to_SG
1592  module procedure mpp_pass_ug_to_sg_r8_2d
1593  module procedure mpp_pass_ug_to_sg_r8_3d
1594  module procedure mpp_pass_ug_to_sg_r4_2d
1595  module procedure mpp_pass_ug_to_sg_r4_3d
1596  module procedure mpp_pass_ug_to_sg_i4_2d
1597  module procedure mpp_pass_ug_to_sg_i4_3d
1598  module procedure mpp_pass_ug_to_sg_l4_2d
1599  module procedure mpp_pass_ug_to_sg_l4_3d
1600  end interface
1601 
1602  !> Private interface used by @ref mpp_update_domains_ad to perform
1603  !! adjoint updates of the data domain of a 3D field
1604  !!
1607  !> @ingroup mpp_domains_mod
1608  interface mpp_do_update_ad
1609  module procedure mpp_do_update_ad_r8_3d
1610  module procedure mpp_do_update_ad_r8_3dv
1611  module procedure mpp_do_update_ad_r4_3d
1612  module procedure mpp_do_update_ad_r4_3dv
1613  end interface
1614 
1615 !> Get the boundary data for symmetric domain when the data is at C, E, or N-cell center.<br>
1616 !! @ref mpp_get_boundary is used to get the boundary data for symmetric domain
1617 !! when the data is at C, E, or N-cell center. For cubic grid, the data should always
1618 !! be at the C-cell center.
1619 !! <br>Example usage:
1620 !!
1621 !! call mpp_get_boundary(domain, field, ebuffer, sbuffer, wbuffer, nbuffer)
1622 !! Get boundary information from domain and field and store in buffers
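!!<br>
!! A minimal sketch following the call form shown above (buffer names and sizes are
!! illustrative; the exact sizes depend on the grid position and domain symmetry, and
!! the compute-domain indices isc:iec, jsc:jec come from mpp_get_compute_domain):
!! @code{.F90}
!! real, dimension(:), allocatable :: ebuffer, sbuffer, wbuffer, nbuffer
!! allocate( ebuffer(jec-jsc+1), wbuffer(jec-jsc+1) )   ! east/west edges span the y extent
!! allocate( sbuffer(iec-isc+1), nbuffer(iec-isc+1) )   ! south/north edges span the x extent
!! call mpp_get_boundary(domain, field, ebuffer, sbuffer, wbuffer, nbuffer)
!! @endcode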
1623 !> @ingroup mpp_domains_mod
1624  interface mpp_get_boundary
1625  module procedure mpp_get_boundary_r8_2d
1626  module procedure mpp_get_boundary_r8_3d
1627 ! module procedure mpp_get_boundary_r8_4d
1628 ! module procedure mpp_get_boundary_r8_5d
1629  module procedure mpp_get_boundary_r8_2dv
1630  module procedure mpp_get_boundary_r8_3dv
1631 ! module procedure mpp_get_boundary_r8_4dv
1632 ! module procedure mpp_get_boundary_r8_5dv
1633  module procedure mpp_get_boundary_r4_2d
1634  module procedure mpp_get_boundary_r4_3d
1635 ! module procedure mpp_get_boundary_r4_4d
1636 ! module procedure mpp_get_boundary_r4_5d
1637  module procedure mpp_get_boundary_r4_2dv
1638  module procedure mpp_get_boundary_r4_3dv
1639 ! module procedure mpp_get_boundary_r4_4dv
1640 ! module procedure mpp_get_boundary_r4_5dv
1641  end interface
1642 
1643  !> @ingroup mpp_domains_mod
1644  interface mpp_get_boundary_ad
1645  module procedure mpp_get_boundary_ad_r8_2d
1646  module procedure mpp_get_boundary_ad_r8_3d
1647  module procedure mpp_get_boundary_ad_r8_2dv
1648  module procedure mpp_get_boundary_ad_r8_3dv
1649  module procedure mpp_get_boundary_ad_r4_2d
1650  module procedure mpp_get_boundary_ad_r4_3d
1651  module procedure mpp_get_boundary_ad_r4_2dv
1652  module procedure mpp_get_boundary_ad_r4_3dv
1653  end interface
1654 
1655  !> @ingroup mpp_domains_mod
1656  interface mpp_do_get_boundary
1657  module procedure mpp_do_get_boundary_r8_3d
1658  module procedure mpp_do_get_boundary_r8_3dv
1659  module procedure mpp_do_get_boundary_r4_3d
1660  module procedure mpp_do_get_boundary_r4_3dv
1661  end interface
1662 
1663  !> @ingroup mpp_domains_mod
1664  interface mpp_do_get_boundary_ad
1665  module procedure mpp_do_get_boundary_ad_r8_3d
1666  module procedure mpp_do_get_boundary_ad_r8_3dv
1667  module procedure mpp_do_get_boundary_ad_r4_3d
1668  module procedure mpp_do_get_boundary_ad_r4_3dv
1669  end interface
1670 
1671 !> Reorganization of distributed global arrays.<br>
1672 !! \e mpp_redistribute is used to reorganize a distributed array.
1673 !! \e MPP_TYPE_ can be of type \e integer, \e complex, or \e real;
1674 !! of 4-byte or 8-byte kind; of rank up to 5.
1675 !! <br>Example usage:
1676 !! call mpp_redistribute( domain_in, field_in, domain_out, field_out )
1677 !> @ingroup mpp_domains_mod
1678  interface mpp_redistribute
1679  module procedure mpp_redistribute_r8_2d
1680  module procedure mpp_redistribute_r8_3d
1681  module procedure mpp_redistribute_r8_4d
1682  module procedure mpp_redistribute_r8_5d
1683 #ifdef OVERLOAD_C8
1684  module procedure mpp_redistribute_c8_2d
1685  module procedure mpp_redistribute_c8_3d
1686  module procedure mpp_redistribute_c8_4d
1687  module procedure mpp_redistribute_c8_5d
1688 #endif
1689  module procedure mpp_redistribute_i8_2d
1690  module procedure mpp_redistribute_i8_3d
1691  module procedure mpp_redistribute_i8_4d
1692  module procedure mpp_redistribute_i8_5d
1693 !!$ module procedure mpp_redistribute_l8_2D
1694 !!$ module procedure mpp_redistribute_l8_3D
1695 !!$ module procedure mpp_redistribute_l8_4D
1696 !!$ module procedure mpp_redistribute_l8_5D
1697  module procedure mpp_redistribute_r4_2d
1698  module procedure mpp_redistribute_r4_3d
1699  module procedure mpp_redistribute_r4_4d
1700  module procedure mpp_redistribute_r4_5d
1701 #ifdef OVERLOAD_C4
1702  module procedure mpp_redistribute_c4_2d
1703  module procedure mpp_redistribute_c4_3d
1704  module procedure mpp_redistribute_c4_4d
1705  module procedure mpp_redistribute_c4_5d
1706 #endif
1707  module procedure mpp_redistribute_i4_2d
1708  module procedure mpp_redistribute_i4_3d
1709  module procedure mpp_redistribute_i4_4d
1710  module procedure mpp_redistribute_i4_5d
1711 !!$ module procedure mpp_redistribute_l4_2D
1712 !!$ module procedure mpp_redistribute_l4_3D
1713 !!$ module procedure mpp_redistribute_l4_4D
1714 !!$ module procedure mpp_redistribute_l4_5D
1715  end interface
1716 
1717  !> @ingroup mpp_domains_mod
1718  interface mpp_do_redistribute
1719  module procedure mpp_do_redistribute_r8_3d
1720 #ifdef OVERLOAD_C8
1721  module procedure mpp_do_redistribute_c8_3d
1722 #endif
1723  module procedure mpp_do_redistribute_i8_3d
1724  module procedure mpp_do_redistribute_l8_3d
1725  module procedure mpp_do_redistribute_r4_3d
1726 #ifdef OVERLOAD_C4
1727  module procedure mpp_do_redistribute_c4_3d
1728 #endif
1729  module procedure mpp_do_redistribute_i4_3d
1730  module procedure mpp_do_redistribute_l4_3d
1731  end interface
1732 
1733 !> Parallel checking between two ensembles which run on different sets of PEs at the same time.<br>
1734 !! There are two forms for the <TT>mpp_check_field</TT> call. The 2D
1735 !! version is generally to be used; the 3D version is built by repeated calls to the
1736 !! 2D version.<br>
1737 !! <br>Example usage:
1738 !! @code{.F90}
1739 !! call mpp_check_field(field_in, pelist1, pelist2, domain, mesg, &
1740 !! w_halo, s_halo, e_halo, n_halo, force_abort )
1741 !! @endcode
1742 !! @param field_in Field to be checked
1743 !! @param domain Domain of current pe
1744 !! @param mesg Message to be printed out
1745 !! @param w_halo Halo size to be checked, default is 0
1746 !! @param s_halo Halo size to be checked, default is 0
1747 !! @param e_halo Halo size to be checked, default is 0
1748 !! @param n_halo Halo size to be checked, default is 0
1749 !! @param force_abort When true, abort program when any difference found. Default is false.
1750 !> @ingroup mpp_domains_mod
1751  interface mpp_check_field
1752  module procedure mpp_check_field_2d
1753  module procedure mpp_check_field_3d
1754  end interface
1755 
1756 !***********************************************************************
1757 !
1758 ! public interface from mpp_domains_reduce.h
1759 !
1760 !***********************************************************************
1761 
1762 !> Fill in a global array from domain-decomposed arrays.<br>
1763 !!
1764 !> <TT>mpp_global_field</TT> is used to get an entire
1765 !! domain-decomposed array on each PE. <TT>MPP_TYPE_</TT> can be of type
1766 !! <TT>complex</TT>, <TT>integer</TT>, <TT>logical</TT> or <TT>real</TT>;
1767 !! of 4-byte or 8-byte kind; of rank up to 5.<br>
1768 !!
1769 !! All PEs in a domain decomposition must call
1770 !! <TT>mpp_global_field</TT>, and each will have a complete global field
1771 !! at the end. Please note that a global array of rank 3 or higher could
1772 !! occupy a lot of memory.
1773 !!
1774 !! @param domain 2D domain
1775 !! @param local Data dimensioned on either the compute or data domains of 'domain'
1776 !! @param[out] global output data dimensioned on the corresponding global domain
1777 !! @param flags can be either XONLY or YONLY to specify a globalization along one axis only
1778 !!
1779 !! <br> Example usage:
1780 !! @code{.F90}
1781 !! call mpp_global_field( domain, local, global, flags )
1782 !! @endcode
1783 !> @ingroup mpp_domains_mod
1784  interface mpp_global_field
1785  module procedure mpp_global_field2d_r8_2d
1786  module procedure mpp_global_field2d_r8_3d
1787  module procedure mpp_global_field2d_r8_4d
1788  module procedure mpp_global_field2d_r8_5d
1789 #ifdef OVERLOAD_C8
1790  module procedure mpp_global_field2d_c8_2d
1791  module procedure mpp_global_field2d_c8_3d
1792  module procedure mpp_global_field2d_c8_4d
1793  module procedure mpp_global_field2d_c8_5d
1794 #endif
1795  module procedure mpp_global_field2d_i8_2d
1796  module procedure mpp_global_field2d_i8_3d
1797  module procedure mpp_global_field2d_i8_4d
1798  module procedure mpp_global_field2d_i8_5d
1799  module procedure mpp_global_field2d_l8_2d
1800  module procedure mpp_global_field2d_l8_3d
1801  module procedure mpp_global_field2d_l8_4d
1802  module procedure mpp_global_field2d_l8_5d
1803  module procedure mpp_global_field2d_r4_2d
1804  module procedure mpp_global_field2d_r4_3d
1805  module procedure mpp_global_field2d_r4_4d
1806  module procedure mpp_global_field2d_r4_5d
1807 #ifdef OVERLOAD_C4
1808  module procedure mpp_global_field2d_c4_2d
1809  module procedure mpp_global_field2d_c4_3d
1810  module procedure mpp_global_field2d_c4_4d
1811  module procedure mpp_global_field2d_c4_5d
1812 #endif
1813  module procedure mpp_global_field2d_i4_2d
1814  module procedure mpp_global_field2d_i4_3d
1815  module procedure mpp_global_field2d_i4_4d
1816  module procedure mpp_global_field2d_i4_5d
1817  module procedure mpp_global_field2d_l4_2d
1818  module procedure mpp_global_field2d_l4_3d
1819  module procedure mpp_global_field2d_l4_4d
1820  module procedure mpp_global_field2d_l4_5d
1821  end interface
1822 
1823 !> @ingroup mpp_domains_mod
1824  interface mpp_global_field_ad
1825  module procedure mpp_global_field2d_r8_2d_ad
1826  module procedure mpp_global_field2d_r8_3d_ad
1827  module procedure mpp_global_field2d_r8_4d_ad
1828  module procedure mpp_global_field2d_r8_5d_ad
1829 #ifdef OVERLOAD_C8
1830  module procedure mpp_global_field2d_c8_2d_ad
1831  module procedure mpp_global_field2d_c8_3d_ad
1832  module procedure mpp_global_field2d_c8_4d_ad
1833  module procedure mpp_global_field2d_c8_5d_ad
1834 #endif
1835  module procedure mpp_global_field2d_i8_2d_ad
1836  module procedure mpp_global_field2d_i8_3d_ad
1837  module procedure mpp_global_field2d_i8_4d_ad
1838  module procedure mpp_global_field2d_i8_5d_ad
1839  module procedure mpp_global_field2d_l8_2d_ad
1840  module procedure mpp_global_field2d_l8_3d_ad
1841  module procedure mpp_global_field2d_l8_4d_ad
1842  module procedure mpp_global_field2d_l8_5d_ad
1843  module procedure mpp_global_field2d_r4_2d_ad
1844  module procedure mpp_global_field2d_r4_3d_ad
1845  module procedure mpp_global_field2d_r4_4d_ad
1846  module procedure mpp_global_field2d_r4_5d_ad
1847 #ifdef OVERLOAD_C4
1848  module procedure mpp_global_field2d_c4_2d_ad
1849  module procedure mpp_global_field2d_c4_3d_ad
1850  module procedure mpp_global_field2d_c4_4d_ad
1851  module procedure mpp_global_field2d_c4_5d_ad
1852 #endif
1853  module procedure mpp_global_field2d_i4_2d_ad
1854  module procedure mpp_global_field2d_i4_3d_ad
1855  module procedure mpp_global_field2d_i4_4d_ad
1856  module procedure mpp_global_field2d_i4_5d_ad
1857  module procedure mpp_global_field2d_l4_2d_ad
1858  module procedure mpp_global_field2d_l4_3d_ad
1859  module procedure mpp_global_field2d_l4_4d_ad
1860  module procedure mpp_global_field2d_l4_5d_ad
1861  end interface
1862 
1863 !> Private helper interface used by @ref mpp_global_field
1864 !> @ingroup mpp_domains_mod
1865  interface mpp_do_global_field
1866  module procedure mpp_do_global_field2d_r8_3d
1867 #ifdef OVERLOAD_C8
1868  module procedure mpp_do_global_field2d_c8_3d
1869 #endif
1870  module procedure mpp_do_global_field2d_i8_3d
1871  module procedure mpp_do_global_field2d_l8_3d
1872  module procedure mpp_do_global_field2d_r4_3d
1873 #ifdef OVERLOAD_C4
1874  module procedure mpp_do_global_field2d_c4_3d
1875 #endif
1876  module procedure mpp_do_global_field2d_i4_3d
1877  module procedure mpp_do_global_field2d_l4_3d
1878  end interface
1879 
1880  interface mpp_do_global_field_a2a
1881  module procedure mpp_do_global_field2d_a2a_r8_3d
1882 #ifdef OVERLOAD_C8
1883  module procedure mpp_do_global_field2d_a2a_c8_3d
1884 #endif
1885  module procedure mpp_do_global_field2d_a2a_i8_3d
1886  module procedure mpp_do_global_field2d_a2a_l8_3d
1887  module procedure mpp_do_global_field2d_a2a_r4_3d
1888 #ifdef OVERLOAD_C4
1889  module procedure mpp_do_global_field2d_a2a_c4_3d
1890 #endif
1891  module procedure mpp_do_global_field2d_a2a_i4_3d
1892  module procedure mpp_do_global_field2d_a2a_l4_3d
1893  end interface
1894 
1895 !> Same functionality as @ref mpp_global_field but for unstructured domains
1896 !> @ingroup mpp_domains_mod
1897  interface mpp_global_field_ug
1898  module procedure mpp_global_field2d_ug_r8_2d
1899  module procedure mpp_global_field2d_ug_r8_3d
1900  module procedure mpp_global_field2d_ug_r8_4d
1901  module procedure mpp_global_field2d_ug_r8_5d
1902  module procedure mpp_global_field2d_ug_i8_2d
1903  module procedure mpp_global_field2d_ug_i8_3d
1904  module procedure mpp_global_field2d_ug_i8_4d
1905  module procedure mpp_global_field2d_ug_i8_5d
1906  module procedure mpp_global_field2d_ug_r4_2d
1907  module procedure mpp_global_field2d_ug_r4_3d
1908  module procedure mpp_global_field2d_ug_r4_4d
1909  module procedure mpp_global_field2d_ug_r4_5d
1910  module procedure mpp_global_field2d_ug_i4_2d
1911  module procedure mpp_global_field2d_ug_i4_3d
1912  module procedure mpp_global_field2d_ug_i4_4d
1913  module procedure mpp_global_field2d_ug_i4_5d
1914  end interface
1915 
1916 !> @ingroup mpp_domains_mod
1917  interface mpp_do_global_field_ad
1918  module procedure mpp_do_global_field2d_r8_3d_ad
1919 #ifdef OVERLOAD_C8
1920  module procedure mpp_do_global_field2d_c8_3d_ad
1921 #endif
1922  module procedure mpp_do_global_field2d_i8_3d_ad
1923  module procedure mpp_do_global_field2d_l8_3d_ad
1924  module procedure mpp_do_global_field2d_r4_3d_ad
1925 #ifdef OVERLOAD_C4
1926  module procedure mpp_do_global_field2d_c4_3d_ad
1927 #endif
1928  module procedure mpp_do_global_field2d_i4_3d_ad
1929  module procedure mpp_do_global_field2d_l4_3d_ad
1930  end interface
1931 
1932 !> Global max of domain-decomposed arrays.<br>
1933 !! \e mpp_global_max is used to get the maximum value of a
1934 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1935 !! \e integer or \e real; of 4-byte or 8-byte kind; of rank
1936 !! up to 5. The dimension of \e locus must equal the rank of \e field.<br>
1937 !!<br>
1938 !! All PEs in a domain decomposition must call \e mpp_global_max,
1939 !! and each will have the result upon exit.
1940 !! The function \e mpp_global_min, with an identical syntax, is also available.
1941 !!
1942 !! @param domain 2D domain
1943 !! @param field field data dimensioned on either the compute or data domains of 'domain'
1944 !! @param locus If present, can be used to retrieve the location of the maximum
1945 !!
1946 !! <br>Example usage:
1947 !! mpp_global_max( domain, field, locus )
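!!<br>
!! A minimal sketch retrieving the global maximum and its location (names are
!! illustrative; the locus array length matches the rank of the field, here rank 3):
!! @code{.F90}
!! real    :: gmax
!! integer :: locus(3)
!! gmax = mpp_global_max( domain, field, locus )
!! @endcode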
1948 !> @ingroup mpp_domains_mod
1949  interface mpp_global_max
1950  module procedure mpp_global_max_r8_2d
1951  module procedure mpp_global_max_r8_3d
1952  module procedure mpp_global_max_r8_4d
1953  module procedure mpp_global_max_r8_5d
1954  module procedure mpp_global_max_r4_2d
1955  module procedure mpp_global_max_r4_3d
1956  module procedure mpp_global_max_r4_4d
1957  module procedure mpp_global_max_r4_5d
1958  module procedure mpp_global_max_i8_2d
1959  module procedure mpp_global_max_i8_3d
1960  module procedure mpp_global_max_i8_4d
1961  module procedure mpp_global_max_i8_5d
1962  module procedure mpp_global_max_i4_2d
1963  module procedure mpp_global_max_i4_3d
1964  module procedure mpp_global_max_i4_4d
1965  module procedure mpp_global_max_i4_5d
1966  end interface
1967 
1968 !> Global min of domain-decomposed arrays.<br>
1969 !! \e mpp_global_min is used to get the minimum value of a
1970 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1971 !! \e integer or \e real; of 4-byte or 8-byte kind; of rank
1972 !! up to 5. The dimension of \e locus must equal the rank of \e field.<br>
1973 !!<br>
1974 !! All PEs in a domain decomposition must call \e mpp_global_min,
1975 !! and each will have the result upon exit.
1976 !! The function \e mpp_global_max, with an identical syntax, is also available.
1977 !!
1978 !! @param domain 2D domain
1979 !! @param field field data dimensioned on either the compute or data domains of 'domain'
1980 !! @param locus If present, can be used to retrieve the location of the minimum
1981 !!
1982 !! <br>Example usage:
1983 !! mpp_global_min( domain, field, locus )
1984 !> @ingroup mpp_domains_mod
1985  interface mpp_global_min
1986  module procedure mpp_global_min_r8_2d
1987  module procedure mpp_global_min_r8_3d
1988  module procedure mpp_global_min_r8_4d
1989  module procedure mpp_global_min_r8_5d
1990  module procedure mpp_global_min_r4_2d
1991  module procedure mpp_global_min_r4_3d
1992  module procedure mpp_global_min_r4_4d
1993  module procedure mpp_global_min_r4_5d
1994  module procedure mpp_global_min_i8_2d
1995  module procedure mpp_global_min_i8_3d
1996  module procedure mpp_global_min_i8_4d
1997  module procedure mpp_global_min_i8_5d
1998  module procedure mpp_global_min_i4_2d
1999  module procedure mpp_global_min_i4_3d
2000  module procedure mpp_global_min_i4_4d
2001  module procedure mpp_global_min_i4_5d
2002  end interface
2003 
2004 !> Global sum of domain-decomposed arrays.<br>
2005 !! \e mpp_global_sum is used to get the sum of a domain-decomposed array
2006 !! on each PE. \e MPP_TYPE_ can be of type \e integer, \e complex, or \e real; of 4-byte or
2007 !! 8-byte kind; of rank up to 5.
2008 !!
2009 !! @param domain 2D domain
2010 !! @param field field data dimensioned on either the compute or data domain of 'domain'
2011 !! @param flags If present must have the value BITWISE_EXACT_SUM. This produces a sum that
2012 !! is guaranteed to be identical irrespective of how the domain is decomposed.
2013 !! This method does the sum first along the ranks beyond 2, and then calls mpp_global_field
2014 !! to produce a global 2D array which is then summed. The default method, which is
2015 !! considerably faster, does a local sum followed by mpp_sum across the domain
2016 !! decomposition.
2017 !!
2018 !! <br>Example usage:
2019 !! call mpp_global_sum( domain, field, flags )
2020 !! @note All PEs in a domain decomposition must call \e mpp_global_sum,
2021 !! and each will have the result upon exit.
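!!<br>
!! A minimal sketch contrasting the default and bitwise-exact forms (names are illustrative):
!! @code{.F90}
!! real :: fast_sum, exact_sum
!! fast_sum  = mpp_global_sum( domain, field )                          ! local sum + mpp_sum (fast)
!! exact_sum = mpp_global_sum( domain, field, flags=BITWISE_EXACT_SUM ) ! decomposition-independent
!! @endcode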
2022 !> @ingroup mpp_domains_mod
2023  interface mpp_global_sum
2024  module procedure mpp_global_sum_r8_2d
2025  module procedure mpp_global_sum_r8_3d
2026  module procedure mpp_global_sum_r8_4d
2027  module procedure mpp_global_sum_r8_5d
2028 #ifdef OVERLOAD_C8
2029  module procedure mpp_global_sum_c8_2d
2030  module procedure mpp_global_sum_c8_3d
2031  module procedure mpp_global_sum_c8_4d
2032  module procedure mpp_global_sum_c8_5d
2033 #endif
2034  module procedure mpp_global_sum_r4_2d
2035  module procedure mpp_global_sum_r4_3d
2036  module procedure mpp_global_sum_r4_4d
2037  module procedure mpp_global_sum_r4_5d
2038 #ifdef OVERLOAD_C4
2039  module procedure mpp_global_sum_c4_2d
2040  module procedure mpp_global_sum_c4_3d
2041  module procedure mpp_global_sum_c4_4d
2042  module procedure mpp_global_sum_c4_5d
2043 #endif
2044  module procedure mpp_global_sum_i8_2d
2045  module procedure mpp_global_sum_i8_3d
2046  module procedure mpp_global_sum_i8_4d
2047  module procedure mpp_global_sum_i8_5d
2048  module procedure mpp_global_sum_i4_2d
2049  module procedure mpp_global_sum_i4_3d
2050  module procedure mpp_global_sum_i4_4d
2051  module procedure mpp_global_sum_i4_5d
2052  end interface
2053 
2054 !gag
2055 !> @ingroup mpp_domains_mod
2056  interface mpp_global_sum_tl
2057  module procedure mpp_global_sum_tl_r8_2d
2058  module procedure mpp_global_sum_tl_r8_3d
2059  module procedure mpp_global_sum_tl_r8_4d
2060  module procedure mpp_global_sum_tl_r8_5d
2061 #ifdef OVERLOAD_C8
2062  module procedure mpp_global_sum_tl_c8_2d
2063  module procedure mpp_global_sum_tl_c8_3d
2064  module procedure mpp_global_sum_tl_c8_4d
2065  module procedure mpp_global_sum_tl_c8_5d
2066 #endif
2067  module procedure mpp_global_sum_tl_r4_2d
2068  module procedure mpp_global_sum_tl_r4_3d
2069  module procedure mpp_global_sum_tl_r4_4d
2070  module procedure mpp_global_sum_tl_r4_5d
2071 #ifdef OVERLOAD_C4
2072  module procedure mpp_global_sum_tl_c4_2d
2073  module procedure mpp_global_sum_tl_c4_3d
2074  module procedure mpp_global_sum_tl_c4_4d
2075  module procedure mpp_global_sum_tl_c4_5d
2076 #endif
2077  module procedure mpp_global_sum_tl_i8_2d
2078  module procedure mpp_global_sum_tl_i8_3d
2079  module procedure mpp_global_sum_tl_i8_4d
2080  module procedure mpp_global_sum_tl_i8_5d
2081  module procedure mpp_global_sum_tl_i4_2d
2082  module procedure mpp_global_sum_tl_i4_3d
2083  module procedure mpp_global_sum_tl_i4_4d
2084  module procedure mpp_global_sum_tl_i4_5d
2085  end interface
2086 !gag
2087 
2088 !bnc
2089 !> @ingroup mpp_domains_mod
2090  interface mpp_global_sum_ad
2091  module procedure mpp_global_sum_ad_r8_2d
2092  module procedure mpp_global_sum_ad_r8_3d
2093  module procedure mpp_global_sum_ad_r8_4d
2094  module procedure mpp_global_sum_ad_r8_5d
2095 #ifdef OVERLOAD_C8
2096  module procedure mpp_global_sum_ad_c8_2d
2097  module procedure mpp_global_sum_ad_c8_3d
2098  module procedure mpp_global_sum_ad_c8_4d
2099  module procedure mpp_global_sum_ad_c8_5d
2100 #endif
2101  module procedure mpp_global_sum_ad_r4_2d
2102  module procedure mpp_global_sum_ad_r4_3d
2103  module procedure mpp_global_sum_ad_r4_4d
2104  module procedure mpp_global_sum_ad_r4_5d
2105 #ifdef OVERLOAD_C4
2106  module procedure mpp_global_sum_ad_c4_2d
2107  module procedure mpp_global_sum_ad_c4_3d
2108  module procedure mpp_global_sum_ad_c4_4d
2109  module procedure mpp_global_sum_ad_c4_5d
2110 #endif
2111  module procedure mpp_global_sum_ad_i8_2d
2112  module procedure mpp_global_sum_ad_i8_3d
2113  module procedure mpp_global_sum_ad_i8_4d
2114  module procedure mpp_global_sum_ad_i8_5d
2115  module procedure mpp_global_sum_ad_i4_2d
2116  module procedure mpp_global_sum_ad_i4_3d
2117  module procedure mpp_global_sum_ad_i4_4d
2118  module procedure mpp_global_sum_ad_i4_5d
2119  end interface
2120 !bnc
2121 
2122 !***********************************************************************
2123 !
2124 ! public interface from mpp_domain_util.h
2125 !
2126 !***********************************************************************
2127  !> @brief Retrieve PE number of a neighboring domain.
2128  !!
2129  !> Given a 1-D or 2-D domain decomposition, this call allows users to retrieve
2130  !! the PE number of an adjacent PE-domain while taking into account that the
2131  !! domain may have holes (masked) and/or have cyclic boundary conditions and/or a
2132  !! folded edge. Which PE-domain will be retrieved will depend on "direction":
2133  !! +1 (right) or -1 (left) for a 1-D domain decomposition and either NORTH, SOUTH,
2134  !! EAST, WEST, NORTH_EAST, SOUTH_EAST, SOUTH_WEST, or NORTH_WEST for a 2-D
2135  !! decomposition. If no neighboring domain exists (masked domain), then the
2136  !! returned "pe" value will be set to NULL_PE.<br>
2137  !! <br>Example usage:
2138  !!
2139  !! call mpp_get_neighbor_pe( domain1d, direction=+1 , pe)
2140  !!
2141  !! Set pe to the neighbor pe number that is to the right of the current pe
2142  !!
2143  !! call mpp_get_neighbor_pe( domain2d, direction=NORTH, pe)
2144  !!
2145  !! Get neighbor pe number that's above/north of the current pe
2146  !> @ingroup mpp_domains_mod
2147  interface mpp_get_neighbor_pe
2148  module procedure mpp_get_neighbor_pe_1d
2149  module procedure mpp_get_neighbor_pe_2d
2150  end interface
2151 
2152  !> Equality/inequality operators for domaintypes. <br>
2153  !!
2154  !! <br>The module provides public operators to check for
2155  !! equality/inequality of domaintypes, e.g:<br>
2156  !!
2157  !! type(domain1D) :: a, b
2158  !! type(domain2D) :: c, d
2159  !! ...
2160  !! if( a.NE.b )then
2161  !! ...
2162  !! end if
2163  !! if( c==d )then
2164  !! ...
2165  !! end if
2166  !!<br>
2167  !! Domains are considered equal if and only if the start and end
2168  !! indices of each of their component global, data and compute domains are equal.
2169  !> @ingroup mpp_domains_mod
2170  interface operator(.EQ.)
2171  module procedure mpp_domain1d_eq
2172  module procedure mpp_domain2d_eq
2173  module procedure mpp_domainug_eq
2174  end interface
2175 
2176  !> @ingroup mpp_domains_mod
2177  interface operator(.NE.)
2178  module procedure mpp_domain1d_ne
2179  module procedure mpp_domain2d_ne
2180  module procedure mpp_domainug_ne
2181  end interface
2182 
2183  !> These routines retrieve the axis specifications associated with the compute domains.
2184  !! The domain is a derived type with private elements. These routines
2185  !! retrieve the axis specifications associated with the compute domains.
2186  !! The 2D version of these is a simple extension of 1D.
2187  !! <br>Example usage:
2188  !!
2189  !! call mpp_get_compute_domain(domain_1D, is, ie)
2190  !! call mpp_get_compute_domain(domain_2D, is, ie, js, je)
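!! <br>A common pattern is to size local arrays from the returned extents (a sketch;
!! variable names are illustrative):
!! @code{.F90}
!! integer :: is, ie, js, je
!! real, allocatable :: field(:,:)
!! call mpp_get_compute_domain( domain_2D, is, ie, js, je )
!! allocate( field(is:ie, js:je) )      ! field spans this PE's compute domain
!! @endcode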
2191  !> @ingroup mpp_domains_mod
2192  interface mpp_get_compute_domain
2193  module procedure mpp_get_compute_domain1d
2194  module procedure mpp_get_compute_domain2d
2195  end interface
2196 
2197  !> Retrieve the entire array of compute domain extents associated with a decomposition.
2198  !!
2199  !> @param domain 2D domain
2200  !> @param[out] xbegin,ybegin x and y domain starting indices
2201  !> @param[out] xsize,ysize x and y domain sizes
2202  !! <br>Example usage:
2203  !!
2204  !! call mpp_get_compute_domains( domain, xbegin, xend, xsize, &
2205  !! ybegin, yend, ysize )
2206  !> @ingroup mpp_domains_mod
2207  interface mpp_get_compute_domains
2208  module procedure mpp_get_compute_domains1d
2209  module procedure mpp_get_compute_domains2d
2210  end interface
2211 
2212  !> @ingroup mpp_domains_mod
2213  interface mpp_get_global_domains
2214  module procedure mpp_get_global_domains1d
2215  module procedure mpp_get_global_domains2d
2216  end interface
2217 
2218  !> These routines retrieve the axis specifications associated with the data domains.
2219  !! The domain is a derived type with private elements. These routines
2220  !! retrieve the axis specifications associated with the data domains.
2221  !! The 2D version of these is a simple extension of 1D.
2222  !! <br>Example usage:
2223  !!
2224  !! call mpp_get_data_domain(domain_1d, isd, ied)
2225  !! call mpp_get_data_domain(domain_2d, isd, ied, jsd, jed)
2226  !> @ingroup mpp_domains_mod
2227  interface mpp_get_data_domain
2228  module procedure mpp_get_data_domain1d
2229  module procedure mpp_get_data_domain2d
2230  end interface
2231 
2232  !> These routines retrieve the axis specifications associated with the global domains.
2233  !! The domain is a derived type with private elements. These routines
2234  !! retrieve the axis specifications associated with the global domains.
2235  !! The 2D version of these is a simple extension of 1D.
2236  !! <br>Example usage:
2237  !!
2238  !! call mpp_get_global_domain(domain_1d, isg, ieg)
2239  !! call mpp_get_global_domain(domain_2d, isg, ieg, jsg, jeg)
2240  !> @ingroup mpp_domains_mod
2241  interface mpp_get_global_domain
2242  module procedure mpp_get_global_domain1d
2243  module procedure mpp_get_global_domain2d
2244  end interface
2245 
2246  !> These routines retrieve the axis specifications associated with the memory domains.
2247  !! The domain is a derived type with private elements. These routines
2248  !! retrieve the axis specifications associated with the memory domains.
2249  !! The 2D version of these is a simple extension of 1D.
2250  !! <br>Example usage:
2251  !!
2252  !! call mpp_get_memory_domain(domain_1d, ism, iem)
2253  !! call mpp_get_memory_domain(domain_2d, ism, iem, jsm, jem)
2254  !> @ingroup mpp_domains_mod
2255  interface mpp_get_memory_domain
2256  module procedure mpp_get_memory_domain1d
2257  module procedure mpp_get_memory_domain2d
2258  end interface
2259 
2260  !> @ingroup mpp_domains_mod
2261  interface mpp_get_domain_extents
2262  module procedure mpp_get_domain_extents1d
2263  module procedure mpp_get_domain_extents2d
2264  end interface
2265 
2266  !> These routines set the axis specifications associated with the compute domains.
2267  !! The domain is a derived type with private elements. These routines
2268  !! set the axis specifications associated with the compute domains.
2269  !! The 2D version of these is a simple extension of 1D.
2270  !! <br>Example usage:
2271  !!
2272  !! call mpp_set_compute_domain(domain_1d, is, ie)
2273  !! call mpp_set_compute_domain(domain_2d, is, ie, js, je)
2274  !> @ingroup mpp_domains_mod
2275  interface mpp_set_compute_domain
2276  module procedure mpp_set_compute_domain1d
2277  module procedure mpp_set_compute_domain2d
2278  end interface
2279 
2280  !> These routines set the axis specifications associated with the data domains.
2281  !! The domain is a derived type with private elements. These routines
2282  !! set the axis specifications associated with the data domains.
2283  !! The 2D version of these is a simple extension of 1D.
2284  !! <br>Example usage:
2285  !!
2286  !! call mpp_set_data_domain(domain_1d, isd, ied)
2287  !! call mpp_set_data_domain(domain_2d, isd, ied, jsd, jed)
2288  !> @ingroup mpp_domains_mod
2289  interface mpp_set_data_domain
2290  module procedure mpp_set_data_domain1d
2291  module procedure mpp_set_data_domain2d
2292  end interface
2293 
2294  !> These routines set the axis specifications associated with the global domains.
2295  !! The domain is a derived type with private elements. These routines
2296  !! set the axis specifications associated with the global domains.
2297  !! The 2D version of these is a simple extension of 1D.
2298  !! <br>Example usage:
2299  !!
2300  !! call mpp_set_global_domain(domain_1d, isg, ieg)
2301  !! call mpp_set_global_domain(domain_2d, isg, ieg, jsg, jeg)
2302  !> @ingroup mpp_domains_mod
2303  interface mpp_set_global_domain
2304  module procedure mpp_set_global_domain1d
2305  module procedure mpp_set_global_domain2d
2306  end interface
2307 
2308  !> Retrieve list of PEs associated with a domain decomposition.
2309  !! The 1D version of this call returns an array of the PEs assigned to
2310  !! this 1D domain decomposition. In addition the optional argument pos may be
2311  !! used to retrieve the 0-based position of the domain local to the
2312  !! calling PE, i.e., <TT> domain\%list(pos)\%pe</TT> is the local PE,
2313  !! as returned by @ref mpp_pe().
2314  !! The 2D version of this call is identical to 1D version.
2315  !> @ingroup mpp_domains_mod
2316  interface mpp_get_pelist
2317  module procedure mpp_get_pelist1d
2318  module procedure mpp_get_pelist2d
2319  end interface
2320 
2321  !> Retrieve layout associated with a domain decomposition.
2322  !! The 1D version of this call returns the number of divisions that was assigned to this
2323  !! decomposition axis. The 2D version of this call returns an array of dimension 2 holding the
2324  !! results on two axes.
2325  !! <br>Example usage:
2326  !!
2327  !! call mpp_get_layout( domain, layout )
2328  !> @ingroup mpp_domains_mod
2329  interface mpp_get_layout
2330  module procedure mpp_get_layout1d
2331  module procedure mpp_get_layout2d
2332  end interface
2333  !> Private interface for internal usage, compares two sizes
2334  !> @ingroup mpp_domains_mod
2335  interface check_data_size
2336  module procedure check_data_size_1d
2337  module procedure check_data_size_2d
2338  end interface
2339 
2340  !> Nullify domain list. This interface is needed in mpp_domains_test.
2341  !! 1-D case can be added in if needed.
2342  !! <br>Example usage:
2343  !!
2344  !! call mpp_nullify_domain_list(domain)
2345  !> @ingroup mpp_domains_mod
2346  interface mpp_nullify_domain_list
2347  module procedure nullify_domain2d_list
2348  end interface
2349 
2350  ! Include variable "version" to be written to log file.
2351 #include<file_version.h>
2352  public version
2353 
2354 
2355 contains
2356 
2357 #include <mpp_define_nest_domains.inc>
2358 #include <mpp_domains_util.inc>
2359 #include <mpp_domains_comm.inc>
2360 #include <mpp_domains_define.inc>
2361 #include <mpp_domains_misc.inc>
2362 #include <mpp_domains_reduce.inc>
2363 #include <mpp_unstruct_domain.inc>
2364 
2365 end module mpp_domains_mod
subroutine mpp_get_overlap(domain, action, p, is, ie, js, je, dir, rot, position)
Set user stack size.
subroutine mpp_get_neighbor_pe_2d(domain, direction, pe)
Return PE North/South/East/West of this PE-domain. direction must be NORTH, SOUTH,...
subroutine mpp_get_global_domains1d(domain, begin, end, size)
Set user stack size.
integer function mpp_get_domain_npes(domain)
Set user stack size.
subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, grid_index, name)
integer, save a2_sort_len
length sorted memory list
subroutine mpp_define_nest_domains(nest_domain, domain, num_nest, nest_level, tile_fine, tile_coarse, istart_coarse, icount_coarse, jstart_coarse, jcount_coarse, npes_nest_tile, x_refine, y_refine, extra_halo, name)
Set up a domain to pass data between aligned coarse and fine grid of nested model.
logical function mpp_domainug_ne(a, b)
Overload the .ne. for UG.
subroutine mpp_modify_domain2d(domain_in, domain_out, isc, iec, jsc, jec, isg, ieg, jsg, jeg, whalo, ehalo, shalo, nhalo)
logical function mpp_domain2d_eq(a, b)
Set user stack size.
integer function mpp_get_tile_npes(domain)
Returns number of processors used on current tile.
integer, dimension(-1:max_dom_ids), save ids_idx
index of d_comm associated with sorted addesses
integer nthread_control_loop
Determine the loop order for packing and unpacking. When number of threads is greater than nthread_co...
integer, save i_sort_len
length sorted domain ids list
subroutine mpp_get_global_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, tile_count, position)
Set user stack size.
subroutine mpp_get_pelist1d(domain, pelist, pos)
Set user stack size.
subroutine mpp_set_global_domain1d(domain, begin, end, size)
Set user stack size.
subroutine mpp_get_layout2d(domain, layout)
Set user stack size.
subroutine mpp_get_tile_pelist(domain, pelist)
Get the processors list used on current tile.
logical efp_sum_overflow_check
If .true., always do overflow_check when doing EFP bitwise mpp_global_sum.
subroutine mpp_get_domain_components(domain, x, y, tile_count)
Retrieve 1D components of 2D decomposition.
subroutine mpp_get_domain_extents1d(domain, xextent, yextent)
Set user stack size.
logical function mpp_group_update_is_set(group)
Set user stack size.
subroutine logical mpp_broadcast_domain_1(domain)
broadcast domain (useful only outside the context of its own pelist)
integer function mpp_get_domain_tile_commid(domain)
Set user stack size.
character(len=32) debug_update_domain
namelist interface
subroutine mpp_domains_init(flags)
Initialize domain decomp package.
subroutine mpp_set_global_domain2d(domain, xbegin, xend, ybegin, yend, xsize, ysize, tile_count)
Set user stack size.
logical function mpp_domain1d_eq(a, b)
Set user stack size.
integer function, dimension(size(domain%tile_id(:))) mpp_get_tile_id(domain)
Returns the tile_id on current pe.
logical function mpp_domain_is_symmetry(domain)
Set user stack size.
subroutine mpp_create_super_grid_domain(domain)
Modifies the indices of the input domain to create the supergrid domain.
logical function mpp_mosaic_defined()
Accessor function for value of mosaic_defined.
integer function mpp_get_num_overlap(domain, action, p, position)
Set user stack size.
integer function mpp_get_current_ntile(domain)
Returns number of tile on current pe.
subroutine mpp_define_domains1d(global_indices, ndivs, domain, pelist, flags, halo, extent, maskmap, memory_size, begin_halo, end_halo)
Define data and computational domains on a 1D set of data (isg:ieg) and assign them to PEs.
subroutine mpp_set_domain_symmetry(domain, symmetry)
Set user stack size.
integer function mpp_get_ntile_count(domain)
Returns number of tiles in mosaic.
subroutine mpp_get_domain_extents2d(domain, xextent, yextent)
This will return xextent and yextent for each tile.
logical debug_message_passing
Will check the consistency on the boundary between processor/tile when updating domain for symmetric ...
subroutine mpp_get_global_domain1d(domain, begin, end, size, max_size)
Set user stack size.
integer function, dimension(2) mpp_get_io_domain_layout(domain)
Set user stack size.
subroutine mpp_shift_nest_domains(nest_domain, domain, delta_i_coarse, delta_j_coarse, extra_halo)
Based on mpp_define_nest_domains, but just resets positioning of nest Modifies the parent/coarse star...
subroutine mpp_get_neighbor_pe_1d(domain, direction, pe)
Return PE to the righ/left of this PE-domain.
subroutine mpp_define_mosaic_pelist(sizes, pe_start, pe_end, pelist, costpertile)
Defines a pelist for use with mosaic tiles.
subroutine mpp_get_tile_compute_domains(domain, xbegin, xend, ybegin, yend, position)
Set user stack size.
subroutine mpp_get_compute_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, x_is_global, y_is_global, tile_count, position)
Set user stack size.
subroutine mpp_get_compute_domain1d(domain, begin, end, size, max_size, is_global)
Set user stack size.
subroutine mpp_define_io_domain(domain, io_layout)
Define the layout for IO pe's for the given domain.
subroutine mpp_get_f2c_index_coarse(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse, nest_level, position)
subroutine mpp_compute_extent(isg, ieg, ndivs, ibegin, iend, extent)
Computes extents for a grid decomposition with the given indices and divisions.
integer, dimension(-1:max_fields), save d_comm_idx
index of d_comm associated with sorted addresses
logical function mpp_domain1d_ne(a, b)
Set user stack size.
subroutine mpp_modify_domain1d(domain_in, domain_out, cbegin, cend, gbegin, gend, hbegin, hend)
Modifies the exents of a domain.
subroutine mpp_get_layout1d(domain, layout)
Set user stack size.
subroutine mpp_broadcast_domain_ug(domain)
Broadcast domain (useful only outside the context of its own pelist)
subroutine mpp_get_update_pelist(domain, action, pelist, position)
Set user stack size.
subroutine mpp_set_data_domain2d(domain, xbegin, xend, ybegin, yend, xsize, ysize, x_is_global, y_is_global, tile_count)
Set user stack size.
subroutine mpp_get_compute_domains1d(domain, begin, end, size)
Set user stack size.
integer(i8_kind), parameter addr2_base
= 0x0000000000010000
integer function mpp_get_domain_tile_root_pe(domain)
Set user stack size.
subroutine mpp_check_field_2d(field_in, pelist1, pelist2, domain, mesg, w_halo, s_halo, e_halo, n_halo, force_abort, position)
This routine is used to do parallel checking for 2d data between n and m pe. The comparison is is don...
integer, dimension(-1:max_addrs2), save addrs2_idx
index of addr2 associated with d_comm
subroutine mpp_define_mosaic(global_indices, layout, domain, num_tile, num_contact, tile1, tile2, istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, pe_start, pe_end, pelist, whalo, ehalo, shalo, nhalo, xextent, yextent, maskmap, name, memory_size, symmetry, xflags, yflags, tile_id)
Defines a domain for mosaic tile grids.
integer, save n_comm
number of communicators used
subroutine mpp_domains_exit()
Exit mpp_domains_mod. Serves no particular purpose, but is provided should you require to re-initiali...
subroutine mpp_get_pelist2d(domain, pelist, pos)
Set user stack size.
integer, save n_ids
number of domain ids used (=i_sort_len; domain ids are never removed)
integer, save a_sort_len
length sorted memory list
subroutine mpp_get_memory_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, x_is_global, y_is_global, position)
Set user stack size.
subroutine mpp_check_field_3d(field_in, pelist1, pelist2, domain, mesg, w_halo, s_halo, e_halo, n_halo, force_abort, position)
This routine is used to do parallel checking for 3d data between n and m pe. The comparison is is don...
subroutine mpp_define_layout2d(global_indices, ndivs, layout)
subroutine mpp_set_compute_domain1d(domain, begin, end, size, is_global)
Set user stack size.
integer function mpp_get_domain_pe(domain)
Set user stack size.
subroutine mpp_get_tile_list(domain, tiles)
Return the tile_id on current pelist. one-tile-per-pe is assumed.
subroutine mpp_get_f2c_index_fine(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse, is_fine, ie_fine, js_fine, je_fine, nest_level, position)
subroutine mpp_compute_block_extent(isg, ieg, ndivs, ibegin, iend)
Computes the extents of a grid block.
logical function mpp_group_update_initialized(group)
Set user stack size.
integer, save dc_sort_len
length sorted comm keys (=num active communicators)
subroutine mpp_set_compute_domain2d(domain, xbegin, xend, ybegin, yend, xsize, ysize, x_is_global, y_is_global, tile_count)
Set user stack size.
type(domain2d) function, pointer mpp_get_io_domain(domain)
Set user stack size.
subroutine mpp_get_compute_domains2d(domain, xbegin, xend, xsize, ybegin, yend, ysize, position)
Set user stack size.
subroutine mpp_copy_domain2d(domain_in, domain_out)
Copies input 2d domain to the output 2d domain.
integer(i8_kind), dimension(max_fields), save dckey_sorted
list of sorted local addresses
subroutine mpp_get_memory_domain1d(domain, begin, end, size, max_size, is_global)
Retrieve the axis specifications associated with the memory domain of a 1D domain.
type(domaincommunicator2d), dimension(:), allocatable, target, save d_comm
domain communicators
subroutine mpp_get_domain_shift(domain, ishift, jshift, position)
Returns the shift values in the x- and y-directions according to the domain position.
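A minimal sketch of the usual pattern, assuming a domain already defined by mpp_define_domains and the CORNER position parameter exported by mpp_domains_mod; the subroutine and the field name psi are illustrative:

subroutine corner_field_sketch(domain)
  use mpp_domains_mod, only: domain2d, mpp_get_data_domain, mpp_get_domain_shift, CORNER
  implicit none
  type(domain2d), intent(inout) :: domain        ! assumed already defined
  real, allocatable :: psi(:,:)                  ! illustrative corner-point field
  integer :: isd, ied, jsd, jed, ishift, jshift
  call mpp_get_data_domain(domain, isd, ied, jsd, jed)
  call mpp_get_domain_shift(domain, ishift, jshift, position=CORNER)
  ! One extra row/column for the staggered (corner) field.
  allocate(psi(isd:ied+ishift, jsd:jed+jshift))
end subroutine corner_field_sketch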
logical function mpp_domain2d_ne(a, b)
Overload the .ne. operator for domain2D.
logical function mpp_domain_is_initialized(domain)
Returns whether the given domain has been initialized.
subroutine mpp_broadcast_domain_nest_coarse(domain, tile_coarse)
Broadcast nested domain (useful only outside the context of its own pelist)
subroutine nullify_domain2d_list(domain)
Nullify the domain2D list.
integer, dimension(-1:max_addrs), save addrs_idx
index of address associated with d_comm
logical function mpp_domain_is_tile_root_pe(domain)
Returns whether the current PE is the root PE of the tile; if the number of tiles on the current PE is greater than 1,...
character(len=name_length) function mpp_get_domain_name(domain)
Returns the name of the domain.
subroutine mpp_get_global_domains2d(domain, xbegin, xend, xsize, ybegin, yend, ysize, position)
Retrieve the global domain extents associated with a 2D decomposition.
subroutine mpp_get_c2f_index(nest_domain, is_fine, ie_fine, js_fine, je_fine, is_coarse, ie_coarse, js_coarse, je_coarse, dir, nest_level, position)
Get the index of the data passed from coarse grid to fine grid.
integer function mpp_get_domain_commid(domain)
Returns the communicator ID associated with the domain.
integer, save n_addrs2
number of memory addresses used
subroutine mpp_get_data_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, x_is_global, y_is_global, tile_count, position)
Retrieve the axis specifications associated with the data domain of a 2D domain.
subroutine mpp_get_domain_pelist(domain, pelist)
Retrieve the list of PEs associated with the domain decomposition.
integer(i8_kind), dimension(max_addrs2), save addrs2_sorted
list of sorted local addresses
subroutine mpp_domains_set_stack_size(n)
Set user stack size.
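For completeness, a one-line usage sketch; the value shown is arbitrary and simply needs to be large enough for the largest update buffer in the run:

  use mpp_domains_mod, only: mpp_domains_set_stack_size
  call mpp_domains_set_stack_size(1000000)   ! illustrative size; raise it if a stack-overflow error is reported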
subroutine mpp_clear_group_update(group)
Clears the given group update.
subroutine mpp_get_update_size(domain, nsend, nrecv, position)
Returns the number of sends and receives associated with a domain update.
integer(i8_kind), dimension(max_addrs), save addrs_sorted
list of sorted local addresses
integer(i8_kind), dimension(max_dom_ids), save ids_sorted
list of sorted domain identifiers
integer function mpp_get_domain_root_pe(domain)
Returns the root PE of the domain.
logical function mpp_domainug_eq(a, b)
Overload the .eq. for UG.
subroutine mpp_set_data_domain1d(domain, begin, end, size, is_global)
Set the axis specifications associated with the data domain of a 1D domain.
recursive subroutine mpp_copy_domain1d(domain_in, domain_out)
Copies input 1d domain to the output 1d domain.
integer, save n_addrs
number of memory addresses used
subroutine mpp_get_data_domain1d(domain, begin, end, size, max_size, is_global)
Retrieve the axis specifications associated with the data domain of a 1D domain.
subroutine mpp_define_domains2d(global_indices, layout, domain, pelist, xflags, yflags, xhalo, yhalo, xextent, yextent, maskmap, name, symmetry, memory_size, whalo, ehalo, shalo, nhalo, is_mosaic, tile_count, tile_id, complete, x_cyclic_offset, y_cyclic_offset)
Define 2D data and computational domains on a global rectilinear Cartesian domain (isg:ieg,...
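A minimal end-to-end sketch of building a decomposition with mpp_define_domains; the 100x100 global grid, the 2-point halos, and the domain name are illustrative assumptions:

program define_domains_sketch
  use mpp_mod,         only: mpp_init, mpp_exit, mpp_npes
  use mpp_domains_mod, only: domain2d, mpp_domains_init, mpp_define_layout, &
                             mpp_define_domains, mpp_get_compute_domain
  implicit none
  type(domain2d) :: domain
  integer :: layout(2), is, ie, js, je
  call mpp_init()
  call mpp_domains_init()
  ! Choose a balanced layout for the available PEs, then decompose the grid.
  call mpp_define_layout( (/1,100,1,100/), mpp_npes(), layout )
  call mpp_define_domains( (/1,100,1,100/), layout, domain, xhalo=2, yhalo=2, name='sketch' )
  ! Each PE can now query the index range it owns.
  call mpp_get_compute_domain(domain, is, ie, js, je)
  call mpp_exit()
end program define_domains_sketch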
Private interface for internal usage, compares two sizes.
Broadcasts the domain to every PE. Only useful outside the context of its own pelist.
Parallel checking between two ensembles which run on different sets of PEs at the same time. There are tw...
Private interface used for non-blocking updates.
Completes a pending non-blocking group update. Must follow a call to mpp_start_group_update.
Must be used after a call to mpp_start_update_domains in order to complete a nonblocking domain updat...
Constructor for the mpp_group_update_type which is then used with mpp_start_group_update.
Deallocate given 1D or 2D domain.
Set up a domain decomposition.
Retrieve layout associated with a domain decomposition. Given a global 2D domain and the number of di...
Defines a nullified 1D or 2D domain.
Private interface to update the data domain of a 3D field whose computational domains have been computed.
Private helper interface used by mpp_global_field.
Private interface used for mpp_update_domains.
Passes a data field from an unstructured grid to a structured grid. Example usage:
Used by mpp_update_nest_coarse to perform domain updates.
Get the boundary data for symmetric domain when the data is at C, E, or N-cell center....
These routines retrieve the axis specifications associated with the compute domains....
Retrieve the entire array of compute domain extents associated with a decomposition.
These routines retrieve the axis specifications associated with the data domains. The domain is a der...
Get the index of the data passed from fine grid to coarse grid. Example usage:
These routines retrieve the axis specifications associated with the global domains....
Retrieve layout associated with a domain decomposition. The 1D version of this call returns the number...
These routines retrieve the axis specifications associated with the memory domains....
Retrieve PE number of a neighboring domain.
Retrieve list of PEs associated with a domain decomposition. The 1D version of this call returns an a...
Fill in a global array from domain-decomposed arrays.
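A minimal sketch of assembling a global array with mpp_global_field, assuming the domain was built for a 100x100 global grid (the size, subroutine name, and array names are illustrative):

subroutine gather_sketch(domain)
  use mpp_mod,         only: mpp_pe
  use mpp_domains_mod, only: domain2d, mpp_get_compute_domain, mpp_global_field
  implicit none
  type(domain2d), intent(inout) :: domain
  real, allocatable :: local(:,:)
  real :: global(100,100)                        ! illustrative global extent; must match the domain
  integer :: is, ie, js, je
  call mpp_get_compute_domain(domain, is, ie, js, je)
  allocate(local(is:ie, js:je))                  ! compute- or data-domain sized arrays are accepted
  local = real(mpp_pe())                         ! fill with something recognizable
  call mpp_global_field(domain, local, global)   ! every PE receives the assembled global field
end subroutine gather_sketch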
Same functionality as mpp_global_field but for unstructured domains.
Global max of domain-decomposed arrays. mpp_global_max is used to get the maximum value of a domain-...
Global min of domain-decomposed arrays. mpp_global_min is used to get the minimum value of a domain-...
Global sum of domain-decomposed arrays. mpp_global_sum is used to get the sum of a domain-decomposed...
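A minimal sketch of the three reductions above, assuming an existing domain2d and a field dimensioned on its data domain; BITWISE_EXACT_SUM is the mpp_domains_mod flag for a sum that reproduces across layouts:

subroutine reduction_sketch(domain, field)
  use mpp_domains_mod, only: domain2d, mpp_global_sum, mpp_global_max, mpp_global_min, &
                             BITWISE_EXACT_SUM
  implicit none
  type(domain2d), intent(inout) :: domain
  real, intent(in) :: field(:,:)                                 ! assumed dimensioned on the data domain
  real :: gsum, gmax, gmin
  gsum = mpp_global_sum(domain, field)                           ! fast, order-dependent sum
  gsum = mpp_global_sum(domain, field, flags=BITWISE_EXACT_SUM)  ! reproducible across layouts
  gmax = mpp_global_max(domain, field)
  gmin = mpp_global_min(domain, field)
end subroutine reduction_sketch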
Modifies the extents (compute, data and global) of a given domain.
Nullify domain list. This interface is needed in mpp_domains_test. 1-D case can be added in if needed...
Passes data from a structured grid to an unstructured grid. Example usage:
Passes a data field from a structured grid to an unstructured grid. Example usage:
Reorganization of distributed global arrays. mpp_redistribute is used to reorganize a distributed ar...
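A minimal sketch of mpp_redistribute, assuming two decompositions of the same global grid have already been defined and the arrays allocated on their respective domains; all names are illustrative:

subroutine redistribute_sketch(domain_in, field_in, domain_out, field_out)
  use mpp_domains_mod, only: domain2d, mpp_redistribute
  implicit none
  type(domain2d), intent(inout) :: domain_in, domain_out   ! two decompositions of the same global grid
  real, intent(in)    :: field_in(:,:)                     ! data on the source decomposition
  real, intent(inout) :: field_out(:,:)                    ! receives data on the target decomposition
  call mpp_redistribute(domain_in, field_in, domain_out, field_out)
end subroutine redistribute_sketch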
These routines set the axis specifications associated with the compute domains. The domain is a deriv...
These routines set the axis specifications associated with the data domains. The domain is a derived ...
These routines set the axis specifications associated with the global domains. The domain is a derive...
Private interface used for non-blocking updates.
Starts a non-blocking group update. Must be followed by a call to mpp_complete_group_update. mpp_gro...
Interface to start halo updates. mpp_start_update_domains is used to start a halo update of a domain-d...
Performs halo updates for a given domain.
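A minimal halo-update sketch, assuming a domain defined with nonzero halos; the field name and fill values are illustrative:

subroutine halo_update_sketch(domain)
  use mpp_domains_mod, only: domain2d, mpp_get_compute_domain, mpp_get_data_domain, &
                             mpp_update_domains
  implicit none
  type(domain2d), intent(inout) :: domain        ! assumed defined with nonzero halos
  real, allocatable :: a(:,:)
  integer :: is, ie, js, je, isd, ied, jsd, jed
  call mpp_get_compute_domain(domain, is, ie, js, je)
  call mpp_get_data_domain(domain, isd, ied, jsd, jed)
  allocate(a(isd:ied, jsd:jed))                  ! data domain = compute domain plus halos
  a = 0.0
  a(is:ie, js:je) = 1.0                          ! interior values owned by this PE
  call mpp_update_domains(a, domain)             ! fill halo points from neighbouring PEs
end subroutine halo_update_sketch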
Similar to mpp_update_domains, updates adjoint domains.
Pass the data from fine grid to fill the buffer to be ready to be interpolated onto coarse grid....
Pass the data from coarse grid to fill the buffer to be ready to be interpolated onto fine grid....
Type used to represent the contact between tiles.
One dimensional domain used to manage shared data access between pes.
A private type used to specify index limits for a domain decomposition.
The domain2D type contains all the necessary information to define the global, compute and data domai...
Private type to specify multiple index limits and pe information for a 2D domain.
Used to specify index limits along an axis of a domain.
Used for sending domain data between PEs.
Domain information for managing data on unstructured grids.
Index bounds for use in nestSpec.
Used for updates on a group.
Domain with nested fine and coarse tiles.
Private type to hold data for each level of nesting.
Used to specify bounds and index information for nested tiles as a linked list.
Used for nonblocking data transfer.
Type for overlapping data.
Private type for overlap specifications.
Upper and lower x and y bounds for a tile.
Private type for axis specification data for an unstructured grid.
Private type for axis specification data for an unstructured domain.
This interface uses a conversion to an integer representation of real numbers to give order-invariant...
Definition: mpp_efp.F90:71
subroutine, public mpp_memuse_end(text, unit)
End the memory collection, and report on total memory used during the execution of the model run.
subroutine, public mpp_memuse_begin
Initialize the memory module, and record the initial memory use.
subroutine mpp_sync_self(pelist, check, request, msg_size, msg_type)
This is to check if the current PE's outstanding puts are complete, but we can't use shmem_fence because w...
integer function stdout()
This function returns the current standard Fortran unit number for output.
Definition: mpp_util.inc:43
subroutine mpp_set_current_pelist(pelist, no_sync)
Set context pelist.
Definition: mpp_util.inc:490
subroutine mpp_init(flags, localcomm, test_level, alt_input_nml_path)
Initialize the mpp_mod module. Must be called before any usage.
integer function stderr()
This function returns the current standard Fortran unit number for error messages.
Definition: mpp_util.inc:51
subroutine mpp_type_free(dtype)
Deallocates memory for mpp_type objects. @TODO This should probably not take a pointer,...
subroutine mpp_declare_pelist(pelist, name, commID)
Declare a pelist.
Definition: mpp_util.inc:461
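A minimal sketch of the usual pelist pattern: mpp_declare_pelist is called by every PE, while mpp_set_current_pelist is called only by the members. The split into the first half of the PEs, and the pelist name, are illustrative; PEs are assumed to be numbered from 0:

subroutine sub_pelist_sketch()
  use mpp_mod, only: mpp_pe, mpp_npes, mpp_declare_pelist, mpp_set_current_pelist
  implicit none
  integer, allocatable :: pelist(:)
  integer :: n, half
  half = mpp_npes()/2
  allocate(pelist(half))
  pelist = (/ (n, n = 0, half-1) /)                    ! first half of the PEs
  call mpp_declare_pelist(pelist, name='first_half')   ! collective: called by every PE
  if (any(pelist == mpp_pe())) call mpp_set_current_pelist(pelist)
end subroutine sub_pelist_sketch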
integer function stdlog()
This function returns the current standard Fortran unit number for log messages. Log messages,...
Definition: mpp_util.inc:59
integer function mpp_npes()
Returns processor count for current pelist.
Definition: mpp_util.inc:421
integer function mpp_pe()
Returns processor ID.
Definition: mpp_util.inc:407
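The two inquiry functions above are typically among the first calls after initialization; a trivial sketch:

program hello_pe_sketch
  use mpp_mod, only: mpp_init, mpp_exit, mpp_pe, mpp_npes
  implicit none
  call mpp_init()
  write(*,'(a,i0,a,i0)') 'PE ', mpp_pe(), ' of ', mpp_npes()
  call mpp_exit()
end program hello_pe_sketch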
subroutine mpp_sync(pelist, do_self)
Synchronize PEs in list.
integer function mpp_clock_id(name, flags, grain)
Return an ID for a new or existing clock.
Definition: mpp_util.inc:705
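A minimal timing sketch pairing mpp_clock_id with the begin/end calls from mpp_mod; the clock name and subroutine name are illustrative:

subroutine timed_section_sketch()
  use mpp_mod, only: mpp_clock_id, mpp_clock_begin, mpp_clock_end
  implicit none
  integer :: id_halo
  id_halo = mpp_clock_id('halo update')   ! returns an ID for a new or existing clock
  call mpp_clock_begin(id_halo)
  ! ... timed work, e.g. a call to mpp_update_domains ...
  call mpp_clock_end(id_halo)
end subroutine timed_section_sketch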
Scatter a vector across all PEs.
Definition: mpp.F90:762
Perform parallel broadcasts.
Definition: mpp.F90:1091
Error handler.
Definition: mpp.F90:382
Reduction operations. Find the max of scalar a from the PEs in pelist; the result is also automatically br...
Definition: mpp.F90:538
Reduction operations. Find the min of scalar a from the PEs in pelist; the result is also automatically br...
Definition: mpp.F90:560
Receive data from another PE.
Definition: mpp.F90:937
Send data to a receiving PE.
Definition: mpp.F90:1004
Reduction operation.
Definition: mpp.F90:597
Calculates the sum of a given numerical array across PEs for adjoint domains.
Definition: mpp.F90:642
Basic message-passing call.
Definition: mpp.F90:872
Create a mpp_type variable.
Definition: mpp.F90:510
Data types for generalized data transfer (e.g. MPI_Type)
Definition: mpp.F90:287