mpp_domains.F90
1 !***********************************************************************
2 !* Apache License 2.0
3 !*
4 !* This file is part of the GFDL Flexible Modeling System (FMS).
5 !*
6 !* Licensed under the Apache License, Version 2.0 (the "License");
7 !* you may not use this file except in compliance with the License.
8 !* You may obtain a copy of the License at
9 !*
10 !* http://www.apache.org/licenses/LICENSE-2.0
11 !*
12 !* FMS is distributed in the hope that it will be useful, but WITHOUT
13 !* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied;
14 !* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
15 !* PARTICULAR PURPOSE. See the License for the specific language
16 !* governing permissions and limitations under the License.
17 !***********************************************************************
18 !-----------------------------------------------------------------------
19 !> @defgroup mpp_domains_mod mpp_domains_mod
20 !> @ingroup mpp
21 !> @brief Domain decomposition and domain update for message-passing codes
22 !> @author V. Balaji SGI/GFDL Princeton University
23 !!
24 !> A set of simple calls for domain
25 !! decomposition and domain updates on rectilinear grids. It requires the
26 !! module mpp.F90, upon which it is built.\n
27 !! Scalable implementations of finite-difference codes are generally
28 !! based on decomposing the model domain into subdomains that are
29 !! distributed among processors. These domains will then be obliged to
30 !! exchange data at their boundaries if data dependencies are merely
31 !! nearest-neighbour, or may need to acquire information from the global
32 !! domain if there are extended data dependencies, as in the spectral
33 !! transform. The domain decomposition is a key operation in the
34 !! development of parallel codes.\n
35 !!\n
36 !! mpp_domains_mod provides a domain decomposition and domain
37 !! update API for rectilinear grids, built on top of the mpp_mod API for message passing.
38 !! Features of mpp_domains_mod include:\n
39 !!\n
40 !! Simple, minimal API, with free access to underlying API for more complicated stuff.\n
41 !!\n
42 !! Designed toward typical use in climate/weather CFD codes.\n
43 !!
44 !> @par Domains
45 !! It is assumed that domain decomposition will mainly be in 2
46 !! horizontal dimensions, which will in general be the two
47 !! fastest-varying indices. There is a separate implementation of 1D
48 !! decomposition on the fastest-varying index, and 1D decomposition on
49 !! the second index, treated as a special case of 2D decomposition, is
50 !! also possible. We define domain as the grid associated with a <I>task</I>.
51 !! We define the compute domain as the set of gridpoints that are
52 !! computed by a task, and the data domain as the set of points
53 !! that are required by the task for the calculation. There can in
54 !! general be more than 1 task per PE, though often
55 !! the number of domains is the same as the processor count. We define
56 !! the global domain as the global computational domain of the
57 !! entire model (i.e, the same as the computational domain if run on a
58 !! single processor). 2D domains are defined using a derived type domain2D,
59 !! constructed as follows (see comments in code for more details).
60 !!
61 !! type, public :: domain_axis_spec
62 !! private
63 !! integer :: begin, end, size, max_size
64 !! logical :: is_global
65 !! end type domain_axis_spec
66 !!
67 !! type, public :: domain1D
68 !! private
69 !! type(domain_axis_spec) :: compute, data, global, active
70 !! logical :: mustputb, mustgetb, mustputf, mustgetf, folded
71 !! type(domain1D), pointer, dimension(:) :: list
72 !! integer :: pe ! pe to which the domain is assigned
73 !! integer :: pos
74 !! end type domain1D
75 !!
76 !! type, public :: domain2D
77 !! private
78 !! type(domain1D) :: x
79 !! type(domain1D) :: y
80 !! type(domain2D), pointer, dimension(:) :: list
81 !! integer :: pe ! PE to which this domain is assigned
82 !! integer :: pos
83 !! end type domain2D
84 !!
85 !! type(domain1D), public :: NULL_DOMAIN1D
86 !! type(domain2D), public :: NULL_DOMAIN2D
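!!
!! A typical calling sequence, in sketch form (argument and field names are illustrative):
!! @code{.F90}
!! call mpp_define_layout( global_indices, mpp_npes(), layout )
!! call mpp_define_domains( global_indices, layout, domain, xhalo=2, yhalo=2 )
!! call mpp_get_compute_domain( domain, is, ie, js, je )
!! call mpp_get_data_domain( domain, isd, ied, jsd, jed )
!! ! ... allocate fields on the data domain, compute on the compute domain ...
!! call mpp_update_domains( field, domain )
!! @endcode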
87 
88 !> @addtogroup mpp_domains_mod
89 !> @{
90 
91 module mpp_domains_mod
92 
93 #if defined(use_libMPI)
94  use mpi
95 #endif
96 
97  use mpp_parameter_mod, only : mpp_debug, mpp_verbose, mpp_domain_time
98 use mpp_parameter_mod, only : global_data_domain, cyclic_global_domain, global, cyclic
99  use mpp_parameter_mod, only : agrid, bgrid_sw, bgrid_ne, cgrid_ne, cgrid_sw, dgrid_ne, dgrid_sw
100  use mpp_parameter_mod, only : fold_west_edge, fold_east_edge, fold_south_edge, fold_north_edge
101  use mpp_parameter_mod, only : wupdate, eupdate, supdate, nupdate, xupdate, yupdate
102  use mpp_parameter_mod, only : non_bitwise_exact_sum, bitwise_exact_sum, mpp_domain_time
103  use mpp_parameter_mod, only : center, corner, scalar_pair, scalar_bit, bitwise_efp_sum
104  use mpp_parameter_mod, only : north, north_east, east, south_east
105  use mpp_parameter_mod, only : south, south_west, west, north_west
106  use mpp_parameter_mod, only : max_domain_fields, null_pe, domain_id_base
107  use mpp_parameter_mod, only : zero, ninety, minus_ninety, one_hundred_eighty, max_tiles
108  use mpp_parameter_mod, only : event_send, event_recv, root_global
109  use mpp_parameter_mod, only : nonblock_update_tag, edgeonly, edgeupdate
110  use mpp_parameter_mod, only : nonsymedge, nonsymedgeupdate
111  use mpp_data_mod, only : mpp_domains_stack, ptr_domains_stack
112  use mpp_data_mod, only : mpp_domains_stack_nonblock, ptr_domains_stack_nonblock
113  use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_npes, mpp_error, fatal, warning, note
115  use mpp_mod, only : mpp_clock_id, mpp_clock_begin, mpp_clock_end
116  use mpp_mod, only : mpp_max, mpp_min, mpp_sum, mpp_get_current_pelist, mpp_broadcast
117  use mpp_mod, only : mpp_sum_ad
118  use mpp_mod, only : mpp_sync, mpp_init, lowercase
119  use mpp_mod, only : input_nml_file, mpp_alltoall
120  use mpp_mod, only : mpp_type, mpp_byte
121  use mpp_mod, only : mpp_type_create, mpp_type_free
122  use mpp_mod, only : comm_tag_1, comm_tag_2, comm_tag_3, comm_tag_4
123  use mpp_mod, only : mpp_declare_pelist, mpp_set_current_pelist
124  use mpp_memutils_mod, only : mpp_memuse_begin, mpp_memuse_end
125  use mpp_efp_mod, only : mpp_reproducing_sum
126  use platform_mod
127  implicit none
128  private
129 
130  !--- public parameters imported from mpp_domains_parameter_mod
131  public :: global_data_domain, cyclic_global_domain, bgrid_ne, bgrid_sw, cgrid_ne, cgrid_sw, agrid
132  public :: dgrid_ne, dgrid_sw, fold_west_edge, fold_east_edge, fold_south_edge, fold_north_edge
133  public :: wupdate, eupdate, supdate, nupdate, xupdate, yupdate
134  public :: non_bitwise_exact_sum, bitwise_exact_sum, mpp_domain_time, bitwise_efp_sum
135  public :: center, corner, scalar_pair
136  public :: north, north_east, east, south_east
137  public :: south, south_west, west, north_west
138  public :: zero, ninety, minus_ninety, one_hundred_eighty
139  public :: edgeupdate, nonsymedgeupdate
140 
141  !--- public data imported from mpp_data_mod
142  public :: null_domain1d, null_domain2d
143 
146 
147  !--- public interface from mpp_domains_util.h
150  public :: mpp_get_layout, mpp_get_pelist, operator(.EQ.), operator(.NE.)
156  public :: mpp_get_tile_list
165  public :: mpp_clear_group_update
167  public :: mpp_get_global_domains
169 
170  !--- public interface from mpp_domains_reduce.h
173  !--- public interface from mpp_domains_misc.h
181  public :: mpp_get_boundary
182  public :: mpp_update_domains_ad
183  public :: mpp_get_boundary_ad
185  !--- public interface from mpp_domains_define.h
190 
191  !--- public interface for unstruct domain
192  public :: mpp_define_unstruct_domain, domainug, mpp_get_ug_io_domain
193  public :: mpp_get_ug_domain_npes, mpp_get_ug_compute_domain, mpp_get_ug_domain_tile_id
194  public :: mpp_get_ug_domain_pelist, mpp_get_ug_domain_grid_index
195  public :: mpp_get_ug_domain_ntiles, mpp_get_ug_global_domain
196  public :: mpp_global_field_ug, mpp_get_ug_domain_tile_list, mpp_get_ug_compute_domains
197  public :: mpp_define_null_ug_domain, null_domainug, mpp_get_ug_domains_index
198  public :: mpp_get_ug_sg_domain, mpp_get_ug_domain_tile_pe_inf
199 
200  !--- public interface from mpp_define_domains.inc
202  public :: mpp_shift_nest_domains
203  public :: mpp_get_nest_coarse_domain, mpp_get_nest_fine_domain
204  public :: mpp_is_nest_coarse, mpp_is_nest_fine
205  public :: mpp_get_nest_pelist, mpp_get_nest_npes
206  public :: mpp_get_nest_fine_pelist, mpp_get_nest_fine_npes
207 
208 !----------
209 !ug support
210  public :: mpp_domain_ug_is_tile_root_pe
211  public :: mpp_deallocate_domainug
212  public :: mpp_get_io_domain_ug_layout
213 !----------
214 
215  integer, parameter :: NAME_LENGTH = 64
216  integer, parameter :: MAXLIST = 100
217  integer, parameter :: MAXOVERLAP = 200
218  integer, parameter :: FIELD_S = 0
219  integer, parameter :: FIELD_X = 1
220  integer, parameter :: FIELD_Y = 2
221 
222  !> @}
223 
224  ! data types used by mpp_domains_mod
225 
226  !> @brief Private type for axis specification data for an unstructured grid
227  !> @ingroup mpp_domains_mod
228 type :: unstruct_axis_spec
229 private
230  integer :: begin, end, size, max_size
231  integer :: begin_index, end_index
232  end type unstruct_axis_spec
233 
234  !> Private type for axis specification data for an unstructured domain
235  !> @ingroup mpp_domains_mod
236 type :: unstruct_domain_spec
237 private
238  type(unstruct_axis_spec) :: compute
239  integer :: pe
240  integer :: pos
241  integer :: tile_id
242  end type unstruct_domain_spec
243 
244  !> Private type
245  !> @ingroup mpp_domains_mod
246 type :: unstruct_overlap_type
247 private
248  integer :: count = 0
249  integer :: pe
250  integer, pointer :: i(:)=>null()
251  integer, pointer :: j(:)=>null()
252  end type unstruct_overlap_type
253 
254  !> Private type
255  !> @ingroup mpp_domains_mod
256 type :: unstruct_pass_type
257 private
258  integer :: nsend, nrecv
259  type(unstruct_overlap_type), pointer :: recv(:)=>null()
260  type(unstruct_overlap_type), pointer :: send(:)=>null()
261  end type unstruct_pass_type
262 
263  !> Domain information for managing data on unstructured grids
264  !> @ingroup mpp_domains_mod
265  type :: domainug
266  private
267  type(unstruct_axis_spec) :: compute, global !< axis specifications
268  type(unstruct_domain_spec), pointer :: list(:)=>null() !<
269  type(domainug), pointer :: io_domain=>null() !<
270  type(unstruct_pass_type) :: sg2ug
271  type(unstruct_pass_type) :: ug2sg
272  integer, pointer :: grid_index(:) => null() !< index of grid on current pe
273  type(domain2d), pointer :: sg_domain => null()
274  integer :: pe
275  integer :: pos
276  integer :: ntiles
277  integer :: tile_id
278  integer :: tile_root_pe
279  integer :: tile_npes
280  integer :: npes_io_group
281  integer(i4_kind) :: io_layout
282  end type domainug
283 
284  !> Used to specify index limits along an axis of a domain
285  !> @ingroup mpp_domains_mod
286 type :: domain_axis_spec
287 private
288  integer :: begin !< start of domain axis
289  integer :: end !< end of domain axis
290  integer :: size !< size of domain axis
291  integer :: max_size !< max size in set
292  logical :: is_global !< .true. if domain axis extent covers global domain
293  end type domain_axis_spec
294 
295  !> A private type used to specify index limits for a domain decomposition
296  !> @ingroup mpp_domains_mod
297 type :: domain1d_spec
298 private
299  type(domain_axis_spec) :: compute
300  type(domain_axis_spec) :: global
301  integer :: pos
302  end type domain1d_spec
303 
304  !> @brief Private type to specify multiple index limits and pe information for a 2D domain
305  !> @ingroup mpp_domains_mod
306 type :: domain2d_spec
307 private
308  type(domain1d_spec), pointer :: x(:) => null() !< x-direction domain decomposition
309  type(domain1d_spec), pointer :: y(:) => null() !< y-direction domain decomposition
310  integer, pointer :: tile_id(:) => null() !< tile id of each tile
311  integer :: pe !< PE to which this domain is assigned
312  integer :: pos !< position of this PE within link list
313  integer :: tile_root_pe !< root pe of tile.
314  end type domain2d_spec
315 
316  !> Type for overlapping data
317  !> @ingroup mpp_domains_mod
318  type :: overlap_type
319  private
320  integer :: count = 0 !< number of overlapping
321  integer :: pe
322  integer :: start_pos !< start position in the buffer
323  integer :: totsize !< all message size
324  integer , pointer :: msgsize(:) => null() !< overlapping msgsize to be sent or received
325  integer, pointer :: tileme(:) => null() !< my tile id for this overlap
326  integer, pointer :: tilenbr(:) => null() !< neighbor tile id for this overlap
327  integer, pointer :: is(:) => null() !< starting i-index
328  integer, pointer :: ie(:) => null() !< ending i-index
329  integer, pointer :: js(:) => null() !< starting j-index
330  integer, pointer :: je(:) => null() !< ending j-index
331  integer, pointer :: dir(:) => null() !< direction ( value 1,2,3,4 = E,S,W,N)
332  integer, pointer :: rotation(:) => null() !< rotation angle.
333  integer, pointer :: index(:) => null() !< for refinement
334  logical, pointer :: from_contact(:) => null() !< indicate if the overlap is computed from
335  !! define_contact_overlap
336  end type overlap_type
337 
338  !> Private type for overlap specifications
339  !> @ingroup mpp_domains_mod
340  type :: overlapspec
341  private
342  integer :: whalo, ehalo, shalo, nhalo !< halo size
343  integer :: xbegin, xend, ybegin, yend
344  integer :: nsend, nrecv
345  integer :: sendsize, recvsize
346  type(overlap_type), pointer :: send(:) => null()
347  type(overlap_type), pointer :: recv(:) => null()
348  type(overlapspec), pointer :: next => null()
349  end type overlapspec
350 
351  !> @brief Upper and lower x and y bounds for a tile
352  !> @ingroup mpp_domains_mod
353  type :: tile_type
354  integer :: xbegin, xend, ybegin, yend
355  end type tile_type
356 
357  !> The domain2D type contains all the necessary information to
358  !! define the global, compute and data domains of each task, as well as the PE
359  !! associated with the task. The PEs from which remote data may be
360  !! acquired to update the data domain are also contained in a linked list of neighbours.
361  !!
362 !! Domain types of higher rank can be constructed from type domain1D;
363 !! typically we only need 1D and 2D, but could need higher (e.g 3D LES).
364 !! Some elements are repeated below if they are needed once per domain, not once per axis.
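!! Since its components are private, user code queries a domain2D through accessor
!! routines; a minimal sketch (the field name a is hypothetical):
!! @code{.F90}
!! call mpp_get_compute_domain( domain, is, ie, js, je )
!! call mpp_get_data_domain( domain, isd, ied, jsd, jed )
!! allocate( a(isd:ied,jsd:jed) )  ! allocate on the data domain; compute on (is:ie,js:je)
!! @endcode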
365  !> @ingroup mpp_domains_mod
366  TYPE :: domain2d
367  private
368 character(len=NAME_LENGTH) :: name='unnamed' !< name of the domain, default is "unnamed"
369  integer(i8_kind) :: id
370  integer :: pe !< PE to which this domain is assigned
371  integer :: fold
372  integer :: pos !< position of this PE within link list
373  logical :: symmetry !< indicate the domain is symmetric or non-symmetric.
374  integer :: whalo, ehalo !< halo size in x-direction
375  integer :: shalo, nhalo !< halo size in y-direction
376  integer :: ntiles !< number of tiles within mosaic
377  integer :: comm_id !< MPI communicator for the mosaic
378  integer :: tile_comm_id !< MPI communicator for this tile of domain
379  integer :: max_ntile_pe !< maximum value in the pelist of number of tiles on each pe.
380  integer :: ncontacts !< number of contact region within mosaic.
381  logical :: rotated_ninety !< indicate if any contact rotate NINETY or MINUS_NINETY
382  logical :: initialized=.false. !< indicate if the overlapping is computed or not.
383  integer :: tile_root_pe !< root pe of current tile.
384  integer :: io_layout(2) !< io_layout, will be set through mpp_define_io_domain
385  !! default = domain layout
386  integer, pointer :: pearray(:,:) => null() !< pe of each layout position
387  integer, pointer :: tile_id(:) => null() !< tile id of each tile on current processor
388  integer, pointer :: tile_id_all(:)=> null() !< tile id of all the tiles of domain
389  type(domain1d), pointer :: x(:) => null() !< x-direction domain decomposition
390  type(domain1d), pointer :: y(:) => null() !< y-direction domain decomposition
391  type(domain2d_spec),pointer :: list(:) => null() !< domain decomposition on pe list
392  type(tile_type), pointer :: tilelist(:) => null() !< store tile information
393  type(overlapspec), pointer :: check_c => null() !< send and recv information for boundary
394  !! consistency check of C-cell
395  type(overlapspec), pointer :: check_e => null() !< send and recv information for boundary
396  !! consistency check of E-cell
397  type(overlapspec), pointer :: check_n => null() !< send and recv information for boundary
398  !! consistency check of N-cell
399  type(overlapspec), pointer :: bound_c => null() !< send information for getting boundary
400  !! value for symmetry domain.
401  type(overlapspec), pointer :: bound_e => null() !< send information for getting boundary
402  !! value for symmetry domain.
403  type(overlapspec), pointer :: bound_n => null() !< send information for getting boundary
404  !! value for symmetry domain.
405  type(overlapspec), pointer :: update_t => null() !< send and recv information for halo update of T-cell.
406  type(overlapspec), pointer :: update_e => null() !< send and recv information for halo update of E-cell.
407  type(overlapspec), pointer :: update_c => null() !< send and recv information for halo update of C-cell.
408  type(overlapspec), pointer :: update_n => null() !< send and recv information for halo update of N-cell.
409  type(domain2d), pointer :: io_domain => null() !< domain for IO, will be set through calling
410  !! mpp_set_io_domain ( this will be changed).
411  END TYPE domain2d
412 
413  !> Type used to represent the contact between tiles.
414  !> @note This type will only be used in mpp_domains_define.inc
415  !> @ingroup mpp_domains_mod
416  type, private :: contact_type
417  private
418  integer :: ncontact !< number of neighbor tile.
419  integer, pointer :: tile(:) =>null() !< neighbor tile
420  integer, pointer :: align1(:)=>null(), align2(:)=>null() !< alignment of me and neighbor
421  real, pointer :: refine1(:)=>null(), refine2(:)=>null() !
422 integer, pointer :: is1(:)=>null(), ie1(:)=>null() !< i-index of current tile representing contact
423 integer, pointer :: js1(:)=>null(), je1(:)=>null() !< j-index of current tile representing contact
424 integer, pointer :: is2(:)=>null(), ie2(:)=>null() !< i-index of neighbor tile representing contact
425 integer, pointer :: js2(:)=>null(), je2(:)=>null() !< j-index of neighbor tile representing contact
426  end type contact_type
427 
428  !> index bounds for use in @ref nestSpec
429  !> @ingroup mpp_domains_mod
430  type :: index_type
431  integer :: is_me, ie_me, js_me, je_me
432  integer :: is_you, ie_you, js_you, je_you
433  end type index_type
434 
435  !> Used to specify bounds and index information for nested tiles as a linked list
436  !> @ingroup mpp_domains_mod
437  type :: nestspec
438  private
439  integer :: xbegin, xend, ybegin, yend
440  integer :: xbegin_c, xend_c, ybegin_c, yend_c
441  integer :: xbegin_f, xend_f, ybegin_f, yend_f
442  integer :: xsize_c, ysize_c
443  type(index_type) :: west, east, south, north, center
444  integer :: nsend, nrecv
445  integer :: extra_halo
446  type(overlap_type), pointer :: send(:) => null()
447  type(overlap_type), pointer :: recv(:) => null()
448  type(nestSpec), pointer :: next => null()
449 
450  end type nestspec
451 
452 !> @brief domain with nested fine and coarse tiles
453  !> @ingroup mpp_domains_mod
454 type :: nest_domain_type
455 character(len=NAME_LENGTH) :: name
456  integer :: num_level
457  integer, pointer :: nest_level(:) => null() !< Added for moving nest functionality
458  type(nest_level_type), pointer :: nest(:) => null()
459  integer :: num_nest
460  integer, pointer :: tile_fine(:), tile_coarse(:)
461  integer, pointer :: istart_fine(:), iend_fine(:), jstart_fine(:), jend_fine(:)
462  integer, pointer :: istart_coarse(:), iend_coarse(:), jstart_coarse(:), jend_coarse(:)
463  end type nest_domain_type
464 
465  !> Private type to hold data for each level of nesting
466  !> @ingroup mpp_domains_mod
467 type :: nest_level_type
468 private
469  logical :: on_level
470  logical :: is_fine, is_coarse
471  integer :: num_nest
472  integer :: my_num_nest
473  integer, pointer :: my_nest_id(:)
474  integer, pointer :: tile_fine(:), tile_coarse(:)
475  integer, pointer :: istart_fine(:), iend_fine(:), jstart_fine(:), jend_fine(:)
476  integer, pointer :: istart_coarse(:), iend_coarse(:), jstart_coarse(:), jend_coarse(:)
477  integer :: x_refine, y_refine
478  logical :: is_fine_pe, is_coarse_pe
479  integer, pointer :: pelist(:) => null()
480  integer, pointer :: pelist_fine(:) => null()
481  integer, pointer :: pelist_coarse(:) => null()
482  type(nestSpec), pointer :: C2F_T => null()
483  type(nestSpec), pointer :: C2F_C => null()
484  type(nestSpec), pointer :: C2F_E => null()
485  type(nestSpec), pointer :: C2F_N => null()
486  type(nestSpec), pointer :: F2C_T => null()
487  type(nestSpec), pointer :: F2C_C => null()
488  type(nestSpec), pointer :: F2C_E => null()
489  type(nestSpec), pointer :: F2C_N => null()
490  type(domain2d), pointer :: domain_fine => null()
491  type(domain2d), pointer :: domain_coarse => null()
492  end type nest_level_type
493 
494 
495  !> Used for sending domain data between pe's
496  !> @ingroup mpp_domains_mod
497 type :: domaincommunicator2d
498 private
499  logical :: initialized=.false.
500  integer(i8_kind) :: id=-9999
501  integer(i8_kind) :: l_addr =-9999
502  integer(i8_kind) :: l_addrx =-9999
503  integer(i8_kind) :: l_addry =-9999
504  type(domain2D), pointer :: domain =>null()
505  type(domain2D), pointer :: domain_in =>null()
506  type(domain2D), pointer :: domain_out =>null()
507  type(overlapSpec), pointer :: send(:,:,:,:) => null()
508  type(overlapSpec), pointer :: recv(:,:,:,:) => null()
509  integer, dimension(:,:), allocatable :: sendis
510  integer, dimension(:,:), allocatable :: sendie
511  integer, dimension(:,:), allocatable :: sendjs
512  integer, dimension(:,:), allocatable :: sendje
513  integer, dimension(:,:), allocatable :: recvis
514  integer, dimension(:,:), allocatable :: recvie
515  integer, dimension(:,:), allocatable :: recvjs
516  integer, dimension(:,:), allocatable :: recvje
517  logical, dimension(:), allocatable :: S_do_buf
518  logical, dimension(:), allocatable :: R_do_buf
519  integer, dimension(:), allocatable :: cto_pe
520  integer, dimension(:), allocatable :: cfrom_pe
521  integer, dimension(:), allocatable :: S_msize
522  integer, dimension(:), allocatable :: R_msize
523  integer :: Slist_size=0, rlist_size=0
524  integer :: isize=0, jsize=0, ke=0
525  integer :: isize_in=0, jsize_in=0
526  integer :: isize_out=0, jsize_out=0
527  integer :: isize_max=0, jsize_max=0
528  integer :: gf_ioff=0, gf_joff=0
529  ! Remote data
530  integer, dimension(:) , allocatable :: isizeR
531  integer, dimension(:) , allocatable :: jsizeR
532  integer, dimension(:,:), allocatable :: sendisR
533  integer, dimension(:,:), allocatable :: sendjsR
534  integer(i8_kind), dimension(:), allocatable :: rem_addr
535  integer(i8_kind), dimension(:), allocatable :: rem_addrx
536  integer(i8_kind), dimension(:), allocatable :: rem_addry
537  integer(i8_kind), dimension(:,:), allocatable :: rem_addrl
538  integer(i8_kind), dimension(:,:), allocatable :: rem_addrlx
539  integer(i8_kind), dimension(:,:), allocatable :: rem_addrly
540  integer :: position !< data location. T, E, C, or N.
541  end type domaincommunicator2d
542 
543  integer, parameter :: max_request = 100
544 
545  !> Used for nonblocking data transfer
546  !> @ingroup mpp_domains_mod
547 type :: nonblock_type
548 integer :: recv_pos
549  integer :: send_pos
550  integer :: recv_msgsize
551  integer :: send_msgsize
552  integer :: update_flags
553  integer :: update_position
554  integer :: update_gridtype
555  integer :: update_whalo
556  integer :: update_ehalo
557  integer :: update_shalo
558  integer :: update_nhalo
559  integer :: request_send_count
560  integer :: request_recv_count
561  integer, dimension(MAX_REQUEST) :: request_send
562  integer, dimension(MAX_REQUEST) :: request_recv
563  integer, dimension(MAX_REQUEST) :: size_recv
564  integer, dimension(MAX_REQUEST) :: type_recv
565  integer, dimension(MAX_REQUEST) :: buffer_pos_send
566  integer, dimension(MAX_REQUEST) :: buffer_pos_recv
567  integer(i8_kind) :: field_addrs(MAX_DOMAIN_FIELDS)
568  integer(i8_kind) :: field_addrs2(MAX_DOMAIN_FIELDS)
569  integer :: nfields
570  end type nonblock_type
571 
572  !> used for updates on a group
573  !> @ingroup mpp_domains_mod
574 type :: mpp_group_update_type
575 private
576  logical :: initialized = .false.
577  logical :: k_loop_inside = .true.
578  logical :: nonsym_edge = .false.
579  integer :: nscalar = 0
580  integer :: nvector = 0
581  integer :: flags_s=0, flags_v=0
582  integer :: whalo_s=0, ehalo_s=0, shalo_s=0, nhalo_s=0
583  integer :: isize_s=0, jsize_s=0, ksize_s=1
584  integer :: whalo_v=0, ehalo_v=0, shalo_v=0, nhalo_v=0
585  integer :: isize_x=0, jsize_x=0, ksize_v=1
586  integer :: isize_y=0, jsize_y=0
587  integer :: position=0, gridtype=0
588  logical :: recv_s(8), recv_x(8), recv_y(8)
589  integer :: is_s=0, ie_s=0, js_s=0, je_s=0
590  integer :: is_x=0, ie_x=0, js_x=0, je_x=0
591  integer :: is_y=0, ie_y=0, js_y=0, je_y=0
592  integer :: nrecv=0, nsend=0
593  integer :: npack=0, nunpack=0
594  integer :: reset_index_s = 0
595  integer :: reset_index_v = 0
596  integer :: tot_msgsize = 0
597  integer :: from_pe(MAXOVERLAP)
598  integer :: to_pe(MAXOVERLAP)
599  integer :: recv_size(MAXOVERLAP)
600  integer :: send_size(MAXOVERLAP)
601  integer :: buffer_pos_recv(MAXOVERLAP)
602  integer :: buffer_pos_send(MAXOVERLAP)
603  integer :: pack_type(MAXOVERLAP)
604  integer :: pack_buffer_pos(MAXOVERLAP)
605  integer :: pack_rotation(MAXOVERLAP)
606  integer :: pack_size(MAXOVERLAP)
607  integer :: pack_is(MAXOVERLAP)
608  integer :: pack_ie(MAXOVERLAP)
609  integer :: pack_js(MAXOVERLAP)
610  integer :: pack_je(MAXOVERLAP)
611  integer :: unpack_type(MAXOVERLAP)
612  integer :: unpack_buffer_pos(MAXOVERLAP)
613  integer :: unpack_rotation(MAXOVERLAP)
614  integer :: unpack_size(MAXOVERLAP)
615  integer :: unpack_is(MAXOVERLAP)
616  integer :: unpack_ie(MAXOVERLAP)
617  integer :: unpack_js(MAXOVERLAP)
618  integer :: unpack_je(MAXOVERLAP)
619  integer(i8_kind) :: addrs_s(MAX_DOMAIN_FIELDS)
620  integer(i8_kind) :: addrs_x(MAX_DOMAIN_FIELDS)
621  integer(i8_kind) :: addrs_y(MAX_DOMAIN_FIELDS)
622  integer :: buffer_start_pos = -1
623  integer :: request_send(MAX_REQUEST)
624  integer :: request_recv(MAX_REQUEST)
625  integer :: type_recv(MAX_REQUEST)
626  end type mpp_group_update_type
627 
628  !> One dimensional domain used to manage shared data access between pes
629  !> @ingroup mpp_domains_mod
630  type :: domain1d
631  private
632  type(domain_axis_spec) :: compute !< index limits for compute domain
633  type(domain_axis_spec) :: domain_data !< index limits for data domain
634  type(domain_axis_spec) :: global !< index limits for global domain
635  type(domain_axis_spec) :: memory !< index limits for memory domain
636  logical :: cyclic !< true if domain is cyclic
637  type(domain1d), pointer :: list(:) =>null() !< list of each pe's domains
638  integer :: pe !<PE to which this domain is assigned
639  integer :: pos !< position of this PE within link list, i.e domain%list(pos)%pe = pe
640  integer :: goffset !< needed for global sum
641  integer :: loffset !< needed for global sum
642  end type domain1d
643 
644 !#######################################################################
645 
646 !> @addtogroup mpp_domains_mod
647 !> @{
648 !***********************************************************************
649 !
650 ! module variables
651 !
652 !***********************************************************************
653  integer :: pe
654  logical :: module_is_initialized = .false.
655  logical :: debug = .false.
656  logical :: verbose=.false.
657  logical :: mosaic_defined = .false.
658  integer :: mpp_domains_stack_size=0
659  integer :: mpp_domains_stack_hwm=0
660  type(domain1d),save :: null_domain1d
661  type(domain2d),save :: null_domain2d
662  type(domainug),save :: null_domainug
663  integer :: current_id_update = 0
664  integer :: num_update = 0
665  integer :: num_nonblock_group_update = 0
666  integer :: nonblock_buffer_pos = 0
667  integer :: nonblock_group_buffer_pos = 0
668  logical :: start_update = .true.
669  logical :: complete_update = .false.
670  type(nonblock_type), allocatable :: nonblock_data(:)
671  integer, parameter :: max_nonblock_update = 100
672 
673  integer :: group_update_buffer_pos = 0
674  logical :: complete_group_update_on = .false.
675  !-------- The following variables are used in mpp_domains_comm.h
676 
677  integer, parameter :: max_addrs=512
678  integer(i8_kind),dimension(MAX_ADDRS),save :: addrs_sorted=-9999 !< list of sorted local addresses
679  integer, dimension(-1:MAX_ADDRS),save :: addrs_idx=-9999 !< index of address associated with d_comm
680  integer, save :: a_sort_len=0 !< length sorted memory list
681  integer, save :: n_addrs=0 !< number of memory addresses used
682 
683  integer(i8_kind), parameter :: addr2_base = 65536_i8_kind !< = 0x0000000000010000
684  integer, parameter :: max_addrs2=128
685  integer(i8_kind),dimension(MAX_ADDRS2),save :: addrs2_sorted=-9999 !< list of sorted local addresses
686  integer, dimension(-1:MAX_ADDRS2),save :: addrs2_idx=-9999 !< index of addr2 associated with d_comm
687  integer, save :: a2_sort_len=0 !< length sorted memory list
688  integer, save :: n_addrs2=0 !< number of memory addresses used
689 
690  integer, parameter :: max_dom_ids=128
691  integer(i8_kind),dimension(MAX_DOM_IDS),save :: ids_sorted=-9999 !< list of sorted domain identifiers
692 integer, dimension(-1:MAX_DOM_IDS),save :: ids_idx=-9999 !< index of d_comm associated with sorted addresses
693  integer, save :: i_sort_len=0 !< length sorted domain ids list
694  integer, save :: n_ids=0 !< number of domain ids used
695  !!(=i_sort_len; domain ids are never removed)
696 
697  integer, parameter :: max_fields=1024
698  integer(i8_kind), dimension(MAX_FIELDS),save :: dckey_sorted=-9999 !< list of sorted local addresses
699  ! Not sure why static d_comm fails during deallocation of derived type members; allocatable works
700  ! type(DomainCommunicator2D),dimension(MAX_FIELDS),save,target :: d_comm !< domain communicators
701  type(domaincommunicator2d),dimension(:),allocatable,save,target :: d_comm !< domain communicators
702  integer, dimension(-1:MAX_FIELDS),save :: d_comm_idx=-9999 !< index of
703  !! d_comm associated with sorted addresses
704  integer, save :: dc_sort_len=0 !< length sorted comm keys
705 !! (=num active communicators)
706  integer, save :: n_comm=0 !< number of communicators used
707 
708  ! integer(i8_kind), parameter :: GT_BASE=2**8
709  integer(i8_kind), parameter :: gt_base = 256_i8_kind !0x0000000000000100
710 
711  ! integer(i8_kind), parameter :: KE_BASE=2**48
712  integer(i8_kind), parameter :: ke_base = 281474976710656_i8_kind !0x0001000000000000
713 
714  integer(i8_kind) :: domain_cnt=0
715 
716  !--- the following variables are used in mpp_domains_misc.h
717  logical :: domain_clocks_on=.false.
718  integer :: send_clock=0, recv_clock=0, unpk_clock=0
719  integer :: wait_clock=0, pack_clock=0
720  integer :: send_pack_clock_nonblock=0, recv_clock_nonblock=0, unpk_clock_nonblock=0
721  integer :: wait_clock_nonblock=0
722  integer :: nest_send_clock=0, nest_recv_clock=0, nest_unpk_clock=0
723  integer :: nest_wait_clock=0, nest_pack_clock=0
724  integer :: group_recv_clock=0, group_send_clock=0, group_pack_clock=0, group_unpk_clock=0, group_wait_clock=0
725  integer :: nonblock_group_recv_clock=0, nonblock_group_send_clock=0, nonblock_group_pack_clock=0
726  integer :: nonblock_group_unpk_clock=0, nonblock_group_wait_clock=0
727 
728 !> namelist interface
729  character(len=32) :: debug_update_domain = "none" !< when debug_update_domain = none, no debug will be done.
730  !! When debug_update_domain is set to fatal,
731 !! the run will be exited with a fatal error message.
732 !! When debug_update_domain is set to warning,
733 !! the run will output a warning message.
734 !! When debug_update_domain is set to note,
735 !! the run will output a note message.
736 logical :: debug_message_passing = .false. !< If .true., checks consistency on the boundaries between
737 !! processors/tiles when updating domains for symmetric domains, and
738 !! checks consistency on the north folded edge.
739 integer :: nthread_control_loop = 8 !< Determines the loop order for packing and unpacking.
740 !! When the number of threads is greater than nthread_control_loop,
741 !! the k-loop will be moved outside and combined with the number
742 !! of packs and unpacks. When the number of threads is
743 !! less than or equal to nthread_control_loop, the k-loop
744 !! is moved inside, but still outside of, the j,i loops.
745  logical :: efp_sum_overflow_check = .false. !< If .true., always do overflow_check
746  !! when doing EFP bitwise mpp_global_sum.
747  logical :: use_alltoallw = .false.
748  namelist /mpp_domains_nml/ debug_update_domain, domain_clocks_on, debug_message_passing, nthread_control_loop, &
749  efp_sum_overflow_check, use_alltoallw
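! An example mpp_domains_nml entry (a sketch; the values shown are the defaults above):
!   &mpp_domains_nml
!     debug_update_domain = 'none'
!     debug_message_passing = .false.
!     nthread_control_loop = 8
!     efp_sum_overflow_check = .false.
!     use_alltoallw = .false.
!     domain_clocks_on = .false.
!   /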
750 
751  !***********************************************************************
752 
753  integer, parameter :: no_check = -1
754  integer :: debug_update_level = no_check
755 !> @}
756 !***********************************************************************
757 !
758 ! public interface from mpp_domains_define.h
759 !
760 !***********************************************************************
761  !> @brief Retrieve layout associated with a domain decomposition.
762  !> Given a global 2D domain and the number of divisions in the
763  !! decomposition ndivs (usually the PE count unless some
764 !! domains are \e masked) this call returns a 2D domain layout.
765  !! By default, mpp_define_layout will attempt to divide the
766  !! 2D index space into domains that maintain the aspect ratio of the
767  !! global domain. If this cannot be done, the algorithm favours domains
768  !! that are longer in \e x than \e y, a preference that could improve vector performance.
769  !! <br>Example usage:
770  !! @code{.F90}call mpp_define_layout( global_indices, ndivs, layout )@endcode
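!! A slightly fuller sketch (the 360x180 global grid and halo widths are illustrative):
!! @code{.F90}
!! integer :: layout(2)
!! call mpp_define_layout( (/1,360,1,180/), mpp_npes(), layout )
!! call mpp_define_domains( (/1,360,1,180/), layout, domain, xhalo=2, yhalo=2 )
!! @endcode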
771  !> @ingroup mpp_domains_mod
772 interface mpp_define_layout
773 module procedure mpp_define_layout2d
774  end interface
775 
776  !> @brief Set up a domain decomposition.
777  !!
778  !> There are two forms for the \e mpp_define_domains call. The 2D version is generally
779  !! to be used but is built by repeated calls to the 1D version, also provided.
780  !!
781  !! <br>Example usage:
782  !!
783  !! call mpp_define_domains( global_indices, ndivs, domain, &
784  !! pelist, flags, halo, extent, maskmap )
785  !! call mpp_define_domains( global_indices, layout, domain, pelist, &
786  !! xflags, yflags, xhalo, yhalo, &
787  !! xextent, yextent, maskmap, name )
788  !!
789  !! @param global_indices Defines the global domain.
790  !! @param ndivs The number of domain divisions required.
791  !! @param [inout] domain Holds the resulting domain decomposition.
792  !! @param pelist List of PEs to which the domains are to be assigned.
793  !! @param flags An optional flag to pass additional information
794  !! about the desired domain topology. Useful flags in a 1D decomposition
795  !! include <TT>GLOBAL_DATA_DOMAIN</TT> and
796  !! <TT>CYCLIC_GLOBAL_DOMAIN</TT>. Flags are integers: multiple flags may
797  !! be added together. The flag values are public parameters available by
798  !! use association.
799  !! @param halo Width of the halo.
800  !! @param extent Normally <TT>mpp_define_domains</TT> attempts
801  !! an even division of the global domain across <TT>ndivs</TT>
802  !! domains. The <TT>extent</TT> array can be used by the user to pass a
803  !! custom domain division. The <TT>extent</TT> array has <TT>ndivs</TT>
804  !! elements and holds the compute domain widths, which should add up to
805  !! cover the global domain exactly.
806  !! @param maskmap Some divisions may be masked
807  !! (<TT>maskmap=.FALSE.</TT>) to exclude them from the computation (e.g
808  !! for ocean model domains that are all land). The <TT>maskmap</TT> array
809  !! is dimensioned <TT>ndivs</TT> and contains <TT>.TRUE.</TT> values for
810  !! any domain that must be <I>included</I> in the computation (default
811  !! all). The <TT>pelist</TT> array length should match the number of
812  !! domains included in the computation.
813  !!
814  !! <br>Example usage:
815  !! @code{.F90}
816  !! call mpp_define_domains( (/1,100/), 10, domain, &
817  !! flags=GLOBAL_DATA_DOMAIN+CYCLIC_GLOBAL_DOMAIN, halo=2 )
818  !! @endcode
819  !!
820  !! defines 10 compute domains spanning the range [1,100] of the global
821  !! domain. The compute domains are non-overlapping blocks of 10. All the data
822  !! domains are global, and with a halo of 2 span the range [-1:102]. And
823  !! since the global domain has been declared to be cyclic,
824  !! domain(9)%next => domain(0) and domain(0)%prev =>
825  !! domain(9). A field is allocated on the data domain, and computations proceed on
826  !! the compute domain. A call to mpp_update_domains would fill in the values
827  !! in the halo region:<br>
828  !!<br>
829  !! @code{.F90}
830  !! call mpp_get_data_domain( domain, isd, ied ) !returns -1 and 102
831  !! call mpp_get_compute_domain( domain, is, ie ) !returns (1,10) on PE 0 ...
832  !! allocate( a(isd:ied) )
833  !! do i = is,ie
834 !! a(i) = <perform computations>
835  !! end do
836  !! call mpp_update_domains( a, domain )
837  !! @endcode
838  !!<br>
839 !! The call to mpp_update_domains fills in the regions outside
840  !! the compute domain. Since the global domain is cyclic, the values at
841  !! \e i=(-1,0) are the same as at \e i=(99,100); and \e i=(101,102)
842  !! are the same as \e i=(1,2).
843  !!
844  !! The 2D version is just an extension of this syntax to two dimensions.
845  !!
846  !! The 2D version of the above should generally be used in
847  !! codes, including 1D-decomposed ones, if there is a possibility of
848  !! future evolution toward 2D decomposition. The arguments are similar to
849  !! the 1D case, except that now we have optional arguments
850  !! flags, halo, extent and maskmap along two axes.
851  !!
852  !! flags can now take an additional possible value to fold one or more edges.
853  !! This is done by using flags \e FOLD_WEST_EDGE, \e FOLD_EAST_EDGE, \e FOLD_SOUTH_EDGE or
854  !! \e FOLD_NORTH_EDGE. When a fold exists (e.g cylindrical domain),
855  !! vector fields reverse sign upon
856  !! crossing the fold. This parity reversal is performed only in the vector version of
857  !! mpp_update_domains. In addition, shift operations may need to be applied to vector fields on
858  !! staggered grids, also described in the vector interface to mpp_update_domains.
859  !!
860  !! <TT>name</TT> is the name associated with the decomposition,
861  !! e.g <TT>'Ocean model'</TT>. If this argument is present,
862  !! <TT>mpp_define_domains</TT> will print the domain decomposition
863  !! generated to <TT>stdlog</TT>.
864  !!
865  !! <br>Examples:
866  !! call mpp_define_domains( (/1,100,1,100/), (/2,2/), domain, xhalo=1 )
867  !! will create the following domain layout:<br>
868  !!<br>
869  !! | domain |domain(1)|domain(2) |domain(3) |domain(4) |
870  !! |--------------|---------|-----------|-----------|-------------|
871  !! |Compute domain|1,50,1,50|51,100,1,50|1,50,51,100|51,100,51,100|
872  !! |Data domain |0,51,1,50|50,101,1,50|0,51,51,100|50,101,51,100|
873  !!
874  !!
875  !! Again, we allocate arrays on the data domain, perform computations
876  !! on the compute domain, and call mpp_update_domains to update the halo region.
877  !!
878 !! If we wished to perform a 1D decomposition along Y on the same global domain,
879  !! we could use:
880  !!
881 !! call mpp_define_domains( (/1,100,1,100/), layout=(/1,4/), domain, xhalo=1 )
882  !!
883  !! This will create the following domain layout:<br>
884  !!<br>
885  !! | domain |domain(1) |domain(2) |domain(3) |domain(4) |
886  !! |--------------|----------|-----------|-----------|------------|
887  !! |Compute domain|1,100,1,25|1,100,26,50|1,100,51,75|1,100,76,100|
888 !! |Data domain |0,101,1,25|0,101,26,50|0,101,51,75|0,101,76,100|
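!!
!! The 2D optional arguments described above can be combined in one call; a sketch
!! (grid size, halo widths and name are illustrative):
!! @code{.F90}
!! call mpp_define_domains( (/1,360,1,180/), layout, domain, &
!!                          xflags=CYCLIC_GLOBAL_DOMAIN, xhalo=2, yhalo=2, &
!!                          name='ocean model' )
!! @endcode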
889  !> @ingroup mpp_domains_mod
890 interface mpp_define_domains
891 module procedure mpp_define_domains1d
892  module procedure mpp_define_domains2d
893  end interface
894 
895  !> Defines a nullified 1D or 2D domain
896  !!
897  !> <br> Example usage:
898  !! @code{.F90}
899  !! call mpp_define_null_domain(domain)
900  !! @endcode
901  !> @ingroup mpp_domains_mod
902 interface mpp_define_null_domain
903 module procedure mpp_define_null_domain1d
904  module procedure mpp_define_null_domain2d
905  end interface
906 
907  !> Copy 1D or 2D domain
908  !> @param domain_in Input domain to get read
909  !> @param domain_out Output domain to get written to
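!> <br>Example usage (a sketch):
!! @code{.F90}
!! call mpp_copy_domain(domain_in, domain_out)
!! @endcode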
910  !> @ingroup mpp_domains_mod
911  interface mpp_copy_domain
912  module procedure mpp_copy_domain1d
913  module procedure mpp_copy_domain2d
914  end interface mpp_copy_domain
915  !> Deallocate given 1D or 2D domain
916  !> @param domain an allocated @ref domain1D or @ref domain2D
917  !> @ingroup mpp_domains_mod
918 interface mpp_deallocate_domain
919 module procedure mpp_deallocate_domain1d
920  module procedure mpp_deallocate_domain2d
921  end interface
922 
923  !> Modifies the extents (compute, data and global) of a given domain
924  !> @ingroup mpp_domains_mod
925 interface mpp_modify_domain
926 module procedure mpp_modify_domain1d
927  module procedure mpp_modify_domain2d
928  end interface
929 
930 
931 !***********************************************************************
932 !
933 ! public interface from mpp_domains_misc.h
934 !
935 !***********************************************************************
936 
937 !> Performs halo updates for a given domain.<br>
938 !!
939 !! Used to perform a halo update of a
940 !! domain-decomposed array on each PE. \e MPP_TYPE can be of type
941 !! complex, integer, logical or real of 4-byte or 8-byte kind; of rank up to 5.
942 !! The vector version (with two input data fields) is only present for real types.
943 !! For 2D domain updates, if there are halos present along both
944 !! x and y, we can choose to update one only, by specifying \e flags=XUPDATE or \e flags=YUPDATE.
945 !! In addition, one-sided updates can be performed by setting flags
946 !! to any combination of WUPDATE, EUPDATE, SUPDATE and NUPDATE
947 !! to update the west, east, north and south halos respectively.
948 !! Any combination of halos may be used by adding the requisite flags, e.g:
949 !! \e flags=XUPDATE+SUPDATE or \e flags=EUPDATE+WUPDATE+SUPDATE will update the east,
950 !! west and south halos.<br>
951 !!<br>
952 !! If a call to \e mpp_update_domains involves at least one E-W
953 !! halo and one N-S halo, the corners involved will also be updated, i.e,
954 !! in the example above, the SE and SW corners will be updated.<br>
955 !! If \e flags is not supplied, that is equivalent to \e flags=XUPDATE+YUPDATE.<br>
956 !!<br>
957 !! The vector version is passed the \e x and \e y components of a vector field in tandem,
958 !! and both are updated upon return. They are passed together to treat parity issues on various
959 !! grids. For example, on a cubic sphere projection, the \e x \e y components may be
960 !! interchanged when passing from an equatorial cube face to a polar face.
961 !! For grids with folds, vector components change sign on crossing the fold. Paired scalar
962 !! quantities can also be passed with the vector version if \e flags=SCALAR_PAIR, in which
963 !! case components are appropriately interchanged, but signs are not.<br>
964 !!<br>
965 !! Special treatment at boundaries such as folds is also required for
966 !! staggered grids. The following types of staggered grids are
967 !! recognized:<br>
968 !!<br>
969 !! 1) AGRID: values are at grid centers.<br>
970 !! 2) BGRID_NE: vector fields are at the NE vertex of a grid
971 !! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
972 !! actually at (i+&#189;,j+&#189;) with respect to the grid centers.<br>
973 !! 3) BGRID_SW: vector fields are at the SW vertex of a grid
974 !! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
975 !! actually at (i-&#189;,j-&#189;) with respect to the grid centers.<br>
976 !! 4) CGRID_NE: vector fields are at the N and E faces of a
977 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
978 !! are actually at (i+&#189;,j) and (i,j+&#189;) with respect to the
979 !! grid centers.<br>
980 !! 5) CGRID_SW: vector fields are at the S and W faces of a
981 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
982 !! are actually at (i-&#189;,j) and (i,j-&#189;) with respect to the
983 !! grid centers.<br>
984 !!<br>
985 !! The gridtypes listed above are all available by use association as
986 !! integer parameters. The scalar version of \e mpp_update_domains
987 !! assumes that the values of a scalar field are always at \e AGRID
988 !! locations, and no special boundary treatment is required. If vector
989 !! fields are at staggered locations, the optional argument
990 !! \e gridtype must be appropriately set for correct treatment at
991 !! boundaries.
992 !!<br>
993 !! It is safe to apply vector field updates to the appropriate arrays
994 !! irrespective of the domain topology: if the topology requires no
995 !! special treatment of vector fields, specifying \e gridtype will
996 !! do no harm.<br>
997 !!<br>
998 !! \e mpp_update_domains internally buffers the data being sent
999 !! and received into single messages for efficiency. A tunable internal
1000 !! buffer area in memory is provided for this purpose by
1001 !! \e mpp_domains_mod. The size of this buffer area can be set by
1002 !! the user by calling
1003 !! \e mpp_domains_set_stack_size.
1004 !!
1005 !! Example usage:
1006 !! call mpp_update_domains( field, domain, flags )
1007 !! Update a 1D domain for the given field.
1008 !! call mpp_update_domains( fieldx, fieldy, domain, flags, gridtype )
1009 !! Update a 2D domain for the given fields.
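!!
!! A fuller sketch (field names and the C-grid staggering are illustrative):
!! @code{.F90}
!! call mpp_get_data_domain( domain, isd, ied, jsd, jed )
!! allocate( temp(isd:ied,jsd:jed) )
!! ! ... fill the compute domain of temp ...
!! call mpp_update_domains( temp, domain )                      ! scalar halo update
!! call mpp_update_domains( u, v, domain, gridtype=CGRID_NE )   ! paired vector update
!! @endcode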
1010 !> @ingroup mpp_domains_mod
1011 interface mpp_update_domains
1012 module procedure mpp_update_domain2d_r8_2d
1013  module procedure mpp_update_domain2d_r8_3d
1014  module procedure mpp_update_domain2d_r8_4d
1015  module procedure mpp_update_domain2d_r8_5d
1016  module procedure mpp_update_domain2d_r8_2dv
1017  module procedure mpp_update_domain2d_r8_3dv
1018  module procedure mpp_update_domain2d_r8_4dv
1019  module procedure mpp_update_domain2d_r8_5dv
1020 #ifdef OVERLOAD_C8
1021  module procedure mpp_update_domain2d_c8_2d
1022  module procedure mpp_update_domain2d_c8_3d
1023  module procedure mpp_update_domain2d_c8_4d
1024  module procedure mpp_update_domain2d_c8_5d
1025 #endif
1026  module procedure mpp_update_domain2d_i8_2d
1027  module procedure mpp_update_domain2d_i8_3d
1028  module procedure mpp_update_domain2d_i8_4d
1029  module procedure mpp_update_domain2d_i8_5d
1030  module procedure mpp_update_domain2d_r4_2d
1031  module procedure mpp_update_domain2d_r4_3d
1032  module procedure mpp_update_domain2d_r4_4d
1033  module procedure mpp_update_domain2d_r4_5d
1034  module procedure mpp_update_domain2d_r4_2dv
1035  module procedure mpp_update_domain2d_r4_3dv
1036  module procedure mpp_update_domain2d_r4_4dv
1037  module procedure mpp_update_domain2d_r4_5dv
1038 #ifdef OVERLOAD_C4
1039  module procedure mpp_update_domain2d_c4_2d
1040  module procedure mpp_update_domain2d_c4_3d
1041  module procedure mpp_update_domain2d_c4_4d
1042  module procedure mpp_update_domain2d_c4_5d
1043 #endif
1044  module procedure mpp_update_domain2d_i4_2d
1045  module procedure mpp_update_domain2d_i4_3d
1046  module procedure mpp_update_domain2d_i4_4d
1047  module procedure mpp_update_domain2d_i4_5d
1048  end interface
1049 
1050 !> Interface to start halo updates
1051 !! \e mpp_start_update_domains is used to start a halo update of a
1052 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1053 !! \e complex, \e integer, \e logical or \e real;
1054 !! of 4-byte or 8-byte kind; of rank up to 5. The vector version (with
1055 !! two input data fields) is only present for \e real types.<br>
1056 !!<br>
1057 !! \e mpp_start_update_domains must be paired with
1058 !! \e mpp_complete_update_domains. In \e mpp_start_update_domains,
1059 !! a buffer will be pre-posted to receive (non-blocking) the
1060 !! data, and data on the computational domain will be packed and sent (non-blocking send)
1061 !! to the other processors. In \e mpp_complete_update_domains, the buffer will
1062 !! be unpacked to fill the halo, and mpp_sync_self will be called
1063 !! to ensure communication is safe at the last call of mpp_complete_update_domains.<br>
1064 !!<br>
1065 !! Each mpp_update_domains can be replaced by the combination of mpp_start_update_domains
1066 !! and mpp_complete_update_domains. The arguments in mpp_start_update_domains
1067 !! and mpp_complete_update_domains should be exactly the same as in the
1068 !! mpp_update_domains call being replaced, except that there is no optional argument "complete".
1069 !! The following are examples of how to replace mpp_update_domains with
1070 !! mpp_start_update_domains/mpp_complete_update_domains
1071 !!
1072 !> @par Example 1: Replace one scalar mpp_update_domains.<br>
1073 !!<br>
1074 !! Replace<br>
1075 !!<br>
1076 !! call mpp_update_domains(data, domain, flags=update_flags)<br>
1077 !!
1078 !! with<br>
1079 !!<br>
1080 !! id_update = mpp_start_update_domains(data, domain, flags=update_flags)<br>
1081 !! ...( doing some computation )<br>
1082 !! call mpp_complete_update_domains(id_update, data, domain, flags=update_flags)<br>
1083 !!
1084 !> @par Example 2: Replace group scalar mpp_update_domains<br>
1085 !!<br>
1086 !! Replace<br>
1087 !!<br>
1088 !! call mpp_update_domains(data_1, domain, flags=update_flags, complete=.false.)<br>
1089 !! .... ( other n-2 call mpp_update_domains with complete = .false. )<br>
1090 !! call mpp_update_domains(data_n, domain, flags=update_flags, complete=.true. )<br>
1091 !!<br>
1092 !! With<br>
1093 !!<br>
1094 !! id_up_1 = mpp_start_update_domains(data_1, domain, flags=update_flags)<br>
1095 !! .... ( other n-2 call mpp_start_update_domains )<br>
1096 !! id_up_n = mpp_start_update_domains(data_n, domain, flags=update_flags)<br>
1097 !!<br>
1098 !! ..... ( doing some computation )<br>
1099 !!<br>
1100 !! call mpp_complete_update_domains(id_up_1, data_1, domain, flags=update_flags)<br>
1101 !! .... ( other n-2 call mpp_complete_update_domains )<br>
1102 !! call mpp_complete_update_domains(id_up_n, data_n, domain, flags=update_flags)<br>
1103 !!
1104 !> @par Example 3: Replace group CGRID_NE vector, mpp_update_domains<br>
1105 !!<br>
1106 !! Replace<br>
1107 !!<br>
1108 !! call mpp_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE, complete=.false.)<br>
1109 !! .... ( other n-2 call mpp_update_domains with complete = .false. )<br>
1110 !! call mpp_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE, complete=.true. )<br>
1111 !!<br>
1112 !! with<br>
1113 !!<br>
1114 !! id_up_1 = mpp_start_update_domains(u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1115 !! .... ( other n-2 call mpp_start_update_domains )<br>
1116 !! id_up_n = mpp_start_update_domains(u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1117 !!<br>
1118 !! ..... ( doing some computation )<br>
1119 !!<br>
1120 !! call mpp_complete_update_domains(id_up_1, u_1, v_1, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1121 !! .... ( other n-2 call mpp_complete_update_domains )<br>
1122 !! call mpp_complete_update_domains(id_up_n, u_n, v_n, domain, flags=update_flags, gridtype=CGRID_NE)<br>
1123 !!<br>
1124 !! For 2D domain updates, if there are halos present along both
1125 !! \e x and \e y, we can choose to update one only, by
1126 !! specifying \e flags=XUPDATE or \e flags=YUPDATE. In
1127 !! addition, one-sided updates can be performed by setting \e flags
1128 !! to any combination of \e WUPDATE, \e EUPDATE,
1129 !! \e SUPDATE and \e NUPDATE, to update the west, east, north
1130 !! and south halos respectively. Any combination of halos may be used by
1131 !! adding the requisite flags, e.g: \e flags=XUPDATE+SUPDATE or
1132 !! \e flags=EUPDATE+WUPDATE+SUPDATE will update the east, west and
1133 !! south halos.<br>
1134 !!<br>
1135 !! If a call to \e mpp_start_update_domains/mpp_complete_update_domains involves at least one E-W
1136 !! halo and one N-S halo, the corners involved will also be updated, i.e,
1137 !! in the example above, the SE and SW corners will be updated.<br>
1138 !!<br>
1139 !! If \e flags is not supplied, that is
1140 !! equivalent to \e flags=XUPDATE+YUPDATE.<br>
1141 !!<br>
1142 !! The vector version is passed the \e x and \e y
1143 !! components of a vector field in tandem, and both are updated upon
1144 !! return. They are passed together to treat parity issues on various
1145 !! grids. For example, on a cubic sphere projection, the \e x and
1146 !! \e y components may be interchanged when passing from an
1147 !! equatorial cube face to a polar face. For grids with folds, vector
1148 !! components change sign on crossing the fold. Paired scalar quantities
1149 !! can also be passed with the vector version if flags=SCALAR_PAIR, in which
1150 !! case components are appropriately interchanged, but signs are not.<br>
1151 !!<br>
1152 !! Special treatment at boundaries such as folds is also required for
1153 !! staggered grids. The following types of staggered grids are
1154 !! recognized:
1155 !!<br>
1156 !! 1) \e AGRID: values are at grid centers.<br>
1157 !! 2) \e BGRID_NE: vector fields are at the NE vertex of a grid
1158 !! cell, i.e: the array elements \e u(i,j) and \e v(i,j) are
1159 !! actually at (i+&#189;,j+&#189;) with respect to the grid centers.<br>
1160 !! 3) \e BGRID_SW: vector fields are at the SW vertex of a grid
1161 !! cell, i.e., the array elements \e u(i,j) and \e v(i,j) are
1162 !! actually at (i-&#189;,j-&#189;) with respect to the grid centers.<br>
1163 !! 4) \e CGRID_NE: vector fields are at the N and E faces of a
1164 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
1165 !! are actually at (i+&#189;,j) and (i,j+&#189;) with respect to the
1166 !! grid centers.<br>
1167 !! 5) \e CGRID_SW: vector fields are at the S and W faces of a
1168 !! grid cell, i.e: the array elements \e u(i,j) and \e v(i,j)
1169 !! are actually at (i-&#189;,j) and (i,j-&#189;) with respect to the
1170 !! grid centers.<br>
1171 !!<br>
1172 !! The gridtypes listed above are all available by use association as
1173 !! integer parameters. If vector fields are at staggered locations, the
1174 !! optional argument \e gridtype must be appropriately set for
1175 !! correct treatment at boundaries.
1176 !!<br>
1177 !! It is safe to apply vector field updates to the appropriate arrays
1178 !! irrespective of the domain topology: if the topology requires no
1179 !! special treatment of vector fields, specifying \e gridtype will
1180 !! do no harm.<br>
1181 !!<br>
1182 !! \e mpp_start_update_domains/mpp_complete_update_domains internally
1183 !! buffers the data being sent and received into single messages for efficiency.
1184 !! A tunable internal buffer area in memory is provided for this purpose by
1185 !! \e mpp_domains_mod. The size of this buffer area can be set by
1186 !! the user by calling \e mpp_domains_set_stack_size.<br>
1187 !! Example usage:
1188 !!
1189 !! id_update = mpp_start_update_domains( field, domain, flags )
1190 !! call mpp_complete_update_domains( id_update, field, domain, flags )
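!!
!! A sketch of overlapping communication with computation (field name and flags are illustrative):
!! @code{.F90}
!! integer :: id_update
!! id_update = mpp_start_update_domains( field, domain, flags=update_flags )
!! ! ... do work that does not touch the halo of field ...
!! call mpp_complete_update_domains( id_update, field, domain, flags=update_flags )
!! @endcode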
1191 !> @ingroup mpp_domains_mod
1192 interface mpp_start_update_domains
1193 module procedure mpp_start_update_domain2d_r8_2d
1194  module procedure mpp_start_update_domain2d_r8_3d
1195  module procedure mpp_start_update_domain2d_r8_4d
1196  module procedure mpp_start_update_domain2d_r8_5d
1197  module procedure mpp_start_update_domain2d_r8_2dv
1198  module procedure mpp_start_update_domain2d_r8_3dv
1199  module procedure mpp_start_update_domain2d_r8_4dv
1200  module procedure mpp_start_update_domain2d_r8_5dv
1201 #ifdef OVERLOAD_C8
1202  module procedure mpp_start_update_domain2d_c8_2d
1203  module procedure mpp_start_update_domain2d_c8_3d
1204  module procedure mpp_start_update_domain2d_c8_4d
1205  module procedure mpp_start_update_domain2d_c8_5d
1206 #endif
1207  module procedure mpp_start_update_domain2d_i8_2d
1208  module procedure mpp_start_update_domain2d_i8_3d
1209  module procedure mpp_start_update_domain2d_i8_4d
1210  module procedure mpp_start_update_domain2d_i8_5d
1211  module procedure mpp_start_update_domain2d_r4_2d
1212  module procedure mpp_start_update_domain2d_r4_3d
1213  module procedure mpp_start_update_domain2d_r4_4d
1214  module procedure mpp_start_update_domain2d_r4_5d
1215  module procedure mpp_start_update_domain2d_r4_2dv
1216  module procedure mpp_start_update_domain2d_r4_3dv
1217  module procedure mpp_start_update_domain2d_r4_4dv
1218  module procedure mpp_start_update_domain2d_r4_5dv
1219 #ifdef OVERLOAD_C4
1220  module procedure mpp_start_update_domain2d_c4_2d
1221  module procedure mpp_start_update_domain2d_c4_3d
1222  module procedure mpp_start_update_domain2d_c4_4d
1223  module procedure mpp_start_update_domain2d_c4_5d
1224 #endif
1225  module procedure mpp_start_update_domain2d_i4_2d
1226  module procedure mpp_start_update_domain2d_i4_3d
1227  module procedure mpp_start_update_domain2d_i4_4d
1228  module procedure mpp_start_update_domain2d_i4_5d
1229  end interface
1230 
1231  !> Must be used after a call to @ref mpp_start_update_domains
1232  !! in order to complete a nonblocking domain update. See @ref mpp_start_update_domains
1233  !! for more info.
1234  !> @ingroup mpp_domains_mod
1235  interface mpp_complete_update_domains
1236  module procedure mpp_complete_update_domain2d_r8_2d
1237  module procedure mpp_complete_update_domain2d_r8_3d
1238  module procedure mpp_complete_update_domain2d_r8_4d
1239  module procedure mpp_complete_update_domain2d_r8_5d
1240  module procedure mpp_complete_update_domain2d_r8_2dv
1241  module procedure mpp_complete_update_domain2d_r8_3dv
1242  module procedure mpp_complete_update_domain2d_r8_4dv
1243  module procedure mpp_complete_update_domain2d_r8_5dv
1244 #ifdef OVERLOAD_C8
1245  module procedure mpp_complete_update_domain2d_c8_2d
1246  module procedure mpp_complete_update_domain2d_c8_3d
1247  module procedure mpp_complete_update_domain2d_c8_4d
1248  module procedure mpp_complete_update_domain2d_c8_5d
1249 #endif
1250  module procedure mpp_complete_update_domain2d_i8_2d
1251  module procedure mpp_complete_update_domain2d_i8_3d
1252  module procedure mpp_complete_update_domain2d_i8_4d
1253  module procedure mpp_complete_update_domain2d_i8_5d
1254  module procedure mpp_complete_update_domain2d_r4_2d
1255  module procedure mpp_complete_update_domain2d_r4_3d
1256  module procedure mpp_complete_update_domain2d_r4_4d
1257  module procedure mpp_complete_update_domain2d_r4_5d
1258  module procedure mpp_complete_update_domain2d_r4_2dv
1259  module procedure mpp_complete_update_domain2d_r4_3dv
1260  module procedure mpp_complete_update_domain2d_r4_4dv
1261  module procedure mpp_complete_update_domain2d_r4_5dv
1262 #ifdef OVERLOAD_C4
1263  module procedure mpp_complete_update_domain2d_c4_2d
1264  module procedure mpp_complete_update_domain2d_c4_3d
1265  module procedure mpp_complete_update_domain2d_c4_4d
1266  module procedure mpp_complete_update_domain2d_c4_5d
1267 #endif
1268  module procedure mpp_complete_update_domain2d_i4_2d
1269  module procedure mpp_complete_update_domain2d_i4_3d
1270  module procedure mpp_complete_update_domain2d_i4_4d
1271  module procedure mpp_complete_update_domain2d_i4_5d
1272  end interface
1273 
1274  !> Private interface used for non-blocking updates
1275  !> @ingroup mpp_domains_mod
1276  interface mpp_start_do_update
1277  module procedure mpp_start_do_update_r8_3d
1278  module procedure mpp_start_do_update_r8_3dv
1279 #ifdef OVERLOAD_C8
1280  module procedure mpp_start_do_update_c8_3d
1281 #endif
1282  module procedure mpp_start_do_update_i8_3d
1283  module procedure mpp_start_do_update_r4_3d
1284  module procedure mpp_start_do_update_r4_3dv
1285 #ifdef OVERLOAD_C4
1286  module procedure mpp_start_do_update_c4_3d
1287 #endif
1288  module procedure mpp_start_do_update_i4_3d
1289  end interface
1290 
1291  !> Private interface used for non-blocking updates
1292  !> @ingroup mpp_domains_mod
1293  interface mpp_complete_do_update
1294  module procedure mpp_complete_do_update_r8_3d
1295  module procedure mpp_complete_do_update_r8_3dv
1296 #ifdef OVERLOAD_C8
1297  module procedure mpp_complete_do_update_c8_3d
1298 #endif
1299  module procedure mpp_complete_do_update_i8_3d
1300  module procedure mpp_complete_do_update_r4_3d
1301  module procedure mpp_complete_do_update_r4_3dv
1302 #ifdef OVERLOAD_C4
1303  module procedure mpp_complete_do_update_c4_3d
1304 #endif
1305  module procedure mpp_complete_do_update_i4_3d
1306  end interface
1307 
1308  !> Constructor for the @ref mpp_group_update_type which is
1309  !! then used with @ref mpp_start_group_update
1310  !!
1312  !> @ingroup mpp_domains_mod
1313  interface mpp_create_group_update
1314  module procedure mpp_create_group_update_r4_2d
1315  module procedure mpp_create_group_update_r4_3d
1316  module procedure mpp_create_group_update_r4_4d
1317  module procedure mpp_create_group_update_r4_2dv
1318  module procedure mpp_create_group_update_r4_3dv
1319  module procedure mpp_create_group_update_r4_4dv
1320  module procedure mpp_create_group_update_r8_2d
1321  module procedure mpp_create_group_update_r8_3d
1322  module procedure mpp_create_group_update_r8_4d
1323  module procedure mpp_create_group_update_r8_2dv
1324  module procedure mpp_create_group_update_r8_3dv
1325  module procedure mpp_create_group_update_r8_4dv
1326  end interface mpp_create_group_update
1327 
1328  !> @ingroup mpp_domains_mod
1329  interface mpp_do_group_update
1330  module procedure mpp_do_group_update_r4
1331  module procedure mpp_do_group_update_r8
1332  end interface mpp_do_group_update
1333 
1334  !> Starts a non-blocking group update.
1335  !! Must be followed by a call to @ref mpp_complete_group_update.
1336  !! The @ref mpp_group_update_type argument can be created with @ref mpp_create_group_update.
1337  !!
1338  !> @param[inout] type(mpp_group_update_type) group type created for group update
1339  !> @param[inout] type(domain2D) domain to update
1340  !> @ingroup mpp_domains_mod
1341  interface mpp_start_group_update
1342  module procedure mpp_start_group_update_r4
1343  module procedure mpp_start_group_update_r8
1344  end interface mpp_start_group_update
1345 
1346  !> Completes a pending non-blocking group update.
1347  !! Must follow a call to @ref mpp_start_group_update.
1348  !!
1349  !> @param[inout] type(mpp_group_update_type) group
1350  !> @param[inout] type(domain2D) domain
1351  !> @param[in] d_type data type
1352  !> @ingroup mpp_domains_mod
1353  interface mpp_complete_group_update
1354  module procedure mpp_complete_group_update_r4
1355  module procedure mpp_complete_group_update_r8
1356  end interface mpp_complete_group_update
1357 
1358  !> @ingroup mpp_domains_mod
1359  interface mpp_reset_group_update_field
1360  module procedure mpp_reset_group_update_field_r4_2d
1361  module procedure mpp_reset_group_update_field_r4_3d
1362  module procedure mpp_reset_group_update_field_r4_4d
1363  module procedure mpp_reset_group_update_field_r4_2dv
1364  module procedure mpp_reset_group_update_field_r4_3dv
1365  module procedure mpp_reset_group_update_field_r4_4dv
1366  module procedure mpp_reset_group_update_field_r8_2d
1367  module procedure mpp_reset_group_update_field_r8_3d
1368  module procedure mpp_reset_group_update_field_r8_4d
1369  module procedure mpp_reset_group_update_field_r8_2dv
1370  module procedure mpp_reset_group_update_field_r8_3dv
1371  module procedure mpp_reset_group_update_field_r8_4dv
1372  end interface mpp_reset_group_update_field
1373 
1374  !> Pass data from the coarse grid into buffers, ready to be interpolated
1375  !! onto the fine grid.
1376  !! <br>Example usage:
1377  !!
1378  !! call mpp_update_nest_fine(field, nest_domain, wbuffer, ebuffer, sbuffer,
1379  !! nbuffer, nest_level, flags, complete, position, extra_halo, name,
1380  !! tile_count)
1381  !> @ingroup mpp_domains_mod
1382  interface mpp_update_nest_fine
1383  module procedure mpp_update_nest_fine_r8_2d
1384  module procedure mpp_update_nest_fine_r8_3d
1385  module procedure mpp_update_nest_fine_r8_4d
1386  module procedure mpp_update_nest_fine_r8_2dv
1387  module procedure mpp_update_nest_fine_r8_3dv
1388  module procedure mpp_update_nest_fine_r8_4dv
1389 #ifdef OVERLOAD_C8
1390  module procedure mpp_update_nest_fine_c8_2d
1391  module procedure mpp_update_nest_fine_c8_3d
1392  module procedure mpp_update_nest_fine_c8_4d
1393 #endif
1394  module procedure mpp_update_nest_fine_i8_2d
1395  module procedure mpp_update_nest_fine_i8_3d
1396  module procedure mpp_update_nest_fine_i8_4d
1397  module procedure mpp_update_nest_fine_r4_2d
1398  module procedure mpp_update_nest_fine_r4_3d
1399  module procedure mpp_update_nest_fine_r4_4d
1400  module procedure mpp_update_nest_fine_r4_2dv
1401  module procedure mpp_update_nest_fine_r4_3dv
1402  module procedure mpp_update_nest_fine_r4_4dv
1403 #ifdef OVERLOAD_C4
1404  module procedure mpp_update_nest_fine_c4_2d
1405  module procedure mpp_update_nest_fine_c4_3d
1406  module procedure mpp_update_nest_fine_c4_4d
1407 #endif
1408  module procedure mpp_update_nest_fine_i4_2d
1409  module procedure mpp_update_nest_fine_i4_3d
1410  module procedure mpp_update_nest_fine_i4_4d
1411  end interface
1412 
1413  !> @ingroup mpp_domains_mod
1414  interface mpp_do_update_nest_fine
1415  module procedure mpp_do_update_nest_fine_r8_3d
1416  module procedure mpp_do_update_nest_fine_r8_3dv
1417 #ifdef OVERLOAD_C8
1418  module procedure mpp_do_update_nest_fine_c8_3d
1419 #endif
1420  module procedure mpp_do_update_nest_fine_i8_3d
1421  module procedure mpp_do_update_nest_fine_r4_3d
1422  module procedure mpp_do_update_nest_fine_r4_3dv
1423 #ifdef OVERLOAD_C4
1424  module procedure mpp_do_update_nest_fine_c4_3d
1425 #endif
1426  module procedure mpp_do_update_nest_fine_i4_3d
1427  end interface
1428 
1429  !> Pass data from the fine grid into buffers, ready to be interpolated
1430  !! onto the coarse grid.
1431  !! <br>Example usage:
1432  !!
1433  !! call mpp_update_nest_coarse(field, nest_domain, field_out, nest_level, complete,
1434  !! position, name, tile_count)
1435  !> @ingroup mpp_domains_mod
1436  interface mpp_update_nest_coarse
1437  module procedure mpp_update_nest_coarse_r8_2d
1438  module procedure mpp_update_nest_coarse_r8_3d
1439  module procedure mpp_update_nest_coarse_r8_4d
1440  module procedure mpp_update_nest_coarse_r8_2dv
1441  module procedure mpp_update_nest_coarse_r8_3dv
1442  module procedure mpp_update_nest_coarse_r8_4dv
1443 #ifdef OVERLOAD_C8
1444  module procedure mpp_update_nest_coarse_c8_2d
1445  module procedure mpp_update_nest_coarse_c8_3d
1446  module procedure mpp_update_nest_coarse_c8_4d
1447 #endif
1448  module procedure mpp_update_nest_coarse_i8_2d
1449  module procedure mpp_update_nest_coarse_i8_3d
1450  module procedure mpp_update_nest_coarse_i8_4d
1451  module procedure mpp_update_nest_coarse_r4_2d
1452  module procedure mpp_update_nest_coarse_r4_3d
1453  module procedure mpp_update_nest_coarse_r4_4d
1454  module procedure mpp_update_nest_coarse_r4_2dv
1455  module procedure mpp_update_nest_coarse_r4_3dv
1456  module procedure mpp_update_nest_coarse_r4_4dv
1457 #ifdef OVERLOAD_C4
1458  module procedure mpp_update_nest_coarse_c4_2d
1459  module procedure mpp_update_nest_coarse_c4_3d
1460  module procedure mpp_update_nest_coarse_c4_4d
1461 #endif
1462  module procedure mpp_update_nest_coarse_i4_2d
1463  module procedure mpp_update_nest_coarse_i4_3d
1464  module procedure mpp_update_nest_coarse_i4_4d
1465  end interface
1466 
1467  !> @brief Used by @ref mpp_update_nest_coarse to perform domain updates
1468  !!
1469  !> @ingroup mpp_domains_mod
1470  interface mpp_do_update_nest_coarse
1471  module procedure mpp_do_update_nest_coarse_r8_3d
1472  module procedure mpp_do_update_nest_coarse_r8_3dv
1473 #ifdef OVERLOAD_C8
1474  module procedure mpp_do_update_nest_coarse_c8_3d
1475 #endif
1476  module procedure mpp_do_update_nest_coarse_i8_3d
1477  module procedure mpp_do_update_nest_coarse_r4_3d
1478  module procedure mpp_do_update_nest_coarse_r4_3dv
1479 #ifdef OVERLOAD_C4
1480  module procedure mpp_do_update_nest_coarse_c4_3d
1481 #endif
1482  module procedure mpp_do_update_nest_coarse_i4_3d
1483  end interface
1484 
1485  !> Get the index of the data passed from fine grid to coarse grid.
1486  !! <br>Example usage:
1487  !!
1488  !! call mpp_get_F2C_index(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse,
1489  !! is_fine, ie_fine, js_fine, je_fine, nest_level, position)
1490  !> @ingroup mpp_domains_mod
1491  interface mpp_get_F2C_index
1492  module procedure mpp_get_f2c_index_fine
1493  module procedure mpp_get_f2c_index_coarse
1494  end interface
1495 
1496  !> Broadcasts a domain to every PE. Only useful outside the context of its own pelist.
1497  !!
1498  !> <br>Example usage:
1499  !! call mpp_broadcast_domain(domain)
1500  !! call mpp_broadcast_domain(domain_in, domain_out)
1501  !! call mpp_broadcast_domain(domain, tile_coarse) ! nested domains
1502  !!
1503  !> @ingroup mpp_domains_mod
1504  interface mpp_broadcast_domain
1505  module procedure mpp_broadcast_domain_1
1506  module procedure mpp_broadcast_domain_2
1507  module procedure mpp_broadcast_domain_ug
1508  module procedure mpp_broadcast_domain_nest_fine
1509  module procedure mpp_broadcast_domain_nest_coarse
1510  end interface
1511 
1512 !--------------------------------------------------------------
1513 ! for adjoint update
1514 !--------------------------------------------------------------
1515  !> Similar to @ref mpp_update_domains, but updates adjoint domains
1516  !> @ingroup mpp_domains_mod
1517  interface mpp_update_domains_ad
1518  module procedure mpp_update_domains_ad_2d_r8_2d
1519  module procedure mpp_update_domains_ad_2d_r8_3d
1520  module procedure mpp_update_domains_ad_2d_r8_4d
1521  module procedure mpp_update_domains_ad_2d_r8_5d
1522  module procedure mpp_update_domains_ad_2d_r8_2dv
1523  module procedure mpp_update_domains_ad_2d_r8_3dv
1524  module procedure mpp_update_domains_ad_2d_r8_4dv
1525  module procedure mpp_update_domains_ad_2d_r8_5dv
1526  module procedure mpp_update_domains_ad_2d_r4_2d
1527  module procedure mpp_update_domains_ad_2d_r4_3d
1528  module procedure mpp_update_domains_ad_2d_r4_4d
1529  module procedure mpp_update_domains_ad_2d_r4_5d
1530  module procedure mpp_update_domains_ad_2d_r4_2dv
1531  module procedure mpp_update_domains_ad_2d_r4_3dv
1532  module procedure mpp_update_domains_ad_2d_r4_4dv
1533  module procedure mpp_update_domains_ad_2d_r4_5dv
1534  end interface
1535 !
1536  !> Private interface used for @ref mpp_update_domains
1537  !> @ingroup mpp_domains_mod
1538  interface mpp_do_update
1539  module procedure mpp_do_update_r8_3d
1540  module procedure mpp_do_update_r8_3dv
1541 #ifdef OVERLOAD_C8
1542  module procedure mpp_do_update_c8_3d
1543 #endif
1544  module procedure mpp_do_update_i8_3d
1545  module procedure mpp_do_update_r4_3d
1546  module procedure mpp_do_update_r4_3dv
1547 #ifdef OVERLOAD_C4
1548  module procedure mpp_do_update_c4_3d
1549 #endif
1550  module procedure mpp_do_update_i4_3d
1551  end interface
1552  !> Private interface to update the data domain of a 3D field whose computational domain has been computed
1553  !> @ingroup mpp_domains_mod
1554  interface mpp_do_check
1555  module procedure mpp_do_check_r8_3d
1556  module procedure mpp_do_check_r8_3dv
1557 #ifdef OVERLOAD_C8
1558  module procedure mpp_do_check_c8_3d
1559 #endif
1560  module procedure mpp_do_check_i8_3d
1561  module procedure mpp_do_check_r4_3d
1562  module procedure mpp_do_check_r4_3dv
1563 #ifdef OVERLOAD_C4
1564  module procedure mpp_do_check_c4_3d
1565 #endif
1566  module procedure mpp_do_check_i4_3d
1567  end interface
1568 
1569  !> Passes data from a structured grid to an unstructured grid
1570  !! <br>Example usage:
1571  !!
1572  !! call mpp_pass_SG_to_UG(domain, sg_data, ug_data)
1573  !> @ingroup mpp_domains_mod
1574  interface mpp_pass_SG_to_UG
1575  module procedure mpp_pass_sg_to_ug_r8_2d
1576  module procedure mpp_pass_sg_to_ug_r8_3d
1577  module procedure mpp_pass_sg_to_ug_r4_2d
1578  module procedure mpp_pass_sg_to_ug_r4_3d
1579  module procedure mpp_pass_sg_to_ug_i4_2d
1580  module procedure mpp_pass_sg_to_ug_i4_3d
1581  module procedure mpp_pass_sg_to_ug_l4_2d
1582  module procedure mpp_pass_sg_to_ug_l4_3d
1583  end interface
1584 
1585  !> Passes a data field from an unstructured grid to a structured grid
1586  !! <br>Example usage:
1587  !!
1588  !! call mpp_pass_UG_to_SG(UG_domain, field_UG, field_SG)
1589  !> @ingroup mpp_domains_mod
1590  interface mpp_pass_UG_to_SG
1591  module procedure mpp_pass_ug_to_sg_r8_2d
1592  module procedure mpp_pass_ug_to_sg_r8_3d
1593  module procedure mpp_pass_ug_to_sg_r4_2d
1594  module procedure mpp_pass_ug_to_sg_r4_3d
1595  module procedure mpp_pass_ug_to_sg_i4_2d
1596  module procedure mpp_pass_ug_to_sg_i4_3d
1597  module procedure mpp_pass_ug_to_sg_l4_2d
1598  module procedure mpp_pass_ug_to_sg_l4_3d
1599  end interface
1600 
1601 !> Private interface used for @ref mpp_update_domains_ad
1606 !> @ingroup mpp_domains_mod
1607  interface mpp_do_update_ad
1608  module procedure mpp_do_update_ad_r8_3d
1609  module procedure mpp_do_update_ad_r8_3dv
1610  module procedure mpp_do_update_ad_r4_3d
1611  module procedure mpp_do_update_ad_r4_3dv
1612  end interface
1613 
1614 !> Get the boundary data for a symmetric domain when the data is at the C, E, or N-cell center.<br>
1615 !! @ref mpp_get_boundary is used to get the boundary data for a symmetric domain
1616 !! when the data is at the C, E, or N-cell center. For the cubic grid, the data should always
1617 !! be at the C-cell center.
1618 !! <br>Example usage:
1619 !!
1620 !! call mpp_get_boundary(domain, field, ebuffer, sbuffer, wbuffer, nbuffer)
1621 !! Get boundary information from domain and field and store in buffers
1622 !> @ingroup mpp_domains_mod
1623  interface mpp_get_boundary
1624  module procedure mpp_get_boundary_r8_2d
1625  module procedure mpp_get_boundary_r8_3d
1626 ! module procedure mpp_get_boundary_r8_4d
1627 ! module procedure mpp_get_boundary_r8_5d
1628  module procedure mpp_get_boundary_r8_2dv
1629  module procedure mpp_get_boundary_r8_3dv
1630 ! module procedure mpp_get_boundary_r8_4dv
1631 ! module procedure mpp_get_boundary_r8_5dv
1632  module procedure mpp_get_boundary_r4_2d
1633  module procedure mpp_get_boundary_r4_3d
1634 ! module procedure mpp_get_boundary_r4_4d
1635 ! module procedure mpp_get_boundary_r4_5d
1636  module procedure mpp_get_boundary_r4_2dv
1637  module procedure mpp_get_boundary_r4_3dv
1638 ! module procedure mpp_get_boundary_r4_4dv
1639 ! module procedure mpp_get_boundary_r4_5dv
1640  end interface
1641 
1642  !> @ingroup mpp_domains_mod
1643  interface mpp_get_boundary_ad
1644  module procedure mpp_get_boundary_ad_r8_2d
1645  module procedure mpp_get_boundary_ad_r8_3d
1646  module procedure mpp_get_boundary_ad_r8_2dv
1647  module procedure mpp_get_boundary_ad_r8_3dv
1648  module procedure mpp_get_boundary_ad_r4_2d
1649  module procedure mpp_get_boundary_ad_r4_3d
1650  module procedure mpp_get_boundary_ad_r4_2dv
1651  module procedure mpp_get_boundary_ad_r4_3dv
1652  end interface
1653 
1654  !> @ingroup mpp_domains_mod
1655  interface mpp_do_get_boundary
1656  module procedure mpp_do_get_boundary_r8_3d
1657  module procedure mpp_do_get_boundary_r8_3dv
1658  module procedure mpp_do_get_boundary_r4_3d
1659  module procedure mpp_do_get_boundary_r4_3dv
1660  end interface
1661 
1662  !> @ingroup mpp_domains_mod
1663  interface mpp_do_get_boundary_ad
1664  module procedure mpp_do_get_boundary_ad_r8_3d
1665  module procedure mpp_do_get_boundary_ad_r8_3dv
1666  module procedure mpp_do_get_boundary_ad_r4_3d
1667  module procedure mpp_do_get_boundary_ad_r4_3dv
1668  end interface
1669 
1670 !> Reorganization of distributed global arrays.<br>
1671 !! \e mpp_redistribute is used to reorganize a distributed array.
1672 !! \e MPP_TYPE_ can be of type \e integer, \e complex, or \e real;
1673 !! of 4-byte or 8-byte kind; of rank up to 5.
1674 !! <br>Example usage:
1675 !! call mpp_redistribute( domain_in, field_in, domain_out, field_out )
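!!
!! A minimal sketch (assuming \e domain_in and \e domain_out decompose the same
!! global grid and the fields are allocated on their respective domains):
!! @code{.F90}
!! type(domain2D) :: domain_in, domain_out
!! real, allocatable :: field_in(:,:), field_out(:,:)
!! ! ... define both domains and allocate the fields ...
!! call mpp_redistribute( domain_in, field_in, domain_out, field_out )
!! @endcode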
1676 !> @ingroup mpp_domains_mod
1677  interface mpp_redistribute
1678  module procedure mpp_redistribute_r8_2d
1679  module procedure mpp_redistribute_r8_3d
1680  module procedure mpp_redistribute_r8_4d
1681  module procedure mpp_redistribute_r8_5d
1682 #ifdef OVERLOAD_C8
1683  module procedure mpp_redistribute_c8_2d
1684  module procedure mpp_redistribute_c8_3d
1685  module procedure mpp_redistribute_c8_4d
1686  module procedure mpp_redistribute_c8_5d
1687 #endif
1688  module procedure mpp_redistribute_i8_2d
1689  module procedure mpp_redistribute_i8_3d
1690  module procedure mpp_redistribute_i8_4d
1691  module procedure mpp_redistribute_i8_5d
1692 !!$ module procedure mpp_redistribute_l8_2D
1693 !!$ module procedure mpp_redistribute_l8_3D
1694 !!$ module procedure mpp_redistribute_l8_4D
1695 !!$ module procedure mpp_redistribute_l8_5D
1696  module procedure mpp_redistribute_r4_2d
1697  module procedure mpp_redistribute_r4_3d
1698  module procedure mpp_redistribute_r4_4d
1699  module procedure mpp_redistribute_r4_5d
1700 #ifdef OVERLOAD_C4
1701  module procedure mpp_redistribute_c4_2d
1702  module procedure mpp_redistribute_c4_3d
1703  module procedure mpp_redistribute_c4_4d
1704  module procedure mpp_redistribute_c4_5d
1705 #endif
1706  module procedure mpp_redistribute_i4_2d
1707  module procedure mpp_redistribute_i4_3d
1708  module procedure mpp_redistribute_i4_4d
1709  module procedure mpp_redistribute_i4_5d
1710 !!$ module procedure mpp_redistribute_l4_2D
1711 !!$ module procedure mpp_redistribute_l4_3D
1712 !!$ module procedure mpp_redistribute_l4_4D
1713 !!$ module procedure mpp_redistribute_l4_5D
1714  end interface
1715 
1716  !> @ingroup mpp_domains_mod
1717  interface mpp_do_redistribute
1718  module procedure mpp_do_redistribute_r8_3d
1719 #ifdef OVERLOAD_C8
1720  module procedure mpp_do_redistribute_c8_3d
1721 #endif
1722  module procedure mpp_do_redistribute_i8_3d
1723  module procedure mpp_do_redistribute_l8_3d
1724  module procedure mpp_do_redistribute_r4_3d
1725 #ifdef OVERLOAD_C4
1726  module procedure mpp_do_redistribute_c4_3d
1727 #endif
1728  module procedure mpp_do_redistribute_i4_3d
1729  module procedure mpp_do_redistribute_l4_3d
1730  end interface
1731 
1732 !> Parallel checking between two ensembles which run on different sets of PEs at the same time.<br>
1733 !! There are two forms of the <TT>mpp_check_field</TT> call. The 2D
1734 !! version is generally used; the 3D version is built by repeated calls to the
1735 !! 2D version.<br>
1736 !! <br>Example usage:
1737 !! @code{.F90}
1738 !! call mpp_check_field(field_in, pelist1, pelist2, domain, mesg, &
1739 !! w_halo, s_halo, e_halo, n_halo, force_abort )
1740 !! @endcode
1741 !! @param field_in Field to be checked
1742 !! @param domain Domain of current pe
1743 !! @param mesg Message to be printed out
1744 !! @param w_halo Halo size to be checked, default is 0
1745 !! @param s_halo Halo size to be checked, default is 0
1746 !! @param e_halo Halo size to be checked, default is 0
1747 !! @param n_halo Halo size to be checked, default is 0
1748 !! @param force_abort When true, abort program when any difference found. Default is false.
1749 !> @ingroup mpp_domains_mod
1750  interface mpp_check_field
1751  module procedure mpp_check_field_2d
1752  module procedure mpp_check_field_3d
1753  end interface
1754 
1755 !***********************************************************************
1756 !
1757 ! public interface from mpp_domains_reduce.h
1758 !
1759 !***********************************************************************
1760 
1761 !> Fill in a global array from domain-decomposed arrays.<br>
1762 !!
1763 !> <TT>mpp_global_field</TT> is used to get an entire
1764 !! domain-decomposed array on each PE. <TT>MPP_TYPE_</TT> can be of type
1765 !! <TT>complex</TT>, <TT>integer</TT>, <TT>logical</TT> or <TT>real</TT>;
1766 !! of 4-byte or 8-byte kind; of rank up to 5.<br>
1767 !!
1768 !! All PEs in a domain decomposition must call
1769 !! <TT>mpp_global_field</TT>, and each will have a complete global field
1770 !! at the end. Please note that a global array of rank 3 or higher could
1771 !! occupy a lot of memory.
1772 !!
1773 !! @param domain 2D domain
1774 !! @param local Data dimensioned on either the compute or data domains of 'domain'
1775 !! @param[out] global output data dimensioned on the corresponding global domain
1776 !! @param flags can be either XONLY or YONLY parameters to specify a globalization on one axis only
1777 !!
1778 !! <br> Example usage:
1779 !! @code{.F90}
1780 !! call mpp_global_field( domain, local, global, flags )
1781 !! @endcode
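!! A sketch that sizes the output from the global domain before gathering
!! (\e local and \e global are hypothetical array names):
!! @code{.F90}
!! integer :: isc, iec, jsc, jec, isg, ieg, jsg, jeg
!! real, allocatable :: local(:,:), global(:,:)
!! call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
!! call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg )
!! allocate( local(isc:iec,jsc:jec), global(isg:ieg,jsg:jeg) )
!! ! ... fill local on the compute domain ...
!! call mpp_global_field( domain, local, global )
!! @endcode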
1782 !> @ingroup mpp_domains_mod
1783  interface mpp_global_field
1784  module procedure mpp_global_field2d_r8_2d
1785  module procedure mpp_global_field2d_r8_3d
1786  module procedure mpp_global_field2d_r8_4d
1787  module procedure mpp_global_field2d_r8_5d
1788 #ifdef OVERLOAD_C8
1789  module procedure mpp_global_field2d_c8_2d
1790  module procedure mpp_global_field2d_c8_3d
1791  module procedure mpp_global_field2d_c8_4d
1792  module procedure mpp_global_field2d_c8_5d
1793 #endif
1794  module procedure mpp_global_field2d_i8_2d
1795  module procedure mpp_global_field2d_i8_3d
1796  module procedure mpp_global_field2d_i8_4d
1797  module procedure mpp_global_field2d_i8_5d
1798  module procedure mpp_global_field2d_l8_2d
1799  module procedure mpp_global_field2d_l8_3d
1800  module procedure mpp_global_field2d_l8_4d
1801  module procedure mpp_global_field2d_l8_5d
1802  module procedure mpp_global_field2d_r4_2d
1803  module procedure mpp_global_field2d_r4_3d
1804  module procedure mpp_global_field2d_r4_4d
1805  module procedure mpp_global_field2d_r4_5d
1806 #ifdef OVERLOAD_C4
1807  module procedure mpp_global_field2d_c4_2d
1808  module procedure mpp_global_field2d_c4_3d
1809  module procedure mpp_global_field2d_c4_4d
1810  module procedure mpp_global_field2d_c4_5d
1811 #endif
1812  module procedure mpp_global_field2d_i4_2d
1813  module procedure mpp_global_field2d_i4_3d
1814  module procedure mpp_global_field2d_i4_4d
1815  module procedure mpp_global_field2d_i4_5d
1816  module procedure mpp_global_field2d_l4_2d
1817  module procedure mpp_global_field2d_l4_3d
1818  module procedure mpp_global_field2d_l4_4d
1819  module procedure mpp_global_field2d_l4_5d
1820  end interface
1821 
1822 !> @ingroup mpp_domains_mod
1823  interface mpp_global_field_ad
1824  module procedure mpp_global_field2d_r8_2d_ad
1825  module procedure mpp_global_field2d_r8_3d_ad
1826  module procedure mpp_global_field2d_r8_4d_ad
1827  module procedure mpp_global_field2d_r8_5d_ad
1828 #ifdef OVERLOAD_C8
1829  module procedure mpp_global_field2d_c8_2d_ad
1830  module procedure mpp_global_field2d_c8_3d_ad
1831  module procedure mpp_global_field2d_c8_4d_ad
1832  module procedure mpp_global_field2d_c8_5d_ad
1833 #endif
1834  module procedure mpp_global_field2d_i8_2d_ad
1835  module procedure mpp_global_field2d_i8_3d_ad
1836  module procedure mpp_global_field2d_i8_4d_ad
1837  module procedure mpp_global_field2d_i8_5d_ad
1838  module procedure mpp_global_field2d_l8_2d_ad
1839  module procedure mpp_global_field2d_l8_3d_ad
1840  module procedure mpp_global_field2d_l8_4d_ad
1841  module procedure mpp_global_field2d_l8_5d_ad
1842  module procedure mpp_global_field2d_r4_2d_ad
1843  module procedure mpp_global_field2d_r4_3d_ad
1844  module procedure mpp_global_field2d_r4_4d_ad
1845  module procedure mpp_global_field2d_r4_5d_ad
1846 #ifdef OVERLOAD_C4
1847  module procedure mpp_global_field2d_c4_2d_ad
1848  module procedure mpp_global_field2d_c4_3d_ad
1849  module procedure mpp_global_field2d_c4_4d_ad
1850  module procedure mpp_global_field2d_c4_5d_ad
1851 #endif
1852  module procedure mpp_global_field2d_i4_2d_ad
1853  module procedure mpp_global_field2d_i4_3d_ad
1854  module procedure mpp_global_field2d_i4_4d_ad
1855  module procedure mpp_global_field2d_i4_5d_ad
1856  module procedure mpp_global_field2d_l4_2d_ad
1857  module procedure mpp_global_field2d_l4_3d_ad
1858  module procedure mpp_global_field2d_l4_4d_ad
1859  module procedure mpp_global_field2d_l4_5d_ad
1860  end interface
1861 
1862 !> Private helper interface used by @ref mpp_global_field
1863 !> @ingroup mpp_domains_mod
1864  interface mpp_do_global_field
1865  module procedure mpp_do_global_field2d_r8_3d
1866 #ifdef OVERLOAD_C8
1867  module procedure mpp_do_global_field2d_c8_3d
1868 #endif
1869  module procedure mpp_do_global_field2d_i8_3d
1870  module procedure mpp_do_global_field2d_l8_3d
1871  module procedure mpp_do_global_field2d_r4_3d
1872 #ifdef OVERLOAD_C4
1873  module procedure mpp_do_global_field2d_c4_3d
1874 #endif
1875  module procedure mpp_do_global_field2d_i4_3d
1876  module procedure mpp_do_global_field2d_l4_3d
1877  end interface
1878 
1879  interface mpp_do_global_field_a2a
1880  module procedure mpp_do_global_field2d_a2a_r8_3d
1881 #ifdef OVERLOAD_C8
1882  module procedure mpp_do_global_field2d_a2a_c8_3d
1883 #endif
1884  module procedure mpp_do_global_field2d_a2a_i8_3d
1885  module procedure mpp_do_global_field2d_a2a_l8_3d
1886  module procedure mpp_do_global_field2d_a2a_r4_3d
1887 #ifdef OVERLOAD_C4
1888  module procedure mpp_do_global_field2d_a2a_c4_3d
1889 #endif
1890  module procedure mpp_do_global_field2d_a2a_i4_3d
1891  module procedure mpp_do_global_field2d_a2a_l4_3d
1892  end interface
1893 
1894 !> Same functionality as @ref mpp_global_field but for unstructured domains
1895 !> @ingroup mpp_domains_mod
1896  interface mpp_global_field_ug
1897  module procedure mpp_global_field2d_ug_r8_2d
1898  module procedure mpp_global_field2d_ug_r8_3d
1899  module procedure mpp_global_field2d_ug_r8_4d
1900  module procedure mpp_global_field2d_ug_r8_5d
1901  module procedure mpp_global_field2d_ug_i8_2d
1902  module procedure mpp_global_field2d_ug_i8_3d
1903  module procedure mpp_global_field2d_ug_i8_4d
1904  module procedure mpp_global_field2d_ug_i8_5d
1905  module procedure mpp_global_field2d_ug_r4_2d
1906  module procedure mpp_global_field2d_ug_r4_3d
1907  module procedure mpp_global_field2d_ug_r4_4d
1908  module procedure mpp_global_field2d_ug_r4_5d
1909  module procedure mpp_global_field2d_ug_i4_2d
1910  module procedure mpp_global_field2d_ug_i4_3d
1911  module procedure mpp_global_field2d_ug_i4_4d
1912  module procedure mpp_global_field2d_ug_i4_5d
1913  end interface
1914 
1915 !> @ingroup mpp_domains_mod
1916  interface mpp_do_global_field_ad
1917  module procedure mpp_do_global_field2d_r8_3d_ad
1918 #ifdef OVERLOAD_C8
1919  module procedure mpp_do_global_field2d_c8_3d_ad
1920 #endif
1921  module procedure mpp_do_global_field2d_i8_3d_ad
1922  module procedure mpp_do_global_field2d_l8_3d_ad
1923  module procedure mpp_do_global_field2d_r4_3d_ad
1924 #ifdef OVERLOAD_C4
1925  module procedure mpp_do_global_field2d_c4_3d_ad
1926 #endif
1927  module procedure mpp_do_global_field2d_i4_3d_ad
1928  module procedure mpp_do_global_field2d_l4_3d_ad
1929  end interface
1930 
1931 !> Global max of domain-decomposed arrays.<br>
1932 !! \e mpp_global_max is used to get the maximum value of a
1933 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1934 !! \e integer or \e real; of 4-byte or 8-byte kind; of rank
1935 !! up to 5. The dimension of \e locus must equal the rank of \e field.<br>
1936 !!<br>
1937 !! All PEs in a domain decomposition must call \e mpp_global_max,
1938 !! and each will have the result upon exit.
1939 !! The function \e mpp_global_min, with an identical syntax, is also available.
1940 !!
1941 !! @param domain 2D domain
1942 !! @param field field data dimensioned on either the compute or data domains of 'domain'
1943 !! @param locus If present, can be used to retrieve the location of the maximum
1944 !!
1945 !! <br>Example usage:
1946 !! mpp_global_max( domain, field, locus )
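!!
!! A minimal sketch (assuming \e field is a 2D array, so \e locus has two elements):
!! @code{.F90}
!! real    :: fmax
!! integer :: locus(2)   ! rank of locus equals the rank of field
!! fmax = mpp_global_max( domain, field, locus )
!! @endcode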
1947 !> @ingroup mpp_domains_mod
1948  interface mpp_global_max
1949  module procedure mpp_global_max_r8_2d
1950  module procedure mpp_global_max_r8_3d
1951  module procedure mpp_global_max_r8_4d
1952  module procedure mpp_global_max_r8_5d
1953  module procedure mpp_global_max_r4_2d
1954  module procedure mpp_global_max_r4_3d
1955  module procedure mpp_global_max_r4_4d
1956  module procedure mpp_global_max_r4_5d
1957  module procedure mpp_global_max_i8_2d
1958  module procedure mpp_global_max_i8_3d
1959  module procedure mpp_global_max_i8_4d
1960  module procedure mpp_global_max_i8_5d
1961  module procedure mpp_global_max_i4_2d
1962  module procedure mpp_global_max_i4_3d
1963  module procedure mpp_global_max_i4_4d
1964  module procedure mpp_global_max_i4_5d
1965  end interface
1966 
1967 !> Global min of domain-decomposed arrays.<br>
1968 !! \e mpp_global_min is used to get the minimum value of a
1969 !! domain-decomposed array on each PE. \e MPP_TYPE_ can be of type
1970 !! \e integer or \e real; of 4-byte or 8-byte kind; of rank
1971 !! up to 5. The dimension of \e locus must equal the rank of \e field.<br>
1972 !!<br>
1973 !! All PEs in a domain decomposition must call \e mpp_global_min,
1974 !! and each will have the result upon exit.
1975 !! The function \e mpp_global_max, with an identical syntax, is also available.
1976 !!
1977 !! @param domain 2D domain
1978 !! @param field field data dimensioned on either the compute or data domains of 'domain'
1979 !! @param locus If present, can be used to retrieve the location of the minimum
1980 !!
1981 !! <br>Example usage:
1982 !! mpp_global_min( domain, field, locus )
1983 !> @ingroup mpp_domains_mod
1984  interface mpp_global_min
1985  module procedure mpp_global_min_r8_2d
1986  module procedure mpp_global_min_r8_3d
1987  module procedure mpp_global_min_r8_4d
1988  module procedure mpp_global_min_r8_5d
1989  module procedure mpp_global_min_r4_2d
1990  module procedure mpp_global_min_r4_3d
1991  module procedure mpp_global_min_r4_4d
1992  module procedure mpp_global_min_r4_5d
1993  module procedure mpp_global_min_i8_2d
1994  module procedure mpp_global_min_i8_3d
1995  module procedure mpp_global_min_i8_4d
1996  module procedure mpp_global_min_i8_5d
1997  module procedure mpp_global_min_i4_2d
1998  module procedure mpp_global_min_i4_3d
1999  module procedure mpp_global_min_i4_4d
2000  module procedure mpp_global_min_i4_5d
2001  end interface
2002 
2003 !> Global sum of domain-decomposed arrays.<br>
2004 !! \e mpp_global_sum is used to get the sum of a domain-decomposed array
2005 !! on each PE. \e MPP_TYPE_ can be of type \e integer, \e complex, or \e real; of 4-byte or
2006 !! 8-byte kind; of rank up to 5.
2007 !!
2008 !! @param domain 2D domain
2009 !! @param field field data dimensioned on either the compute or data domain of 'domain'
2010 !! @param flags If present, must have the value BITWISE_EXACT_SUM. This produces a sum that
2011 !! is guaranteed to be identical irrespective of how the domain is decomposed.
2012 !! This method does the sum first along the ranks beyond 2, and then calls mpp_global_field
2013 !! to produce a global 2D array which is then summed. The default method, which is
2014 !! considerably faster, does a local sum followed by mpp_sum across the domain
2015 !! decomposition.
2016 !!
2017 !! <br>Example usage:
2018 !! call mpp_global_sum( domain, field, flags )
2019 !! @note All PEs in a domain decomposition must call \e mpp_global_sum,
2020 !! and each will have the result upon exit.
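!! For example, a sketch contrasting the default and bitwise-exact sums of a
!! field allocated on the compute or data domain:
!! @code{.F90}
!! real :: s_fast, s_exact
!! s_fast  = mpp_global_sum( domain, field )                          ! fast, layout-dependent rounding
!! s_exact = mpp_global_sum( domain, field, flags=BITWISE_EXACT_SUM ) ! reproducible across layouts
!! @endcode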
2021 !> @ingroup mpp_domains_mod
2022  interface mpp_global_sum
2023  module procedure mpp_global_sum_r8_2d
2024  module procedure mpp_global_sum_r8_3d
2025  module procedure mpp_global_sum_r8_4d
2026  module procedure mpp_global_sum_r8_5d
2027 #ifdef OVERLOAD_C8
2028  module procedure mpp_global_sum_c8_2d
2029  module procedure mpp_global_sum_c8_3d
2030  module procedure mpp_global_sum_c8_4d
2031  module procedure mpp_global_sum_c8_5d
2032 #endif
2033  module procedure mpp_global_sum_r4_2d
2034  module procedure mpp_global_sum_r4_3d
2035  module procedure mpp_global_sum_r4_4d
2036  module procedure mpp_global_sum_r4_5d
2037 #ifdef OVERLOAD_C4
2038  module procedure mpp_global_sum_c4_2d
2039  module procedure mpp_global_sum_c4_3d
2040  module procedure mpp_global_sum_c4_4d
2041  module procedure mpp_global_sum_c4_5d
2042 #endif
2043  module procedure mpp_global_sum_i8_2d
2044  module procedure mpp_global_sum_i8_3d
2045  module procedure mpp_global_sum_i8_4d
2046  module procedure mpp_global_sum_i8_5d
2047  module procedure mpp_global_sum_i4_2d
2048  module procedure mpp_global_sum_i4_3d
2049  module procedure mpp_global_sum_i4_4d
2050  module procedure mpp_global_sum_i4_5d
2051  end interface
2052 
2053 !gag
2054 !> @ingroup mpp_domains_mod
2055  interface mpp_global_sum_tl
2056  module procedure mpp_global_sum_tl_r8_2d
2057  module procedure mpp_global_sum_tl_r8_3d
2058  module procedure mpp_global_sum_tl_r8_4d
2059  module procedure mpp_global_sum_tl_r8_5d
2060 #ifdef OVERLOAD_C8
2061  module procedure mpp_global_sum_tl_c8_2d
2062  module procedure mpp_global_sum_tl_c8_3d
2063  module procedure mpp_global_sum_tl_c8_4d
2064  module procedure mpp_global_sum_tl_c8_5d
2065 #endif
2066  module procedure mpp_global_sum_tl_r4_2d
2067  module procedure mpp_global_sum_tl_r4_3d
2068  module procedure mpp_global_sum_tl_r4_4d
2069  module procedure mpp_global_sum_tl_r4_5d
2070 #ifdef OVERLOAD_C4
2071  module procedure mpp_global_sum_tl_c4_2d
2072  module procedure mpp_global_sum_tl_c4_3d
2073  module procedure mpp_global_sum_tl_c4_4d
2074  module procedure mpp_global_sum_tl_c4_5d
2075 #endif
2076  module procedure mpp_global_sum_tl_i8_2d
2077  module procedure mpp_global_sum_tl_i8_3d
2078  module procedure mpp_global_sum_tl_i8_4d
2079  module procedure mpp_global_sum_tl_i8_5d
2080  module procedure mpp_global_sum_tl_i4_2d
2081  module procedure mpp_global_sum_tl_i4_3d
2082  module procedure mpp_global_sum_tl_i4_4d
2083  module procedure mpp_global_sum_tl_i4_5d
2084  end interface
2085 !gag
2086 
2087 !bnc
2088 !> @ingroup mpp_domains_mod
2089  interface mpp_global_sum_ad
2090  module procedure mpp_global_sum_ad_r8_2d
2091  module procedure mpp_global_sum_ad_r8_3d
2092  module procedure mpp_global_sum_ad_r8_4d
2093  module procedure mpp_global_sum_ad_r8_5d
2094 #ifdef OVERLOAD_C8
2095  module procedure mpp_global_sum_ad_c8_2d
2096  module procedure mpp_global_sum_ad_c8_3d
2097  module procedure mpp_global_sum_ad_c8_4d
2098  module procedure mpp_global_sum_ad_c8_5d
2099 #endif
2100  module procedure mpp_global_sum_ad_r4_2d
2101  module procedure mpp_global_sum_ad_r4_3d
2102  module procedure mpp_global_sum_ad_r4_4d
2103  module procedure mpp_global_sum_ad_r4_5d
2104 #ifdef OVERLOAD_C4
2105  module procedure mpp_global_sum_ad_c4_2d
2106  module procedure mpp_global_sum_ad_c4_3d
2107  module procedure mpp_global_sum_ad_c4_4d
2108  module procedure mpp_global_sum_ad_c4_5d
2109 #endif
2110  module procedure mpp_global_sum_ad_i8_2d
2111  module procedure mpp_global_sum_ad_i8_3d
2112  module procedure mpp_global_sum_ad_i8_4d
2113  module procedure mpp_global_sum_ad_i8_5d
2114  module procedure mpp_global_sum_ad_i4_2d
2115  module procedure mpp_global_sum_ad_i4_3d
2116  module procedure mpp_global_sum_ad_i4_4d
2117  module procedure mpp_global_sum_ad_i4_5d
2118  end interface
2119 !bnc
2120 
2121 !***********************************************************************
2122 !
2123 ! public interface from mpp_domain_util.h
2124 !
2125 !***********************************************************************
2126  !> @brief Retrieve PE number of a neighboring domain.
2127  !!
2128  !> Given a 1-D or 2-D domain decomposition, this call allows users to retrieve
2129  !! the PE number of an adjacent PE-domain while taking into account that the
2130  !! domain may have holes (masked) and/or have cyclic boundary conditions and/or a
2131  !! folded edge. Which PE-domain will be retrieved will depend on "direction":
2132  !! +1 (right) or -1 (left) for a 1-D domain decomposition and either NORTH, SOUTH,
2133  !! EAST, WEST, NORTH_EAST, SOUTH_EAST, SOUTH_WEST, or NORTH_WEST for a 2-D
2134  !! decomposition. If no neighboring domain exists (masked domain), then the
2135  !! returned "pe" value will be set to NULL_PE.<br>
2136  !! <br>Example usage:
2137  !!
2138  !! call mpp_get_neighbor_pe( domain1d, direction=+1 , pe)
2139  !!
2140  !! Set pe to the neighbor pe number that is to the right of the current pe
2141  !!
2142  !! call mpp_get_neighbor_pe( domain2d, direction=NORTH, pe)
2143  !!
2144  !! Get neighbor pe number that's above/north of the current pe
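!!
!! A minimal sketch that guards against a masked (absent) neighbour:
!! @code{.F90}
!! integer :: pe_north
!! call mpp_get_neighbor_pe( domain, NORTH, pe_north )
!! if( pe_north .NE. NULL_PE ) then
!!    ! ... exchange data with the northern neighbour ...
!! end if
!! @endcode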
2145  !> @ingroup mpp_domains_mod
2146  interface mpp_get_neighbor_pe
2147  module procedure mpp_get_neighbor_pe_1d
2148  module procedure mpp_get_neighbor_pe_2d
2149  end interface
2150 
2151  !> Equality/inequality operators for domain types. <br>
2152  !!
2153  !! <br>The module provides public operators to check for
2154  !! equality/inequality of domain types, e.g.:<br>
2155  !!
2156  !! type(domain1D) :: a, b
2157  !! type(domain2D) :: c, d
2158  !! ...
2159  !! if( a.NE.b )then
2160  !! ...
2161  !! end if
2162  !! if( c==d )then
2163  !! ...
2164  !! end if
2165  !!<br>
2166  !! Domains are considered equal if and only if the start and end
2167  !! indices of each of their component global, data and compute domains are equal.
2168  !> @ingroup mpp_domains_mod
2169  interface operator(.EQ.)
2170  module procedure mpp_domain1d_eq
2171  module procedure mpp_domain2d_eq
2172  module procedure mpp_domainug_eq
2173  end interface
2174 
2175  !> @ingroup mpp_domains_mod
2176  interface operator(.NE.)
2177  module procedure mpp_domain1d_ne
2178  module procedure mpp_domain2d_ne
2179  module procedure mpp_domainug_ne
2180  end interface
2181 
2182  !> These routines retrieve the axis specifications associated with the compute domains.
2183  !! The domain is a derived type with private elements.
2185  !! The 2D version is a simple extension of the 1D version.
2186  !! <br>Example usage:
2187  !!
2188  !! call mpp_get_compute_domain(domain_1D, is, ie)
2189  !! call mpp_get_compute_domain(domain_2D, is, ie, js, je)
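!!
!! A common allocation pattern (a sketch; the halo widths come from the
!! whalo/ehalo/shalo/nhalo arguments given to mpp_define_domains):
!! @code{.F90}
!! integer :: isc, iec, jsc, jec      ! compute domain
!! integer :: isd, ied, jsd, jed      ! data domain (compute domain plus halos)
!! real, allocatable :: field(:,:)
!! call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
!! call mpp_get_data_domain   ( domain, isd, ied, jsd, jed )
!! allocate( field(isd:ied,jsd:jed) )        ! include the halo points
!! field(isc:iec,jsc:jec) = 0.0              ! compute only on owned points
!! call mpp_update_domains( field, domain )  ! then fill the halos
!! @endcode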
2190  !> @ingroup mpp_domains_mod
2191  interface mpp_get_compute_domain
2192  module procedure mpp_get_compute_domain1d
2193  module procedure mpp_get_compute_domain2d
2194  end interface
2195 
2196  !> Retrieve the entire array of compute domain extents associated with a decomposition.
2197  !!
2198  !> @param domain 2D domain
2199  !> @param[out] xbegin,xend,ybegin,yend x and y domain start and end indices
2200  !> @param[out] xsize,ysize x and y domain sizes
2201  !! <br>Example usage:
2202  !!
2203  !! call mpp_get_compute_domains( domain, xbegin, xend, xsize, &
2204  !! ybegin, yend, ysize )
2205  !> @ingroup mpp_domains_mod
2206  interface mpp_get_compute_domains
2207  module procedure mpp_get_compute_domains1d
2208  module procedure mpp_get_compute_domains2d
2209  end interface
2210 
2211  !> @ingroup mpp_domains_mod
2212  interface mpp_get_global_domains
2213  module procedure mpp_get_global_domains1d
2214  module procedure mpp_get_global_domains2d
2215  end interface
2216 
2217  !> These routines retrieve the axis specifications associated with the data domains.
2218  !! The domain is a derived type with private elements.
2220  !! The 2D version is a simple extension of the 1D version.
2221  !! <br>Example usage:
2222  !!
2223  !! call mpp_get_data_domain(domain_1d, isd, ied)
2224  !! call mpp_get_data_domain(domain_2d, isd, ied, jsd, jed)
2225  !> @ingroup mpp_domains_mod
2226  interface mpp_get_data_domain
2227  module procedure mpp_get_data_domain1d
2228  module procedure mpp_get_data_domain2d
2229  end interface
2230 
2231  !> These routines retrieve the axis specifications associated with the global domains.
2232  !! The domain is a derived type with private elements.
2234  !! The 2D version is a simple extension of the 1D version.
2235  !! <br>Example usage:
2236  !!
2237  !! call mpp_get_global_domain(domain_1d, isg, ieg)
2238  !! call mpp_get_global_domain(domain_2d, isg, ieg, jsg, jeg)
2239  !> @ingroup mpp_domains_mod
2240  interface mpp_get_global_domain
2241  module procedure mpp_get_global_domain1d
2242  module procedure mpp_get_global_domain2d
2243  end interface
2244 
2245  !> These routines retrieve the axis specifications associated with the memory domains.
2246  !! The domain is a derived type with private elements.
2248  !! The 2D version is a simple extension of the 1D version.
2249  !! <br>Example usage:
2250  !!
2251  !! call mpp_get_memory_domain(domain_1d, ism, iem)
2252  !! call mpp_get_memory_domain(domain_2d, ism, iem, jsm, jem)
2253  !> @ingroup mpp_domains_mod
2254  interface mpp_get_memory_domain
2255  module procedure mpp_get_memory_domain1d
2256  module procedure mpp_get_memory_domain2d
2257  end interface
2258 
2259  !> @ingroup mpp_domains_mod
2260  interface mpp_get_domain_extents
2261  module procedure mpp_get_domain_extents1d
2262  module procedure mpp_get_domain_extents2d
2263  end interface
2264 
2265  !> These routines set the axis specifications associated with the compute domains.
2266  !! The domain is a derived type with private elements.
2268  !! The 2D version is a simple extension of the 1D version.
2269  !! <br>Example usage:
2270  !!
2271  !! call mpp_set_compute_domain(domain_1d, is, ie)
2272  !! call mpp_set_compute_domain(domain_2d, is, ie, js, je)
2273  !> @ingroup mpp_domains_mod
2274  interface mpp_set_compute_domain
2275  module procedure mpp_set_compute_domain1d
2276  module procedure mpp_set_compute_domain2d
2277  end interface
2278 
2279  !> These routines set the axis specifications associated with the data domains.
2280  !! The domain is a derived type with private elements.
2282  !! The 2D version is a simple extension of the 1D version.
2283  !! <br>Example usage:
2284  !!
2285  !! call mpp_set_data_domain(domain_1d, isd, ied)
2286  !! call mpp_set_data_domain(domain_2d, isd, ied, jsd, jed)
2287  !> @ingroup mpp_domains_mod
2288  interface mpp_set_data_domain
2289  module procedure mpp_set_data_domain1d
2290  module procedure mpp_set_data_domain2d
2291  end interface
2292 
2293  !> These routines set the axis specifications associated with the global domains.
2294  !! The domain is a derived type with private elements.
2296  !! The 2D version is a simple extension of the 1D version.
2297  !! <br>Example usage:
2298  !!
2299  !! call mpp_set_global_domain(domain_1d, isg, ieg)
2300  !! call mpp_set_global_domain(domain_2d, isg, ieg, jsg, jeg)
2301  !> @ingroup mpp_domains_mod
2302  interface mpp_set_global_domain
2303  module procedure mpp_set_global_domain1d
2304  module procedure mpp_set_global_domain2d
2305  end interface
2306 
2307  !> Retrieve list of PEs associated with a domain decomposition.
2308  !! The 1D version of this call returns an array of the PEs assigned to
2309  !! this 1D domain decomposition. In addition the optional argument pos may be
2310  !! used to retrieve the 0-based position of the domain local to the
2311  !! calling PE, i.e., <TT> domain\%list(pos)\%pe</TT> is the local PE,
2312  !! as returned by @ref mpp_pe().
2313  !! The 2D version of this call is identical to 1D version.
2314  !> @ingroup mpp_domains_mod
2315  interface mpp_get_pelist
2316  module procedure mpp_get_pelist1d
2317  module procedure mpp_get_pelist2d
2318  end interface
2319 
2320  !> Retrieve layout associated with a domain decomposition
2321  !! The 1D version of this call returns the number of divisions that was assigned to this
2322  !! decomposition axis. The 2D version of this call returns an array of dimension 2 holding the
2323  !! results on two axes.
2324  !! <br>Example usage:
2325  !!
2326  !! call mpp_get_layout( domain, layout )
2327  !> @ingroup mpp_domains_mod
2328  interface mpp_get_layout
2329  module procedure mpp_get_layout1d
2330  module procedure mpp_get_layout2d
2331  end interface
2332  !> Private interface for internal usage, compares two sizes
2333  !> @ingroup mpp_domains_mod
2334  interface check_data_size
2335  module procedure check_data_size_1d
2336  module procedure check_data_size_2d
2337  end interface
2338 
2339  !> Nullify domain list. This interface is needed in mpp_domains_test.
2340  !! 1-D case can be added in if needed.
2341  !! <br>Example usage:
2342  !!
2343  !! call mpp_nullify_domain_list(domain)
2344  !> @ingroup mpp_domains_mod
2345  interface mpp_nullify_domain_list
2346  module procedure nullify_domain2d_list
2347  end interface
2348 
2349  ! Include variable "version" to be written to log file.
2350 #include<file_version.h>
2351  public version
2352 
2353 
2354 contains
2355 
2356 #include <mpp_define_nest_domains.inc>
2357 #include <mpp_domains_util.inc>
2358 #include <mpp_domains_comm.inc>
2359 #include <mpp_domains_define.inc>
2360 #include <mpp_domains_misc.inc>
2361 #include <mpp_domains_reduce.inc>
2362 #include <mpp_unstruct_domain.inc>
2363 
2364 end module mpp_domains_mod
subroutine mpp_get_overlap(domain, action, p, is, ie, js, je, dir, rot, position)
Set user stack size.
subroutine mpp_get_neighbor_pe_2d(domain, direction, pe)
Return PE North/South/East/West of this PE-domain. direction must be NORTH, SOUTH,...
subroutine mpp_get_global_domains1d(domain, begin, end, size)
Set user stack size.
integer function mpp_get_domain_npes(domain)
Set user stack size.
subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, grid_index, name)
integer, save a2_sort_len
length sorted memory list
subroutine mpp_define_nest_domains(nest_domain, domain, num_nest, nest_level, tile_fine, tile_coarse, istart_coarse, icount_coarse, jstart_coarse, jcount_coarse, npes_nest_tile, x_refine, y_refine, extra_halo, name)
Set up a domain to pass data between aligned coarse and fine grid of nested model.
logical function mpp_domainug_ne(a, b)
Overload the .ne. for UG.
subroutine mpp_modify_domain2d(domain_in, domain_out, isc, iec, jsc, jec, isg, ieg, jsg, jeg, whalo, ehalo, shalo, nhalo)
logical function mpp_domain2d_eq(a, b)
Set user stack size.
integer function mpp_get_tile_npes(domain)
Returns number of processors used on current tile.
integer, dimension(-1:max_dom_ids), save ids_idx
index of d_comm associated with sorted addesses
integer nthread_control_loop
Determine the loop order for packing and unpacking. When number of threads is greater than nthread_co...
integer, save i_sort_len
length sorted domain ids list
subroutine mpp_get_global_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, tile_count, position)
Set user stack size.
subroutine mpp_get_pelist1d(domain, pelist, pos)
Set user stack size.
subroutine mpp_set_global_domain1d(domain, begin, end, size)
Set user stack size.
subroutine mpp_get_layout2d(domain, layout)
Set user stack size.
subroutine mpp_get_tile_pelist(domain, pelist)
Get the processors list used on current tile.
logical efp_sum_overflow_check
If .true., always do overflow_check when doing EFP bitwise mpp_global_sum.
subroutine mpp_get_domain_components(domain, x, y, tile_count)
Retrieve 1D components of 2D decomposition.
subroutine mpp_get_domain_extents1d(domain, xextent, yextent)
Set user stack size.
logical function mpp_group_update_is_set(group)
Set user stack size.
subroutine logical mpp_broadcast_domain_1(domain)
broadcast domain (useful only outside the context of its own pelist)
integer function mpp_get_domain_tile_commid(domain)
Set user stack size.
character(len=32) debug_update_domain
namelist interface
subroutine mpp_domains_init(flags)
Initialize domain decomp package.
subroutine mpp_set_global_domain2d(domain, xbegin, xend, ybegin, yend, xsize, ysize, tile_count)
Set user stack size.
logical function mpp_domain1d_eq(a, b)
Set user stack size.
integer function, dimension(size(domain%tile_id(:))) mpp_get_tile_id(domain)
Returns the tile_id on current pe.
logical function mpp_domain_is_symmetry(domain)
Set user stack size.
subroutine mpp_create_super_grid_domain(domain)
Modifies the indices of the input domain to create the supergrid domain.
logical function mpp_mosaic_defined()
Accessor function for value of mosaic_defined.
integer function mpp_get_num_overlap(domain, action, p, position)
Set user stack size.
integer function mpp_get_current_ntile(domain)
Returns number of tile on current pe.
subroutine mpp_define_domains1d(global_indices, ndivs, domain, pelist, flags, halo, extent, maskmap, memory_size, begin_halo, end_halo)
Define data and computational domains on a 1D set of data (isg:ieg) and assign them to PEs.
subroutine mpp_set_domain_symmetry(domain, symmetry)
Set user stack size.
integer function mpp_get_ntile_count(domain)
Returns number of tiles in mosaic.
subroutine mpp_get_domain_extents2d(domain, xextent, yextent)
This will return xextent and yextent for each tile.
logical debug_message_passing
Will check the consistency on the boundary between processor/tile when updating domain for symmetric ...
subroutine mpp_get_global_domain1d(domain, begin, end, size, max_size)
Set user stack size.
integer function, dimension(2) mpp_get_io_domain_layout(domain)
Set user stack size.
subroutine mpp_shift_nest_domains(nest_domain, domain, delta_i_coarse, delta_j_coarse, extra_halo)
Based on mpp_define_nest_domains, but just resets positioning of nest Modifies the parent/coarse star...
subroutine mpp_get_neighbor_pe_1d(domain, direction, pe)
Return PE to the righ/left of this PE-domain.
subroutine mpp_define_mosaic_pelist(sizes, pe_start, pe_end, pelist, costpertile)
Defines a pelist for use with mosaic tiles.
subroutine mpp_get_tile_compute_domains(domain, xbegin, xend, ybegin, yend, position)
Set user stack size.
subroutine mpp_get_compute_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, x_is_global, y_is_global, tile_count, position)
Set user stack size.
subroutine mpp_get_compute_domain1d(domain, begin, end, size, max_size, is_global)
Set user stack size.
subroutine mpp_define_io_domain(domain, io_layout)
Define the layout for IO pe's for the given domain.
subroutine mpp_get_f2c_index_coarse(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse, nest_level, position)
subroutine mpp_compute_extent(isg, ieg, ndivs, ibegin, iend, extent)
Computes extents for a grid decomposition with the given indices and divisions.
integer, dimension(-1:max_fields), save d_comm_idx
index of d_comm associated with sorted addresses
logical function mpp_domain1d_ne(a, b)
Set user stack size.
subroutine mpp_modify_domain1d(domain_in, domain_out, cbegin, cend, gbegin, gend, hbegin, hend)
Modifies the exents of a domain.
subroutine mpp_get_layout1d(domain, layout)
Set user stack size.
subroutine mpp_broadcast_domain_ug(domain)
Broadcast domain (useful only outside the context of its own pelist)
subroutine mpp_get_update_pelist(domain, action, pelist, position)
Set user stack size.
subroutine mpp_set_data_domain2d(domain, xbegin, xend, ybegin, yend, xsize, ysize, x_is_global, y_is_global, tile_count)
Set user stack size.
subroutine mpp_get_compute_domains1d(domain, begin, end, size)
Set user stack size.
integer(i8_kind), parameter addr2_base
= 0x0000000000010000
integer function mpp_get_domain_tile_root_pe(domain)
Set user stack size.
subroutine mpp_check_field_2d(field_in, pelist1, pelist2, domain, mesg, w_halo, s_halo, e_halo, n_halo, force_abort, position)
This routine is used to do parallel checking for 2d data between n and m pe. The comparison is is don...
integer, dimension(-1:max_addrs2), save addrs2_idx
index of addr2 associated with d_comm
subroutine mpp_define_mosaic(global_indices, layout, domain, num_tile, num_contact, tile1, tile2, istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, pe_start, pe_end, pelist, whalo, ehalo, shalo, nhalo, xextent, yextent, maskmap, name, memory_size, symmetry, xflags, yflags, tile_id)
Defines a domain for mosaic tile grids.
integer, save n_comm
number of communicators used
subroutine mpp_domains_exit()
Exit mpp_domains_mod. Serves no particular purpose, but is provided should you require to re-initiali...
subroutine mpp_get_pelist2d(domain, pelist, pos)
Set user stack size.
integer, save n_ids
number of domain ids used (=i_sort_len; domain ids are never removed)
integer, save a_sort_len
length sorted memory list
subroutine mpp_get_memory_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, x_is_global, y_is_global, position)
Set user stack size.
subroutine mpp_check_field_3d(field_in, pelist1, pelist2, domain, mesg, w_halo, s_halo, e_halo, n_halo, force_abort, position)
This routine is used to do parallel checking for 3d data between n and m pe. The comparison is is don...
subroutine mpp_define_layout2d(global_indices, ndivs, layout)
subroutine mpp_set_compute_domain1d(domain, begin, end, size, is_global)
Set user stack size.
integer function mpp_get_domain_pe(domain)
Set user stack size.
subroutine mpp_get_tile_list(domain, tiles)
Return the tile_id on current pelist. one-tile-per-pe is assumed.
subroutine mpp_get_f2c_index_fine(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse, is_fine, ie_fine, js_fine, je_fine, nest_level, position)
subroutine mpp_compute_block_extent(isg, ieg, ndivs, ibegin, iend)
Computes the extents of a grid block.
logical function mpp_group_update_initialized(group)
Set user stack size.
integer, save dc_sort_len
length sorted comm keys (=num active communicators)
subroutine mpp_set_compute_domain2d(domain, xbegin, xend, ybegin, yend, xsize, ysize, x_is_global, y_is_global, tile_count)
Set user stack size.
type(domain2d) function, pointer mpp_get_io_domain(domain)
Set user stack size.
subroutine mpp_get_compute_domains2d(domain, xbegin, xend, xsize, ybegin, yend, ysize, position)
Set user stack size.
subroutine mpp_copy_domain2d(domain_in, domain_out)
Copies input 2d domain to the output 2d domain.
integer(i8_kind), dimension(max_fields), save dckey_sorted
list of sorted local addresses
subroutine mpp_get_memory_domain1d(domain, begin, end, size, max_size, is_global)
Set user stack size.
type(domaincommunicator2d), dimension(:), allocatable, target, save d_comm
domain communicators
subroutine mpp_get_domain_shift(domain, ishift, jshift, position)
Returns the shift values in the x- and y-directions according to the domain position.
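A minimal sketch of how the shift values are typically used (the array name and bounds are illustrative; CORNER is a public position constant of mpp_domains_mod): for symmetric domains, fields at the CORNER, EAST, or NORTH position carry one extra point in the shifted direction.
  integer :: ishift, jshift
  real, allocatable :: vort(:,:)
  call mpp_get_domain_shift( domain, ishift, jshift, position=CORNER )
  ! isd:ied, jsd:jed are data-domain bounds from mpp_get_data_domain
  allocate( vort(isd:ied+ishift, jsd:jed+jshift) )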
logical function mpp_domain2d_ne(a, b)
Overload the .ne. for domain2D.
logical function mpp_domain_is_initialized(domain)
Returns whether the given domain has been initialized.
subroutine mpp_broadcast_domain_nest_coarse(domain, tile_coarse)
Broadcast nested domain (useful only outside the context of its own pelist)
subroutine nullify_domain2d_list(domain)
Nullify the domain2D list.
integer, dimension(-1:max_addrs), save addrs_idx
index of address associated with d_comm
logical function mpp_domain_is_tile_root_pe(domain)
Returns whether the current PE is the root PE of the tile; if the number of tiles on the current PE is greater than 1,...
character(len=name_length) function mpp_get_domain_name(domain)
Returns the name of the given domain.
subroutine mpp_get_global_domains2d(domain, xbegin, xend, xsize, ybegin, yend, ysize, position)
Retrieve the axis specifications associated with the global domain of a 2D decomposition.
subroutine mpp_get_c2f_index(nest_domain, is_fine, ie_fine, js_fine, je_fine, is_coarse, ie_coarse, js_coarse, je_coarse, dir, nest_level, position)
Get the index of the data passed from coarse grid to fine grid.
integer function mpp_get_domain_commid(domain)
Returns the communicator ID associated with the given domain.
integer, save n_addrs2
number of memory addresses used
subroutine mpp_get_data_domain2d(domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, x_is_global, y_is_global, tile_count, position)
Retrieve the axis specifications associated with the data domain of a 2D domain.
subroutine mpp_get_domain_pelist(domain, pelist)
Retrieve the list of PEs associated with the given domain.
integer(i8_kind), dimension(max_addrs2), save addrs2_sorted
list of sorted local addresses
subroutine mpp_domains_set_stack_size(n)
Set user stack size.
subroutine mpp_clear_group_update(group)
Clears the given group update.
subroutine mpp_get_update_size(domain, nsend, nrecv, position)
Returns the number of send and receive operations associated with a domain update.
integer(i8_kind), dimension(max_addrs), save addrs_sorted
list of sorted local addresses
integer(i8_kind), dimension(max_dom_ids), save ids_sorted
list of sorted domain identifiers
integer function mpp_get_domain_root_pe(domain)
Returns the root PE of the given domain.
logical function mpp_domainug_eq(a, b)
Overload the .eq. for UG.
subroutine mpp_set_data_domain1d(domain, begin, end, size, is_global)
Set the axis specifications associated with the data domain of a 1D domain.
recursive subroutine mpp_copy_domain1d(domain_in, domain_out)
Copies input 1d domain to the output 1d domain.
integer, save n_addrs
number of memory addresses used
subroutine mpp_get_data_domain1d(domain, begin, end, size, max_size, is_global)
Retrieve the axis specifications associated with the data domain of a 1D domain.
subroutine mpp_define_domains2d(global_indices, layout, domain, pelist, xflags, yflags, xhalo, yhalo, xextent, yextent, maskmap, name, symmetry, memory_size, whalo, ehalo, shalo, nhalo, is_mosaic, tile_count, tile_id, complete, x_cyclic_offset, y_cyclic_offset)
Define 2D data and computational domains on a global rectilinear Cartesian domain (isg:ieg,...
Private interface for internal usage, compares two sizes.
Broadcasts domain to every pe. Only useful outside the context of its own pelist.
Parallel checking between two ensembles which run on different sets of PEs at the same time. There are tw...
Private interface used for non-blocking updates.
Completes a pending non-blocking group update. Must follow a call to mpp_start_group_update.
Must be used after a call to mpp_start_update_domains in order to complete a nonblocking domain updat...
Constructor for the mpp_group_update_type which is then used with mpp_start_group_update.
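A rough sketch of the group-update sequence covered by the entries above. The exact argument lists, in particular the d_type dummy used to select the buffer kind, are assumptions based on common usage rather than taken from this file:
  type(mpp_group_update_type) :: group
  real :: d_type   ! dummy real; assumed to select the kind of the update buffers
  call mpp_create_group_update( group, u, domain )   ! register fields with the group
  call mpp_create_group_update( group, v, domain )
  call mpp_start_group_update( group, domain, d_type )
  ! ... work that does not depend on the halos ...
  call mpp_complete_group_update( group, domain, d_type )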
Deallocate given 1D or 2D domain.
Set up a domain decomposition.
Retrieve layout associated with a domain decomposition. Given a global 2D domain and the number of di...
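The two entries above are normally used together. The following is a minimal sketch, assuming an illustrative 360x180 global grid and 2-point halos (the field name and sizes are ours, not from the source):
  use mpp_mod,         only : mpp_npes
  use mpp_domains_mod, only : domain2D, mpp_define_layout, mpp_define_domains, &
                              mpp_get_compute_domain, mpp_get_data_domain
  type(domain2D) :: domain
  integer :: layout(2), is, ie, js, je, isd, ied, jsd, jed
  real, allocatable :: field(:,:)
  ! divide the 360x180 global grid across all PEs of the current pelist
  call mpp_define_layout( (/1,360,1,180/), mpp_npes(), layout )
  call mpp_define_domains( (/1,360,1,180/), layout, domain, xhalo=2, yhalo=2 )
  call mpp_get_compute_domain( domain, is, ie, js, je )    ! points this PE computes
  call mpp_get_data_domain( domain, isd, ied, jsd, jed )   ! compute points plus halos
  allocate( field(isd:ied, jsd:jed) )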
Defines a nullified 1D or 2D domain.
Private interface to updates data domain of 3D field whose computational domains have been computed.
Private helper interface used by mpp_global_field.
Private interface used for mpp_update_domains.
Passes a data field from an unstructured grid to a structured grid. Example usage:
Used by mpp_update_nest_coarse to perform domain updates.
Get the boundary data for symmetric domain when the data is at C, E, or N-cell center....
These routines retrieve the axis specifications associated with the compute domains....
Retrieve the entire array of compute domain extents associated with a decomposition.
These routines retrieve the axis specifications associated with the data domains. The domain is a der...
Get the index of the data passed from fine grid to coarse grid. Example usage:
These routines retrieve the axis specifications associated with the global domains....
Retrieve layout associated with a domain decomposition. The 1D version of this call returns the number...
These routines retrieve the axis specifications associated with the memory domains....
Retrieve PE number of a neighboring domain.
Retrieve list of PEs associated with a domain decomposition. The 1D version of this call returns an a...
Fill in a global array from domain-decomposed arrays.
Same functionality as mpp_global_field but for unstructured domains.
Global max of domain-decomposed arrays. mpp_global_max is used to get the maximum value of a domain-...
Global min of domain-decomposed arrays. mpp_global_min is used to get the minimum value of a domain-...
Global sum of domain-decomposed arrays. mpp_global_sum is used to get the sum of a domain-decomposed...
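A hedged sketch of the global operations above, reusing the decomposition and field from the earlier mpp_define_domains sketch (the 360x180 global shape is the same illustrative assumption):
  real :: gfield(360,180)   ! global array, filled on every PE of the pelist
  real :: gsum
  call mpp_global_field( domain, field, gfield )   ! gather the compute domains
  gsum = mpp_global_sum( domain, field )           ! sum over the global domain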
Modifies the extents (compute, data and global) of a given domain.
Nullify domain list. This interface is needed in mpp_domains_test. 1-D case can be added in if needed...
Passes data from a structured grid to an unstructured grid. Example usage:
Passes a data field from a structured grid to an unstructured grid. Example usage:
Reorganization of distributed global arrays. mpp_redistribute is used to reorganize a distributed ar...
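A minimal sketch of the redistribution above, assuming two decompositions (domain_in, domain_out) of the same global grid, with fields allocated on their respective data domains:
  call mpp_redistribute( domain_in, field_in, domain_out, field_out )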
These routines set the axis specifications associated with the compute domains. The domain is a deriv...
These routines set the axis specifications associated with the data domains. The domain is a derived ...
These routines set the axis specifications associated with the global domains. The domain is a derive...
Private interface used for non-blocking updates.
Starts a non-blocking group update. Must be followed by a call to mpp_complete_group_update. mpp_gro...
Interface to start halo updates. mpp_start_update_domains is used to start a halo update of a domain-d...
Performs halo updates for a given domain.
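As a sketch of the blocking and non-blocking forms described above (field and domain as in the earlier decomposition sketch):
  integer :: id_update
  ! blocking halo update
  call mpp_update_domains( field, domain )
  ! non-blocking form: start, overlap halo-independent work, then complete
  id_update = mpp_start_update_domains( field, domain )
  ! ... computation that does not touch the halo region ...
  call mpp_complete_update_domains( id_update, field, domain )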
Similar to mpp_update_domains; updates adjoint domains.
Pass the data from fine grid to fill the buffer to be ready to be interpolated onto coarse grid....
Pass the data from coarse grid to fill the buffer to be ready to be interpolated onto fine grid....
Type used to represent the contact between tiles.
One-dimensional domain used to manage shared data access between PEs.
A private type used to specify index limits for a domain decomposition.
The domain2D type contains all the necessary information to define the global, compute and data domai...
Private type to specify multiple index limits and pe information for a 2D domain.
Used to specify index limits along an axis of a domain.
Used for sending domain data between PEs.
Domain information for managing data on unstructured grids.
Index bounds for use in nestSpec.
Used for updates on a group.
Domain with nested fine and coarse tiles.
Private type to hold data for each level of nesting.
Used to specify bounds and index information for nested tiles as a linked list.
Used for nonblocking data transfer.
Type for overlapping data.
Private type for overlap specifications.
Upper and lower x and y bounds for a tile.
Private type for axis specification data for an unstructured grid.
Private type for axis specification data for an unstructured domain.
This interface uses a conversion to an integer representation of real numbers to give order-invariant...
Definition: mpp_efp.F90:70
subroutine, public mpp_memuse_end(text, unit)
End the memory collection, and report on total memory used during the execution of the model run.
subroutine, public mpp_memuse_begin
Initialize the memory module, and record the initial memory use.
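A small sketch of the memory-tracking pair above (the label string is illustrative):
  call mpp_memuse_begin()                        ! record initial memory use
  ! ... model initialization and time stepping ...
  call mpp_memuse_end( 'model run', stdout() )   ! report total memory used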
subroutine mpp_sync_self(pelist, check, request, msg_size, msg_type)
This is to check whether the current PE's outstanding puts are complete, but we can't use shmem_fence because w...
integer function stdout()
This function returns the current standard Fortran unit number for output.
Definition: mpp_util.inc:42
subroutine mpp_set_current_pelist(pelist, no_sync)
Set context pelist.
Definition: mpp_util.inc:498
subroutine mpp_init(flags, localcomm, test_level, alt_input_nml_path)
Initialize the mpp_mod module. Must be called before any usage.
integer function stderr()
This function returns the current standard Fortran unit number for error messages.
Definition: mpp_util.inc:50
subroutine mpp_type_free(dtype)
Deallocates memory for mpp_type objects. @TODO This should probably not take a pointer,...
subroutine mpp_declare_pelist(pelist, name, commID)
Declare a pelist.
Definition: mpp_util.inc:469
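A hedged sketch combining the pelist routines above; the split into the first half of the PEs, and the assumption that the current pelist is numbered 0..mpp_npes()-1, are illustrative:
  integer :: i
  integer, allocatable :: pelist(:)
  allocate( pelist(mpp_npes()/2) )
  pelist = (/ (i, i = 0, mpp_npes()/2 - 1) /)
  call mpp_declare_pelist( pelist, 'first_half' )   ! must be called by all PEs
  if ( any(mpp_pe() == pelist) ) call mpp_set_current_pelist( pelist )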
integer function stdlog()
This function returns the current standard Fortran unit number for log messages. Log messages,...
Definition: mpp_util.inc:58
integer function mpp_npes()
Returns processor count for current pelist.
Definition: mpp_util.inc:420
integer function mpp_pe()
Returns processor ID.
Definition: mpp_util.inc:406
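For example (a trivial sketch; mpp_root_pe is the root-PE query from mpp_mod):
  if ( mpp_pe() == mpp_root_pe() ) &
      write( stdout(), '(a,i6,a)' ) 'running on ', mpp_npes(), ' PEs'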
subroutine mpp_sync(pelist, do_self)
Synchronize PEs in list.
integer function mpp_clock_id(name, flags, grain)
Return an ID for a new or existing clock.
Definition: mpp_util.inc:713
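A short sketch of the timing pattern built around mpp_clock_id (the clock name and grain are illustrative; CLOCK_ROUTINE, mpp_clock_begin and mpp_clock_end are public members of mpp_mod):
  integer :: id_halo
  id_halo = mpp_clock_id( 'halo update', grain=CLOCK_ROUTINE )
  call mpp_clock_begin( id_halo )
  call mpp_update_domains( field, domain )
  call mpp_clock_end( id_halo )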
Scatter a vector across all PEs.
Definition: mpp.F90:775
Perform parallel broadcasts.
Definition: mpp.F90:1104
Error handler.
Definition: mpp.F90:381
Reduction operations. Find the max of scalar a from the PEs in pelist; the result is also automatically br...
Definition: mpp.F90:537
Reduction operations. Find the min of scalar a from the PEs in pelist; the result is also automatically br...
Definition: mpp.F90:559
Receive data from another PE.
Definition: mpp.F90:950
Send data to a receiving PE.
Definition: mpp.F90:1017
Reduction operation.
Definition: mpp.F90:596
Calculates the sum of a given numerical array across PEs for adjoint domains.
Definition: mpp.F90:641
Basic message-passing call.
Definition: mpp.F90:885
Create an mpp_type variable.
Definition: mpp.F90:509
Data types for generalized data transfer (e.g. MPI_Type)
Definition: mpp.F90:286