FMS 2025.04 (Flexible Modeling System): mpp_unstruct_domain.inc
1 !***********************************************************************
2 !* Apache License 2.0
3 !*
4 !* This file is part of the GFDL Flexible Modeling System (FMS).
5 !*
6 !* Licensed under the Apache License, Version 2.0 (the "License");
7 !* you may not use this file except in compliance with the License.
8 !* You may obtain a copy of the License at
9 !*
10 !* http://www.apache.org/licenses/LICENSE-2.0
11 !*
12 !* FMS is distributed in the hope that it will be useful, but WITHOUT
13 !* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied;
14 !* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
15 !* PARTICULAR PURPOSE. See the License for the specific language
16 !* governing permissions and limitations under the License.
17 !***********************************************************************
18 !> @file
19 !> @brief Routines for defining and managing unstructured grids
20 
21 !> @addtogroup mpp_domains_mod
22 !> @{
23  !#####################################################################
24  subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, &
25  & grid_index, name)
26  type(domainug), intent(inout) :: UG_domain
27  type(domain2d), target, intent(in) :: SG_domain
28  integer, intent(in) :: npts_tile(:) !< number of unstructured points on each tile
29  integer, intent(in) :: grid_nlev(:) !< number of levels at each unstructured grid point
30  integer, intent(in) :: ndivs !< total number of divisions of the unstructured domain
31  integer, intent(in) :: npes_io_group !< number of processors in an I/O group;
32  !! only PEs with the same tile_id
33  !! belong to the same group
34  integer, intent(in) :: grid_index(:) !< 1-D structured-grid index (i + (j-1)*nx) of each unstructured point
35  character(len=*), optional, intent(in) :: name !< optional name of the unstructured domain
36  integer, dimension(size(npts_tile(:))) :: ndivs_tile, pe_start, pe_end
37  integer, dimension(0:ndivs-1) :: ibegin, iend, costs_list
38  integer :: ntiles, ndivs_used, cur_tile
39  integer :: n, ts, te, p, pos, tile_id, ngroup, group_id, my_pos, i
40  integer :: npes_in_group, is, ie, ntotal_costs, max_cost, cur_cost, costs_left
41  integer :: npts_left, ndiv_left, cur_pos, ndiv, prev_cost, ioff
42  real :: avg_cost
43  integer :: costs(size(npts_tile(:)))
44 
45  ug_domain%SG_domain => sg_domain
46  ntiles = size(npts_tile(:))
47  ug_domain%ntiles = ntiles
48 
49  !--- total number of points must be no less than ndivs
50  if(sum(npts_tile)<ndivs) call mpp_error(fatal, &
51  & "mpp_define_unstruct_domain: total number of points is less than ndivs")
52  !--- the number of levels at each grid point must be at least one.
53  do n = 1, size(grid_nlev(:))
54  if(grid_nlev(n) < 1) call mpp_error(fatal, &
55  & "mpp_define_unstruct_domain: grid_nlev at some point is less than 1")
56  enddo
57 
58  !--- cost of each tile = total number of levels over its points.
59  pos = 0
60  do n = 1, ntiles
61  costs(n) = 0
62  do i = 1, npts_tile(n)
63  pos = pos + 1
64  costs(n) = costs(n) + grid_nlev(pos)
65  enddo
66  enddo
67  ! compute number of divisions for each tile.
68  ntotal_costs = sum(costs)
69  !--- get the upper limit of ndivs for each tile.
70  do n = 1, ntiles
71  ndivs_tile(n) = ceiling(real(costs(n)*ndivs)/ntotal_costs)
72  enddo
73 
74  ndivs_used = sum(ndivs_tile)
75  do while (ndivs_used > ndivs)
76  max_cost = 0
77  cur_tile = 0
78  do n = 1, ntiles
79  if( ndivs_tile(n) > 1 ) then
80  cur_cost = ceiling(real(costs(n))/(ndivs_tile(n)-1))
81  if( max_cost == 0 .OR. cur_cost<max_cost) then
82  max_cost = cur_cost
83  cur_tile = n
84  endif
85  endif
86  enddo
87  ndivs_used = ndivs_used-1
88  ndivs_tile(cur_tile) = ndivs_tile(cur_tile) - 1
89  enddo
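 !--- Worked example of the two steps above (hypothetical numbers): with
 !--- ndivs = 4 and tile costs (30, 10), the proportional ceilings give
 !--- ndivs_tile = (3, 1), which already sums to 4 and needs no trimming.
 !--- With costs (25, 15) the ceilings give (3, 2) = 5 > 4, so the loop above
 !--- removes one division from the tile whose per-division cost after the
 !--- removal stays smallest (tile 1: ceiling(25/2) = 13 versus tile 2:
 !--- ceiling(15/1) = 15), yielding ndivs_tile = (2, 2).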
90 
91  te = -1
92  ioff = 0
93  do n = 1, ntiles
94  ts = te + 1
95  te = ts + ndivs_tile(n) - 1
96  costs_left = costs(n)
97  ndiv_left = ndivs_tile(n)
98  npts_left = npts_tile(n)
99  cur_pos = 1
100  do ndiv = 1, ndivs_tile(n)
101  cur_cost = 0
102  ibegin(ts+ndiv-1) = cur_pos
103  avg_cost = real(costs_left)/ndiv_left
104  do i = cur_pos, npts_tile(n)
105  cur_cost = cur_cost + grid_nlev(i+ioff)
106  costs_left = costs_left - grid_nlev(i+ioff)
107  if(npts_left < ndiv_left ) then
108  call mpp_error(fatal, "mpp_define_unstruct_domain: npts_left < ndiv_left")
109  else if(npts_left == ndiv_left ) then
110  cur_pos = i + 1
111  exit
112  else if(cur_cost .GE. avg_cost) then
113  prev_cost = cur_cost - grid_nlev(i+ioff)
114  if(i==cur_pos) then
115  cur_pos = i + 1
116  exit
117  else if( cur_cost - avg_cost .LE. avg_cost - prev_cost ) then
118  cur_pos = i + 1
119  exit
120  else
121  cur_pos = i
122  cur_cost = prev_cost
123  costs_left = costs_left + grid_nlev(i+ioff)
124  npts_left = npts_left+1
125  exit
126  endif
127  endif
128  npts_left = npts_left-1
129  enddo
130  iend(ts+ndiv-1) = cur_pos - 1
131  costs_list(ts+ndiv-1) = cur_cost
132  ndiv_left = ndiv_left-1
133  npts_left = npts_left-1
134  enddo
135  pe_start(n) = ts
136  pe_end(n) = te
137  ioff = ioff+ npts_tile(n)
138  enddo
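 !--- Note on the loop above: within each tile, points are assigned to its
 !--- divisions greedily in index order.  A division keeps accumulating points
 !--- until its cumulative cost reaches avg_cost (the remaining cost divided by
 !--- the remaining divisions); the boundary point is then placed on whichever
 !--- side gives the smaller deviation from avg_cost, while keeping at least
 !--- one point available for every remaining division (npts_left >= ndiv_left).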
139  if (associated(ug_domain%list)) deallocate(ug_domain%list) ! deallocate if previously associated
140  allocate(ug_domain%list(0:ndivs-1))
141  do p = 0, ndivs-1
142  ug_domain%list(p)%compute%begin = ibegin(p)
143  ug_domain%list(p)%compute%end = iend(p)
144  ug_domain%list(p)%compute%size = ug_domain%list(p)%compute%end - ug_domain%list(p)%compute%begin + 1
145  ug_domain%list(p)%compute%max_size = 0
146  ug_domain%list(p)%pos = p
147  ug_domain%list(p)%pe = p + mpp_root_pe()
148  pos = 0
149  do n = 1, ntiles
150  if( p .GE. pe_start(n) .AND. p .LE. pe_end(n) ) then
151  ug_domain%list(p)%tile_id = n
152  exit
153  endif
154  pos = pos + npts_tile(n)
155  enddo
156  is = ug_domain%list(p)%compute%begin+pos
157  ie = ug_domain%list(p)%compute%end+pos
158  ug_domain%list(p)%compute%begin_index = minval(grid_index(is:ie))
159  ug_domain%list(p)%compute%end_index = maxval(grid_index(is:ie))
160  enddo
161 
162  !--- write out domain decomposition from root pe
163  if(mpp_pe() == mpp_root_pe() .and. present(name)) then
164  write(stdout(),*) "unstruct domain name = ", trim(name)
165  write(stdout(),*) ug_domain%list(:)%compute%size
166  endif
167 
168  pos = mpp_pe() - mpp_root_pe()
169  ug_domain%pe = mpp_pe()
170  ug_domain%pos = pos
171  ug_domain%tile_id = ug_domain%list(pos)%tile_id
172  p = pe_start(ug_domain%tile_id)
173  ug_domain%tile_root_pe = ug_domain%list(p)%pe
174  ug_domain%tile_npes = pe_end(ug_domain%tile_id) - pe_start(ug_domain%tile_id) + 1
175  ug_domain%compute = ug_domain%list(pos)%compute
176  ug_domain%compute%max_size = maxval( ug_domain%list(:)%compute%size )
177  ug_domain%global%begin = 1
178  ug_domain%global%end = npts_tile(ug_domain%tile_id)
179  ug_domain%global%size = ug_domain%global%end - ug_domain%global%begin + 1
180  ug_domain%global%max_size = -1 ! currently this is not supposed to be used.
181  pos = 0
182  do n = 1, ug_domain%tile_id-1
183  pos = pos + npts_tile(n)
184  enddo
185  ug_domain%global%begin_index = grid_index(pos+1)
186  ug_domain%global%end_index = grid_index(pos+npts_tile(n))
187 
188  if (associated(ug_domain%grid_index)) deallocate(ug_domain%grid_index) ! deallocate if previously associated
189  allocate(ug_domain%grid_index(ug_domain%compute%size))
190  do n = 1, ug_domain%compute%size
191  ug_domain%grid_index(n) = grid_index(pos+ug_domain%compute%begin+n-1)
192  enddo
193 
194  !--- define io_domain
195  if (associated(ug_domain%io_domain)) deallocate(ug_domain%io_domain) ! deallocate if previously associated
196  allocate(ug_domain%io_domain)
197  tile_id = ug_domain%tile_id
198  ug_domain%io_domain%pe = ug_domain%pe
199  !--- figure out the number of I/O groups for the current tile
200  if(npes_io_group == 0) then
201  ngroup = 1
202  else
203  ngroup = ceiling(real(ndivs_tile(tile_id))/ npes_io_group)
204  endif
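 !--- For example (hypothetical numbers): a tile decomposed over 10 ranks with
 !--- npes_io_group = 4 is split into ngroup = ceiling(10/4) = 3 I/O groups,
 !--- while npes_io_group = 0 collapses the whole tile into a single group.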
205 
206 !----------
207 !ug support
208  ug_domain%npes_io_group = npes_io_group
209  ug_domain%io_layout = ngroup
210 !----------
211 
212  call mpp_compute_extent(1, ndivs_tile(tile_id), ngroup, ibegin(0:ngroup-1), iend(0:ngroup-1))
213  my_pos = ug_domain%pe - ug_domain%tile_root_pe + 1
214  do n = 0, ngroup-1
215  if( my_pos .GE. ibegin(n) .AND. my_pos .LE. iend(n) ) then
216  group_id = n
217  exit
218  endif
219  enddo
220 
221  ug_domain%io_domain%tile_id = group_id+1
222  ug_domain%io_domain%compute = ug_domain%compute
223  ug_domain%io_domain%pe = ug_domain%pe
224  ug_domain%io_domain%pos = my_pos - ibegin(group_id) + 1
225  ug_domain%io_domain%tile_root_pe = ibegin(group_id) + ug_domain%tile_root_pe - 1
226  pos = ug_domain%io_domain%tile_root_pe - mpp_root_pe()
227  ug_domain%io_domain%global%begin = ug_domain%list(pos)%compute%begin
228  ug_domain%io_domain%global%begin_index = ug_domain%list(pos)%compute%begin_index
229  pos = iend(group_id) + ug_domain%tile_root_pe - mpp_root_pe() - 1
230  ug_domain%io_domain%global%end = ug_domain%list(pos)%compute%end
231  ug_domain%io_domain%global%end_index = ug_domain%list(pos)%compute%end_index
232  ug_domain%io_domain%global%size = ug_domain%io_domain%global%end - ug_domain%io_domain%global%begin + 1
233 
234  npes_in_group = iend(group_id) - ibegin(group_id) + 1
235  if (associated(ug_domain%io_domain%list)) deallocate(ug_domain%io_domain%list) ! deallocate if previously associated
236  allocate(ug_domain%io_domain%list(0:npes_in_group-1))
237  do n = 0, npes_in_group-1
238  pos = ug_domain%io_domain%tile_root_pe - mpp_root_pe() + n
239  ug_domain%io_domain%list(n)%compute = ug_domain%list(pos)%compute
240  ug_domain%io_domain%list(n)%pos = n
241  ug_domain%io_domain%list(n)%pe = ug_domain%list(pos)%pe
242  ug_domain%io_domain%list(n)%tile_id = group_id+1
243  enddo
244 
245  call compute_overlap_sg2ug(ug_domain, sg_domain)
246  call compute_overlap_ug2sg(ug_domain)
247 
248  return
249 
250  end subroutine mpp_define_unstruct_domain
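 ! Minimal usage sketch for mpp_define_unstruct_domain (illustrative only; the
 ! names npts_per_tile, nlev_per_point and land_index are hypothetical, and
 ! ndivs is taken equal to the number of PEs in the current pelist):
 !
 !   type(domain2D) :: SG_domain
 !   type(domainUG) :: UG_domain
 !   ! ... define SG_domain with mpp_define_domains, then fill npts_per_tile,
 !   ! ... nlev_per_point and land_index for the unstructured points ...
 !   call mpp_define_unstruct_domain(UG_domain, SG_domain, npts_per_tile, &
 !                                   nlev_per_point, mpp_npes(), 0, land_index, &
 !                                   name="unstructured land domain")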
251 
252 
253  !####################################################################
254  subroutine compute_overlap_sg2ug(UG_domain, SG_domain)
255  type(domainug), intent(inout) :: UG_domain
256  type(domain2d), intent(in) :: SG_domain
257  integer, dimension(0:size(SG_domain%list(:))-1) :: send_cnt, recv_cnt
258  integer, dimension(0:size(SG_domain%list(:))-1) :: send_buffer_pos, recv_buffer_pos
259  integer, dimension(:), allocatable :: send_buffer, recv_buffer, index_list
260  integer, dimension(:), allocatable :: buffer_pos
261  integer :: tile_id, nlist, nxg, begin_index, end_index, i, j
262  integer :: m, n, list, l, isc, iec, jsc, jec, ibegin, iend, grid_index
263  integer :: nrecv, nsend, send_pos, recv_pos, pos
264 
265  !--- figure out the recv index information.
266  tile_id = ug_domain%tile_id
267  nlist = size(sg_domain%list(:))
268  nxg = sg_domain%x(1)%global%size
269  begin_index = ug_domain%compute%begin_index
270  end_index = ug_domain%compute%end_index
271  pos = 0
272  recv_cnt = 0
273  allocate(index_list(ug_domain%compute%size))
274  allocate(send_buffer(ug_domain%compute%size))
275  index_list = -1
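 !--- Each entry of UG_domain%grid_index is a 1-D index into this tile's
 !--- structured grid, grid_index = i + (j-1)*nxg, so the loop below recovers
 !--- i = mod(grid_index-1, nxg) + 1 and j = (grid_index-1)/nxg + 1.
 !--- For example (hypothetical numbers), nxg = 96 and grid_index = 100
 !--- give i = 4, j = 2.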
276  do n = 0, nlist-1
277  if(sg_domain%list(n)%tile_id(1) .NE. tile_id) cycle
278  isc = sg_domain%list(n)%x(1)%compute%begin; iec = sg_domain%list(n)%x(1)%compute%end
279  jsc = sg_domain%list(n)%y(1)%compute%begin; jec = sg_domain%list(n)%y(1)%compute%end
280  ibegin = (jsc-1)*nxg + isc
281  iend = (jec-1)*nxg + iec
282  if(ibegin > end_index .OR. iend < begin_index) cycle
283  do l = 1, ug_domain%compute%size
284  grid_index = ug_domain%grid_index(l)
285  i = mod((grid_index-1), nxg) + 1
286  j = (grid_index-1)/nxg + 1
287  if( i .GE. isc .AND. i .LE. iec .and. j .GE. jsc .AND. j .LE. jec ) then
288  recv_cnt(n) = recv_cnt(n) + 1
289  pos = pos + 1
290  if(pos > ug_domain%compute%size) call mpp_error(fatal, &
291  'compute_overlap_SG2UG: pos > UG_domain%compute%size')
292  index_list(pos) = l
293  send_buffer(pos) = grid_index
294  endif
295  enddo
296  enddo
297 
298  !--- make sure sum(recv_cnt) == UG_domain%compute%size
299  if( ug_domain%compute%size .NE. sum(recv_cnt) ) then
300  print*,"pe=", mpp_pe(), ug_domain%compute%size, sum(recv_cnt)
301  call mpp_error(fatal, &
302  & "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(recv_cnt)")
303  endif
304  allocate(buffer_pos(0:nlist-1))
305  pos = 0
306  do list = 0,nlist-1
307  buffer_pos(list) = pos
308  pos = pos + recv_cnt(list)
309  enddo
310 
311  nrecv = count( recv_cnt > 0 )
312  ug_domain%SG2UG%nrecv = nrecv
313  if (associated(ug_domain%SG2UG%recv)) deallocate(ug_domain%SG2UG%recv) ! deallocate if previously associated
314  allocate(ug_domain%SG2UG%recv(nrecv))
315  nrecv = 0
316  pos = 0
317  do list = 0,nlist-1
318  m = mod( sg_domain%pos+nlist-list, nlist )
319  if( recv_cnt(m) > 0 ) then
320  nrecv = nrecv+1
321  ug_domain%SG2UG%recv(nrecv)%count = recv_cnt(m)
322  ug_domain%SG2UG%recv(nrecv)%pe = ug_domain%list(m)%pe
323  allocate(ug_domain%SG2UG%recv(nrecv)%i(recv_cnt(m)))
324  pos = buffer_pos(m)
325  do l = 1, recv_cnt(m)
326  pos = pos + 1
327  ug_domain%SG2UG%recv(nrecv)%i(l) = index_list(pos)
328  enddo
329  endif
330  enddo
331 
332  !--- figure out the send index information.
333  send_cnt = recv_cnt
334  recv_cnt = 0
335  call mpp_alltoall(send_cnt,1,recv_cnt,1)
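 !--- Each PE tells every other PE how many of its SG points it needs; after
 !--- the all-to-all, recv_cnt(n) holds the number of points PE n requested
 !--- from this PE, i.e. the send counts for the SG->UG pass.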
336  !--- make sure sum(send_cnt) == UG_domain%compute%size
337  if( ug_domain%compute%size .NE. sum(send_cnt) ) call mpp_error(fatal, &
338  & "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(send_cnt)")
339  allocate(recv_buffer(sum(recv_cnt)))
340  send_buffer_pos = 0; recv_buffer_pos = 0
341  send_pos = 0; recv_pos = 0
342  do n = 0, nlist-1
343  if(send_cnt(n) > 0) then
344  send_buffer_pos(n) = send_pos
345  send_pos = send_pos + send_cnt(n)
346  endif
347  if(recv_cnt(n) > 0) then
348  recv_buffer_pos(n) = recv_pos
349  recv_pos = recv_pos + recv_cnt(n)
350  endif
351  enddo
352 
353  call mpp_alltoall(send_buffer, send_cnt, send_buffer_pos, &
354  recv_buffer, recv_cnt, recv_buffer_pos)
355 
356  nsend = count( recv_cnt(:) > 0 )
357  ug_domain%SG2UG%nsend = nsend
358  if (associated(ug_domain%SG2UG%send)) deallocate(ug_domain%SG2UG%send) ! deallocate if previously associated
359  allocate(ug_domain%SG2UG%send(nsend))
360  nsend = 0
361  isc = sg_domain%x(1)%compute%begin
362  jsc = sg_domain%y(1)%compute%begin
363  do list = 0,nlist-1
364  m = mod( sg_domain%pos+list, nlist )
365  if( recv_cnt(m) > 0 ) then
366  nsend = nsend+1
367  ug_domain%SG2UG%send(nsend)%count = recv_cnt(m)
368  ug_domain%SG2UG%send(nsend)%pe = ug_domain%list(m)%pe
369  allocate(ug_domain%SG2UG%send(nsend)%i(recv_cnt(m)))
370  allocate(ug_domain%SG2UG%send(nsend)%j(recv_cnt(m)))
371  pos = recv_buffer_pos(m)
372  do l = 1, recv_cnt(m)
373  grid_index = recv_buffer(pos+l)
374  ug_domain%SG2UG%send(nsend)%i(l) = mod(grid_index-1,nxg) + 1
375  ug_domain%SG2UG%send(nsend)%j(l) = (grid_index-1)/nxg + 1
376  enddo
377  endif
378  enddo
379  deallocate(send_buffer, recv_buffer, index_list, buffer_pos)
380 
381 return
382 
383  end subroutine compute_overlap_sg2ug
384 
385  !####################################################################
386  subroutine compute_overlap_ug2sg(UG_domain)
387  type(domainug), intent(inout) :: UG_domain
388 
389  !--- UG2SG is the reverse of SG2UG
390  ug_domain%UG2SG%nsend = ug_domain%SG2UG%nrecv
391  ug_domain%UG2SG%send => ug_domain%SG2UG%recv
392  ug_domain%UG2SG%nrecv = ug_domain%SG2UG%nsend
393  ug_domain%UG2SG%recv => ug_domain%SG2UG%send
394 
395  return
396 
397  end subroutine compute_overlap_ug2sg
398 
399  !####################################################################
400  subroutine mpp_get_ug_sg_domain(UG_domain,SG_domain)
401  type(domainug), intent(inout) :: UG_domain
402  type(domain2d), pointer :: SG_domain
403 
404  sg_domain => ug_domain%SG_domain
405 
406  return
407 
408  end subroutine mpp_get_ug_sg_domain
409 
410  !####################################################################
411  function mpp_get_ug_io_domain(domain)
412  type(domainug), intent(in) :: domain
413  type(domainug), pointer :: mpp_get_UG_io_domain
414 
415  if(ASSOCIATED(domain%io_domain)) then
416  mpp_get_ug_io_domain => domain%io_domain
417  else
418  call mpp_error(fatal, "mpp_get_UG_io_domain: io_domain is not defined, contact developer")
419  endif
420 
421  end function mpp_get_ug_io_domain
422 
423  !#####################################################################
424  subroutine mpp_get_ug_compute_domain( domain, begin, end, size)
425  type(domainug), intent(in) :: domain
426  integer, intent(out), optional :: begin, end, size
427 
428  if( PRESENT(begin) )begin = domain%compute%begin
429  if( PRESENT(end) )end = domain%compute%end
430  if( PRESENT(size) )size = domain%compute%size
431  return
432  end subroutine mpp_get_ug_compute_domain
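 ! Typical use of the accessor above (illustrative; "nlocal" and "field_ug" are
 ! hypothetical names): query the local unstructured size and allocate a field.
 !
 !   integer :: nlocal
 !   real, allocatable :: field_ug(:)
 !   call mpp_get_UG_compute_domain(UG_domain, size=nlocal)
 !   allocate(field_ug(nlocal))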
433 
434  !#####################################################################
435  subroutine mpp_get_ug_global_domain( domain, begin, end, size)
436  type(domainug), intent(in) :: domain
437  integer, intent(out), optional :: begin, end, size
438 
439  if( PRESENT(begin) )begin = domain%global%begin
440  if( PRESENT(end) )end = domain%global%end
441  if( PRESENT(size) )size = domain%global%size
442  return
443  end subroutine mpp_get_ug_global_domain
444 
445  !#####################################################################
446  subroutine mpp_get_ug_compute_domains( domain, begin, end, size )
447  type(domainug), intent(in) :: domain
448  integer, intent(out), optional, dimension(:) :: begin, end, size
449 
450  !we use shape instead of size for error checks because size is used as an argument
451  if( PRESENT(begin) )then
452  if( any(shape(begin).NE.shape(domain%list)) ) &
453  call mpp_error( fatal, 'mpp_get_UG_compute_domains: begin array size does not match domain.' )
454  begin(:) = domain%list(:)%compute%begin
455  end if
456  if( PRESENT(end) )then
457  if( any(shape(end).NE.shape(domain%list)) ) &
458  call mpp_error( fatal, 'mpp_get_UG_compute_domains: end array size does not match domain.' )
459  end(:) = domain%list(:)%compute%end
460  end if
461  if( PRESENT(size) )then
462  if( any(shape(size).NE.shape(domain%list)) ) &
463  call mpp_error( fatal, 'mpp_get_UG_compute_domains: size array size does not match domain.' )
464  size(:) = domain%list(:)%compute%size
465  end if
466  return
467  end subroutine mpp_get_ug_compute_domains
468 
469  !#####################################################################
470  subroutine mpp_get_ug_domains_index( domain, begin, end)
471  type(domainug), intent(in) :: domain
472  integer, intent(out), dimension(:) :: begin, end
473 
474  !we use shape instead of size for error checks because size is used as an argument
475  if( any(shape(begin).NE.shape(domain%list)) ) &
476  call mpp_error( fatal, 'mpp_get_UG_domains_index: begin array size does not match domain.' )
477  begin(:) = domain%list(:)%compute%begin_index
478  if( any(shape(end).NE.shape(domain%list)) ) &
479  call mpp_error( fatal, 'mpp_get_UG_domains_index: end array size does not match domain.' )
480  end(:) = domain%list(:)%compute%end_index
481  return
482  end subroutine mpp_get_ug_domains_index
483 
484  !#####################################################################
485  function mpp_get_ug_domain_ntiles(domain)
486  type(domainug), intent(in) :: domain
487  integer :: mpp_get_UG_domain_ntiles
488 
489  mpp_get_ug_domain_ntiles = domain%ntiles
490  return
491  end function mpp_get_ug_domain_ntiles
492 
493  !#######################################################################
494  subroutine mpp_get_ug_domain_tile_list(domain, tiles)
495  type(domainug), intent(in) :: domain
496  integer, intent(inout) :: tiles(:)
497  integer :: i
498 
499  if( size(tiles(:)).NE.size(domain%list(:)) ) &
500  call mpp_error( fatal, 'mpp_get_ug_domain_tile_list: tiles array size does not match domain.' )
501  do i = 1, size(tiles(:))
502  tiles(i) = domain%list(i-1)%tile_id
503  end do
504 
505  end subroutine mpp_get_ug_domain_tile_list
506 
507  !#####################################################################
508  function mpp_get_ug_domain_tile_id(domain)
509  type(domainug), intent(in) :: domain
510  integer :: mpp_get_UG_domain_tile_id
511 
512  mpp_get_ug_domain_tile_id = domain%tile_id
513  return
514  end function mpp_get_ug_domain_tile_id
515 
516  !####################################################################
517  function mpp_get_ug_domain_npes(domain)
518  type(domainug), intent(in) :: domain
519  integer :: mpp_get_UG_domain_npes
520 
521  mpp_get_ug_domain_npes = size(domain%list(:))
522  return
523 
524  end function mpp_get_ug_domain_npes
525 
526 
527  !####################################################################
528  subroutine mpp_get_ug_domain_pelist( domain, pelist)
529  type(domainug), intent(in) :: domain
530  integer, intent(out) :: pelist(:)
531 
532  if( size(pelist(:)).NE.size(domain%list(:)) ) &
533  call mpp_error( fatal, 'mpp_get_UG_domain_pelist: pelist array size does not match domain.' )
534 
535  pelist(:) = domain%list(:)%pe
536  return
537 
538  end subroutine mpp_get_ug_domain_pelist
539 
540  !###################################################################
541  subroutine mpp_get_ug_domain_tile_pe_inf( domain, root_pe, npes, pelist)
542  type(domainug), intent(in) :: domain
543  integer, optional, intent(out) :: root_pe, npes
544  integer, optional, intent(out) :: pelist(:)
545 
546  if(present(root_pe)) root_pe = domain%tile_root_pe
547  if(present(npes)) npes = domain%tile_npes
548 
549  if(present(pelist)) then
550  if( size(pelist(:)).NE. domain%tile_npes ) &
551  call mpp_error( fatal, 'mpp_get_UG_domain_tile_pe_inf: pelist array size does not match domain.' )
552  pelist(:) = domain%list(domain%pos:domain%pos+domain%tile_npes-1)%pe
553  endif
554  return
555 
556  end subroutine mpp_get_ug_domain_tile_pe_inf
557 
558 
559  !####################################################################
560  subroutine mpp_get_ug_domain_grid_index( domain, grid_index)
561  type(domainug), intent(in) :: domain
562  integer, intent(out) :: grid_index(:)
563 
564  if( size(grid_index(:)).NE.size(domain%grid_index(:)) ) &
565  call mpp_error( fatal, 'mpp_get_UG_domain_grid_index: grid_index array size does not match domain.' )
566 
567  grid_index(:) = domain%grid_index(:)
568  return
569 
570  end subroutine mpp_get_ug_domain_grid_index
571 
572  !###################################################################
573  subroutine mpp_define_null_ug_domain(domain)
574  type(domainug), intent(inout) :: domain
575 
576  domain%global%begin = -1; domain%global%end = -1; domain%global%size = 0
577  domain%compute%begin = -1; domain%compute%end = -1; domain%compute%size = 0
578  domain%pe = null_pe
579  domain%ntiles = -1
580  domain%pos = -1
581  domain%tile_id = -1
582  domain%tile_root_pe = -1
583 
584  end subroutine mpp_define_null_ug_domain
585 
586 !##############################################################################
587  !> @brief Broadcast domain (useful only outside the context of its own pelist)
588  subroutine mpp_broadcast_domain_ug( domain )
589  type(domainug), intent(inout) :: domain
590  integer, allocatable :: pes(:)
591  logical :: native !true if I'm on the pelist of this domain
592  integer :: listsize, listpos
593  integer :: n
594  integer, dimension(7) :: msg, info !pe and compute domain of each item in list
595  integer :: errunit
596 
597  errunit = stderr()
598  if( .NOT.module_is_initialized ) &
599  call mpp_error( fatal, 'MPP_BROADCAST_DOMAIN_ug: You must first call mpp_domains_init.' )
600 
601 !get the current pelist
602  allocate( pes(0:mpp_npes()-1) )
603  call mpp_get_current_pelist(pes)
604 
605 !am I part of this domain?
606  native = ASSOCIATED(domain%list)
607 
608 !set local list size
609  if( native )then
610  listsize = size(domain%list(:))
611  else
612  listsize = 0
613  end if
614  call mpp_max(listsize)
615 
616  if( .NOT.native )then
617 !initialize domain%list and set null values in message
618  if (associated(domain%list)) deallocate(domain%list) ! deallocate if previously associated
619  allocate( domain%list(0:listsize-1) )
620  domain%pe = null_pe
621  domain%pos = -1
622  domain%ntiles = -1
623  domain%compute%begin = 1
624  domain%compute%end = -1
625  domain%compute%begin_index = 1
626  domain%compute%end_index = -1
627  domain%global %begin = -1
628  domain%global %end = -1
629  domain%tile_id = -1
630  domain%tile_root_pe = -1
631  end if
632 !initialize values in info
633  info(1) = domain%pe
634  info(2) = domain%pos
635  info(3) = domain%tile_id
636  call mpp_get_ug_compute_domain( domain, info(4), info(5))
637  info(6) = domain%compute%begin_index
638  info(7) = domain%compute%end_index
639 !broadcast your info across current pelist and unpack if needed
640  listpos = 0
641  do n = 0,mpp_npes()-1
642  msg = info
643  if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
644  call mpp_broadcast( msg, 7, pes(n) )
645 !no need to unpack message if native
646 !no need to unpack message from non-native PE
647  if( .NOT.native .AND. msg(1).NE.null_pe )then
648  domain%list(listpos)%pe = msg(1)
649  domain%list(listpos)%pos = msg(2)
650  domain%list(listpos)%tile_id = msg(3)
651  domain%list(listpos)%compute%begin = msg(4)
652  domain%list(listpos)%compute%end = msg(5)
653  domain%list(listpos)%compute%begin_index = msg(6)
654  domain%list(listpos)%compute%end_index = msg(7)
655  listpos = listpos + 1
656  if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'ls,le=', msg(4:5)
657  end if
658  end do
659 
660  end subroutine mpp_broadcast_domain_ug
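 ! Usage note (illustrative sketch): this routine is meant to be called
 ! collectively on a pelist larger than the domain's own pelist, so that PEs
 ! outside the domain receive a copy of its decomposition, e.g.
 !
 !   call mpp_set_current_pelist()            ! switch to the full pelist
 !   call mpp_broadcast_domain_ug(UG_domain)  ! every PE calls, member or not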
661 
662 !------------------------------------------------------------------------------
663 function mpp_domain_ug_is_tile_root_pe(domain) result(is_root)
664 
665  !<Inputs/Outputs
666  type(domainug),intent(in) :: domain
667  logical(l8_kind) :: is_root
668 
669  if (domain%pe .eq. domain%tile_root_pe) then
670  is_root = .true.
671  else
672  is_root = .false.
673  endif
674 
675  return
676 end function mpp_domain_ug_is_tile_root_pe
677 
678 !------------------------------------------------------------------------------
679 !> @brief Returns the io_layout (number of I/O groups) of an unstructured domain.
680 !! This accessor was added because none previously existed; it simply returns
681 !! domain%io_layout and should be reviewed for correctness.
682 function mpp_get_io_domain_ug_layout(domain) result(io_layout)
683 
684  !<Inputs/Outputs
685  type(domainug),intent(in) :: domain
686  integer(i4_kind) :: io_layout
687 
688  io_layout = domain%io_layout
689 
690  return
691 end function mpp_get_io_domain_ug_layout
692 
693 
694 !------------------------------------------------------------------
695 subroutine deallocate_unstruct_overlap_type(overlap)
696  type(unstruct_overlap_type), intent(inout) :: overlap
697 
698  if(associated(overlap%i)) deallocate(overlap%i)
699  if(associated(overlap%j)) deallocate(overlap%j)
700 
701 end subroutine deallocate_unstruct_overlap_type
702 
703 !------------------------------------------------------------------
704 subroutine deallocate_unstruct_pass_type(domain)
705  type(domainug), intent(inout) :: domain
706  integer :: n
707 
708  do n = 1, domain%UG2SG%nsend
709  call deallocate_unstruct_overlap_type(domain%UG2SG%send(n))
710  enddo
711  do n = 1, domain%UG2SG%nrecv
712  call deallocate_unstruct_overlap_type(domain%UG2SG%recv(n))
713  enddo
714 
715  ! UG2SG%send points to the same memory as SG2UG%recv, and UG2SG%recv points
716  ! to the same memory as SG2UG%send. Thus we only need to deallocate one of
717  ! each pair and nullify the other pointer.
718  if(associated(domain%UG2SG%send)) then
719  deallocate(domain%UG2SG%send)
720  nullify(domain%UG2SG%send)
721  nullify(domain%SG2UG%recv)
722  end if
723  if(associated(domain%UG2SG%recv)) then
724  deallocate(domain%UG2SG%recv)
725  nullify(domain%UG2SG%recv)
726  nullify(domain%SG2UG%send)
727  end if
728 end subroutine deallocate_unstruct_pass_type
729 
730 !------------------------------------------------------------------
731 subroutine mpp_deallocate_domainug(domain)
732 
733  !<Inputs/Outputs
734  type(domainug),intent(inout) :: domain
735 
736  if (associated(domain%list)) then
737  deallocate(domain%list)
738  domain%list => null()
739  endif
740 
741  if (associated(domain%io_domain)) then
742  if (associated(domain%io_domain%list)) then
743  deallocate(domain%io_domain%list)
744  domain%io_domain%list => null()
745  endif
746  deallocate(domain%io_domain)
747  domain%io_domain => null()
748  endif
749 
750  call deallocate_unstruct_pass_type(domain)
751 
752  if (associated(domain%grid_index)) then
753  deallocate(domain%grid_index)
754  domain%grid_index => null()
755  endif
756 
757  if (associated(domain%SG_domain)) then
758  domain%SG_domain => null()
759  endif
760 
761  return
762 end subroutine mpp_deallocate_domainug
763 
764  !###################################################################
765  !> Overload the .eq. for UG
766  function mpp_domainug_eq( a, b )
767  logical :: mpp_domainug_eq
768  type(domainug), intent(in) :: a, b
769 
770  if (associated(a%SG_domain) .and. associated(b%SG_domain)) then
771  if (a%SG_domain .ne. b%SG_domain) then
772  mpp_domainug_eq = .false.
773  return
774  endif
775  elseif (associated(a%SG_domain) .and. .not. associated(b%SG_domain)) then
776  mpp_domainug_eq = .false.
777  return
778  elseif (.not. associated(a%SG_domain) .and. associated(b%SG_domain)) then
779  mpp_domainug_eq = .false.
780  return
781  endif
782 
783  mpp_domainug_eq = (a%npes_io_group .EQ. b%npes_io_group) .AND. &
784  (a%pos .EQ. b%pos) .AND. &
785  (a%ntiles .EQ. b%ntiles) .AND. &
786  (a%tile_id .EQ. b%tile_id) .AND. &
787  (a%tile_npes .EQ. b%tile_npes) .AND. &
788  (a%tile_root_pe .EQ. b%tile_root_pe)
789 
790  if(.not. mpp_domainug_eq) return
791 
792  mpp_domainug_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
793  a%compute%end .EQ.b%compute%end .AND. &
794  a%global%begin .EQ.b%global%begin .AND. &
795  a%global%end .EQ.b%global%end .AND. &
796  a%SG2UG%nsend .EQ.b%SG2UG%nsend .AND. &
797  a%SG2UG%nrecv .EQ.b%SG2UG%nrecv .AND. &
798  a%UG2SG%nsend .EQ.b%UG2SG%nsend .AND. &
799  a%UG2SG%nrecv .EQ.b%UG2SG%nrecv &
800  )
801 
802  return
803  end function mpp_domainug_eq
804 
805  !> Overload the .ne. for UG
806  function mpp_domainug_ne( a, b )
807  logical :: mpp_domainug_ne
808  type(domainug), intent(in) :: a, b
809 
810  mpp_domainug_ne = .NOT. ( a.EQ.b )
811  return
812  end function mpp_domainug_ne
813 
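! The #include blocks below instantiate the SG<->UG data-passing routines from
! the template in mpp_unstruct_pass_data.fh for each supported type and kind
! (r8, r4, i4, l4).  A hedged usage sketch, assuming the generated interface
! takes the unstructured domain followed by the structured and unstructured
! fields ("sg_data" and "ug_data" are hypothetical names):
!
!   call mpp_pass_SG_to_UG(UG_domain, sg_data, ug_data)   ! gather SG points onto the UG domain
!   call mpp_pass_UG_to_SG(UG_domain, ug_data, sg_data)   ! scatter them back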
814 #undef MPP_TYPE_
815 #define MPP_TYPE_ real(r8_kind)
816 #undef mpp_pass_SG_to_UG_2D_
817 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r8_2d
818 #undef mpp_pass_SG_to_UG_3D_
819 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r8_3d
820 #undef mpp_pass_UG_to_SG_2D_
821 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r8_2d
822 #undef mpp_pass_UG_to_SG_3D_
823 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r8_3d
824 #include <mpp_unstruct_pass_data.fh>
825 
826 #undef MPP_TYPE_
827 #define MPP_TYPE_ real(r4_kind)
828 #undef mpp_pass_SG_to_UG_2D_
829 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r4_2d
830 #undef mpp_pass_SG_to_UG_3D_
831 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r4_3d
832 #undef mpp_pass_UG_to_SG_2D_
833 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r4_2d
834 #undef mpp_pass_UG_to_SG_3D_
835 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r4_3d
836 #include <mpp_unstruct_pass_data.fh>
837 
838 #undef MPP_TYPE_
839 #define MPP_TYPE_ integer(i4_kind)
840 #undef mpp_pass_SG_to_UG_2D_
841 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_i4_2d
842 #undef mpp_pass_SG_to_UG_3D_
843 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_i4_3d
844 #undef mpp_pass_UG_to_SG_2D_
845 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_i4_2d
846 #undef mpp_pass_UG_to_SG_3D_
847 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_i4_3d
848 #include <mpp_unstruct_pass_data.fh>
849 
850 #undef MPP_TYPE_
851 #define MPP_TYPE_ logical(i4_kind)
852 #undef mpp_pass_SG_to_UG_2D_
853 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_l4_2d
854 #undef mpp_pass_SG_to_UG_3D_
855 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_l4_3d
856 #undef mpp_pass_UG_to_SG_2D_
857 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_l4_2d
858 #undef mpp_pass_UG_to_SG_3D_
859 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_l4_3d
860 #include <mpp_unstruct_pass_data.fh>
861 
862 #undef MPP_GLOBAL_FIELD_UG_2D_
863 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r8_2d
864 #undef MPP_GLOBAL_FIELD_UG_3D_
865 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r8_3d
866 #undef MPP_GLOBAL_FIELD_UG_4D_
867 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r8_4d
868 #undef MPP_GLOBAL_FIELD_UG_5D_
869 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r8_5d
870 #undef MPP_TYPE_
871 #define MPP_TYPE_ real(r8_kind)
872 #include <mpp_global_field_ug.fh>
873 
874 #undef MPP_GLOBAL_FIELD_UG_2D_
875 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i8_2d
876 #undef MPP_GLOBAL_FIELD_UG_3D_
877 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i8_3d
878 #undef MPP_GLOBAL_FIELD_UG_4D_
879 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i8_4d
880 #undef MPP_GLOBAL_FIELD_UG_5D_
881 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i8_5d
882 #undef MPP_TYPE_
883 #define MPP_TYPE_ integer(i8_kind)
884 #include <mpp_global_field_ug.fh>
885 
886 #undef MPP_GLOBAL_FIELD_UG_2D_
887 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r4_2d
888 #undef MPP_GLOBAL_FIELD_UG_3D_
889 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r4_3d
890 #undef MPP_GLOBAL_FIELD_UG_4D_
891 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r4_4d
892 #undef MPP_GLOBAL_FIELD_UG_5D_
893 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r4_5d
894 #undef MPP_TYPE_
895 #define MPP_TYPE_ real(r4_kind)
896 #include <mpp_global_field_ug.fh>
897 
898 #undef MPP_GLOBAL_FIELD_UG_2D_
899 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i4_2d
900 #undef MPP_GLOBAL_FIELD_UG_3D_
901 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i4_3d
902 #undef MPP_GLOBAL_FIELD_UG_4D_
903 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i4_4d
904 #undef MPP_GLOBAL_FIELD_UG_5D_
905 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i4_5d
906 #undef MPP_TYPE_
907 #define MPP_TYPE_ integer(i4_kind)
908 #include <mpp_global_field_ug.fh>
909 !> @}