FMS  2026.01.01
Flexible Modeling System
mpp_unstruct_domain.inc
1 !***********************************************************************
2 !* Apache License 2.0
3 !*
4 !* This file is part of the GFDL Flexible Modeling System (FMS).
5 !*
6 !* Licensed under the Apache License, Version 2.0 (the "License");
7 !* you may not use this file except in compliance with the License.
8 !* You may obtain a copy of the License at
9 !*
10 !* http://www.apache.org/licenses/LICENSE-2.0
11 !*
12 !* FMS is distributed in the hope that it will be useful, but WITHOUT
13 !* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied;
14 !* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
15 !* PARTICULAR PURPOSE. See the License for the specific language
16 !* governing permissions and limitations under the License.
17 !***********************************************************************
18 !> @file
19 !> @brief Routines for defining and managing unstructured grids
20 
21 !> @addtogroup mpp_domains_mod
22 !> @{
23  !#####################################################################
 !> @brief Define an unstructured (compressed, 1-D) domain on top of a structured
 !! 2-D domain by distributing the unstructured points of every mosaic tile
 !! across ndivs divisions (one division per PE).
 !!
 !! The "cost" of a point is its number of levels (grid_nlev). Tiles are first
 !! given a number of divisions proportional to their total cost; each tile's
 !! points are then split into contiguous ranges of near-equal cost. The
 !! routine also builds the io_domain and the SG<->UG overlap send/recv lists.
 subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, &
 & grid_index, name)
 type(domainug), intent(inout) :: UG_domain !< unstructured domain to be filled in
 type(domain2d), target, intent(in) :: SG_domain !< structured domain the points live on
 integer, intent(in) :: npts_tile(:) !< number of unstructured points in
 !! each mosaic tile
 integer, intent(in) :: grid_nlev(:) !< number of levels (subgrid tiles) in
 !! each unstructured grid cell.
 integer, intent(in) :: ndivs !< total number of divisions (PEs) in the decomposition
 integer, intent(in) :: npes_io_group!< number of processors in a io group.
 !! Only pe with same tile_id
 !! in the same group
 integer, intent(in) :: grid_index(:) !< collapsed (i,j) global index of each point,
 !! concatenated tile by tile
 character(len=*), optional, intent(in) :: name !< optional domain name, echoed by the root PE
 integer, dimension(size(npts_tile(:))) :: ndivs_tile, pe_start, pe_end
 integer, dimension(0:ndivs-1) :: ibegin, iend, costs_list
 integer :: ntiles, ndivs_used, cur_tile
 integer :: n, ts, te, p, pos, tile_id, ngroup, group_id, my_pos, i
 integer :: npes_in_group, is, ie, ntotal_costs, max_cost, cur_cost, costs_left
 integer :: npts_left, ndiv_left, cur_pos, ndiv, prev_cost, ioff
 real :: avg_cost
 integer :: costs(size(npts_tile(:)))

 ug_domain%SG_domain => sg_domain
 ntiles = size(npts_tile(:))
 ug_domain%ntiles = ntiles

 !--- total number of points must be no less than ndivs
 if(sum(npts_tile)<ndivs) call mpp_error(fatal, &
 & "mpp_define_unstruct_domain: total number of points is less than ndivs")
 !--- We are assuming nlev on each grid is at least one.
 do n = 1, size(grid_nlev(:))
 if(grid_nlev(n) < 1) call mpp_error(fatal, &
 & "mpp_define_unstruct_domain: grid_nlev at some point is less than 1")
 enddo

 !-- costs for each mosaic tile: sum of the levels over the tile's points.
 pos = 0
 do n = 1, ntiles
 costs(n) = 0
 do i = 1, npts_tile(n)
 pos = pos + 1
 costs(n) = costs(n) + grid_nlev(pos)
 enddo
 enddo
 ! compute number of divisions for each mosaic tile.
 ntotal_costs = sum(costs)
 !--- get the upper limit of ndivs for each mosaic tile.
 ! ceiling() guarantees every tile gets at least one division but may
 ! over-count the total; the loop below trims the excess.
 do n = 1, ntiles
 ndivs_tile(n) = ceiling(real(costs(n),kind=r8_kind)*real(ndivs,kind=r8_kind)/ntotal_costs)
 enddo

 ndivs_used = sum(ndivs_tile)
 do while (ndivs_used > ndivs)
 ! take one division away from the tile whose per-division cost would
 ! grow the least after losing it (smallest ceiling(cost/(ndivs-1)))
 max_cost = 0
 cur_tile = 0
 do n = 1, ntiles
 if( ndivs_tile(n) > 1 ) then
 cur_cost = ceiling(real(costs(n))/(ndivs_tile(n)-1))
 if( max_cost == 0 .OR. cur_cost<max_cost) then
 max_cost = cur_cost
 cur_tile = n
 endif
 endif
 enddo
 ndivs_used = ndivs_used-1
 ndivs_tile(cur_tile) = ndivs_tile(cur_tile) - 1
 enddo

 ! For each tile, split its points into ndivs_tile(n) contiguous ranges of
 ! near-equal cost; ibegin/iend hold the (tile-local) point range of each
 ! division, ioff is the tile's offset into the concatenated point arrays.
 te = -1
 ioff = 0
 do n = 1, ntiles
 ts = te + 1
 te = ts + ndivs_tile(n) - 1
 costs_left = costs(n)
 ndiv_left = ndivs_tile(n)
 npts_left = npts_tile(n)
 cur_pos = 1
 do ndiv = 1, ndivs_tile(n)
 cur_cost = 0
 ibegin(ts+ndiv-1) = cur_pos
 avg_cost = real(costs_left)/ndiv_left
 do i = cur_pos, npts_tile(n)
 cur_cost = cur_cost + grid_nlev(i+ioff)
 costs_left = costs_left - grid_nlev(i+ioff)
 ! every remaining division must receive at least one point
 if(npts_left < ndiv_left ) then
 call mpp_error(fatal, "mpp_define_unstruct_domain: npts_left < ndiv_left")
 else if(npts_left == ndiv_left ) then
 cur_pos = i + 1
 exit
 else if(cur_cost .GE. avg_cost) then
 ! close the range either at point i or at i-1, whichever lands
 ! the accumulated cost closer to the target average
 prev_cost = cur_cost - grid_nlev(i+ioff)
 if(i==cur_pos) then
 cur_pos = i + 1
 exit
 else if( cur_cost - avg_cost .LE. avg_cost - prev_cost ) then
 cur_pos = i + 1
 exit
 else
 cur_pos = i
 cur_cost = prev_cost
 costs_left = costs_left + grid_nlev(i+ioff)
 npts_left = npts_left+1
 exit
 endif
 endif
 npts_left = npts_left-1
 enddo
 iend(ts+ndiv-1) = cur_pos - 1
 costs_list(ts+ndiv-1) = cur_cost
 ndiv_left = ndiv_left-1
 npts_left = npts_left-1
 enddo
 pe_start(n) = ts
 pe_end(n) = te
 ioff = ioff+ npts_tile(n)
 enddo
 ! fill the per-PE list: compute extents, tile ownership and global indices
 if (associated(ug_domain%list)) deallocate(ug_domain%list) !< Check if allocated
 allocate(ug_domain%list(0:ndivs-1))
 do p = 0, ndivs-1
 ug_domain%list(p)%compute%begin = ibegin(p)
 ug_domain%list(p)%compute%end = iend(p)
 ug_domain%list(p)%compute%size = ug_domain%list(p)%compute%end - ug_domain%list(p)%compute%begin + 1
 ug_domain%list(p)%compute%max_size = 0
 ug_domain%list(p)%pos = p
 ug_domain%list(p)%pe = p + mpp_root_pe()
 pos = 0
 ! find which tile PE p belongs to; pos accumulates the offset of that
 ! tile's first point within grid_index
 do n = 1, ntiles
 if( p .GE. pe_start(n) .AND. p .LE. pe_end(n) ) then
 ug_domain%list(p)%tile_id = n
 exit
 endif
 pos = pos + npts_tile(n)
 enddo
 is = ug_domain%list(p)%compute%begin+pos
 ie = ug_domain%list(p)%compute%end+pos
 ug_domain%list(p)%compute%begin_index = minval(grid_index(is:ie))
 ug_domain%list(p)%compute%end_index = maxval(grid_index(is:ie))
 enddo

 !--- write out domain decomposition from root pe
 if(mpp_pe() == mpp_root_pe() .and. present(name)) then
 write(stdout(),*) "unstruct domain name = ", trim(name)
 write(stdout(),*) ug_domain%list(:)%compute%size
 endif

 ! set this PE's own view of the decomposition
 pos = mpp_pe() - mpp_root_pe()
 ug_domain%pe = mpp_pe()
 ug_domain%pos = pos
 ug_domain%tile_id = ug_domain%list(pos)%tile_id
 p = pe_start(ug_domain%tile_id)
 ug_domain%tile_root_pe = ug_domain%list(p)%pe
 ug_domain%tile_npes = pe_end(ug_domain%tile_id) - pe_start(ug_domain%tile_id) + 1
 ug_domain%compute = ug_domain%list(pos)%compute
 ug_domain%compute%max_size = maxval( ug_domain%list(:)%compute%size )
 ug_domain%global%begin = 1
 ug_domain%global%end = npts_tile(ug_domain%tile_id)
 ug_domain%global%size = ug_domain%global%end - ug_domain%global%begin + 1
 ug_domain%global%max_size = -1 ! currently this is not supposed to be used.
 ! pos = offset of this PE's tile within the concatenated grid_index array
 pos = 0
 do n = 1, ug_domain%tile_id-1
 pos = pos + npts_tile(n)
 enddo
 ug_domain%global%begin_index = grid_index(pos+1)
 ! NOTE: n equals ug_domain%tile_id here (value of the loop index after
 ! normal loop termination), so this is npts_tile(tile_id)
 ug_domain%global%end_index = grid_index(pos+npts_tile(n))

 if (associated(ug_domain%grid_index)) deallocate(ug_domain%grid_index) !< Check if allocated
 allocate(ug_domain%grid_index(ug_domain%compute%size))
 do n = 1, ug_domain%compute%size
 ug_domain%grid_index(n) = grid_index(pos+ug_domain%compute%begin+n-1)
 enddo

 !--- define io_domain
 if (associated(ug_domain%io_domain)) deallocate(ug_domain%io_domain) !< Check if allocated
 allocate(ug_domain%io_domain)
 tile_id = ug_domain%tile_id
 ug_domain%io_domain%pe = ug_domain%pe
 !--- figure out number groups for current tile
 if(npes_io_group == 0) then
 ngroup = 1
 else
 ngroup = ceiling(real(ndivs_tile(tile_id))/ npes_io_group)
 endif

 !----------
 !ug support
 ug_domain%npes_io_group = npes_io_group
 ug_domain%io_layout = ngroup
 !----------

 ! partition this tile's PEs into ngroup io groups and locate my group
 call mpp_compute_extent(1, ndivs_tile(tile_id), ngroup, ibegin(0:ngroup-1), iend(0:ngroup-1))
 my_pos = ug_domain%pe - ug_domain%tile_root_pe + 1
 do n = 0, ngroup-1
 if( my_pos .GE. ibegin(n) .AND. my_pos .LE. iend(n) ) then
 group_id = n
 exit
 endif
 enddo

 ug_domain%io_domain%tile_id = group_id+1
 ug_domain%io_domain%compute = ug_domain%compute
 ug_domain%io_domain%pe = ug_domain%pe
 ug_domain%io_domain%pos = my_pos - ibegin(group_id) + 1
 ug_domain%io_domain%tile_root_pe = ibegin(group_id) + ug_domain%tile_root_pe - 1
 ! the io_domain's "global" extent spans the compute domains of all PEs
 ! in this io group
 pos = ug_domain%io_domain%tile_root_pe - mpp_root_pe()
 ug_domain%io_domain%global%begin = ug_domain%list(pos)%compute%begin
 ug_domain%io_domain%global%begin_index = ug_domain%list(pos)%compute%begin_index
 pos = iend(group_id) + ug_domain%tile_root_pe - mpp_root_pe() - 1
 ug_domain%io_domain%global%end = ug_domain%list(pos)%compute%end
 ug_domain%io_domain%global%end_index = ug_domain%list(pos)%compute%end_index
 ug_domain%io_domain%global%size = ug_domain%io_domain%global%end - ug_domain%io_domain%global%begin + 1

 npes_in_group = iend(group_id) - ibegin(group_id) + 1
 if (associated(ug_domain%io_domain%list)) deallocate(ug_domain%io_domain%list) !< Check if allocated
 allocate(ug_domain%io_domain%list(0:npes_in_group-1))
 do n = 0, npes_in_group-1
 pos = ug_domain%io_domain%tile_root_pe - mpp_root_pe() + n
 ug_domain%io_domain%list(n)%compute = ug_domain%list(pos)%compute
 ug_domain%io_domain%list(n)%pos = n
 ug_domain%io_domain%list(n)%pe = ug_domain%list(pos)%pe
 ug_domain%io_domain%list(n)%tile_id = group_id+1
 enddo

 ! build the structured<->unstructured overlap (halo exchange) tables
 call compute_overlap_sg2ug(ug_domain, sg_domain)
 call compute_overlap_ug2sg(ug_domain)

 return

 end subroutine mpp_define_unstruct_domain
253 
254 
255  !####################################################################
256  subroutine compute_overlap_sg2ug(UG_domain, SG_domain)
257  type(domainug), intent(inout) :: UG_domain
258  type(domain2d), intent(in) :: SG_domain
259  integer, dimension(0:size(SG_domain%list(:))-1) :: send_cnt, recv_cnt
260  integer, dimension(0:size(SG_domain%list(:))-1) :: send_buffer_pos, recv_buffer_pos
261  integer, dimension(:), allocatable :: send_buffer, recv_buffer, index_list
262  integer, dimension(:), allocatable :: buffer_pos
263  integer :: tile_id, nlist, nxg, begin_index, end_index, i, j
264  integer :: m, n, list, l, isc, iec, jsc, jec, ibegin, iend, grid_index
265  integer :: nrecv, nsend, send_pos, recv_pos, pos
266 
267  !--- figure out the recv index information.
268  tile_id = ug_domain%tile_id
269  nlist = size(sg_domain%list(:))
270  nxg = sg_domain%x(1)%global%size
271  begin_index = ug_domain%compute%begin_index
272  end_index = ug_domain%compute%end_index
273  pos = 0
274  recv_cnt = 0
275  allocate(index_list(ug_domain%compute%size))
276  allocate(send_buffer(ug_domain%compute%size))
277  index_list = -1
278  do n = 0, nlist-1
279  if(sg_domain%list(n)%tile_id(1) .NE. tile_id) cycle
280  isc = sg_domain%list(n)%x(1)%compute%begin; iec = sg_domain%list(n)%x(1)%compute%end
281  jsc = sg_domain%list(n)%y(1)%compute%begin; jec = sg_domain%list(n)%y(1)%compute%end
282  ibegin = (jsc-1)*nxg + isc
283  iend = (jec-1)*nxg + iec
284  if(ibegin > end_index .OR. iend < begin_index) cycle
285  do l = 1, ug_domain%compute%size
286  grid_index = ug_domain%grid_index(l)
287  i = mod((grid_index-1), nxg) + 1
288  j = (grid_index-1)/nxg + 1
289  if( i .GE. isc .AND. i .LE. iec .and. j .GE. jsc .AND. j .LE. jec ) then
290  recv_cnt(n) = recv_cnt(n) + 1
291  pos = pos + 1
292  if(pos > ug_domain%compute%size) call mpp_error(fatal, &
293  'compute_overlap_SG2UG: pos > UG_domain%compute%size')
294  index_list(pos) = l
295  send_buffer(pos) = grid_index
296  endif
297  enddo
298  enddo
299 
300  !--- make sure sum(recv_cnt) == UG_domain%compute%size
301  if( ug_domain%compute%size .NE. sum(recv_cnt) ) then
302  print*,"pe=", mpp_pe(), ug_domain%compute%size, sum(recv_cnt)
303  call mpp_error(fatal, &
304  .NE."compute_overlap_SG2UG: UG_domain%compute%size sum(recv_cnt)")
305  endif
306  allocate(buffer_pos(0:nlist-1))
307  pos = 0
308  do list = 0,nlist-1
309  buffer_pos(list) = pos
310  pos = pos + recv_cnt(list)
311  enddo
312 
313  nrecv = count( recv_cnt > 0 )
314  ug_domain%SG2UG%nrecv = nrecv
315  if (associated(ug_domain%SG2UG%recv)) deallocate(ug_domain%SG2UG%recv) !< Check if allocated
316  allocate(ug_domain%SG2UG%recv(nrecv))
317  nrecv = 0
318  pos = 0
319  do list = 0,nlist-1
320  m = mod( sg_domain%pos+nlist-list, nlist )
321  if( recv_cnt(m) > 0 ) then
322  nrecv = nrecv+1
323  ug_domain%SG2UG%recv(nrecv)%count = recv_cnt(m)
324  ug_domain%SG2UG%recv(nrecv)%pe = ug_domain%list(m)%pe
325  allocate(ug_domain%SG2UG%recv(nrecv)%i(recv_cnt(m)))
326  pos = buffer_pos(m)
327  do l = 1, recv_cnt(m)
328  pos = pos + 1
329  ug_domain%SG2UG%recv(nrecv)%i(l) = index_list(pos)
330  enddo
331  endif
332  enddo
333 
334  !--- figure out the send index information.
335  send_cnt = recv_cnt
336  recv_cnt = 0
337  call mpp_alltoall(send_cnt,1,recv_cnt,1)
338  !--- make sure sum(send_cnt) == UG_domain%compute%size
339  if( ug_domain%compute%size .NE. sum(send_cnt) ) call mpp_error(fatal, &
340  .NE."compute_overlap_SG2UG: UG_domain%compute%size sum(send_cnt)")
341  allocate(recv_buffer(sum(recv_cnt)))
342  send_buffer_pos = 0; recv_buffer_pos = 0
343  send_pos = 0; recv_pos = 0
344  do n = 0, nlist-1
345  if(send_cnt(n) > 0) then
346  send_buffer_pos(n) = send_pos
347  send_pos = send_pos + send_cnt(n)
348  endif
349  if(recv_cnt(n) > 0) then
350  recv_buffer_pos(n) = recv_pos
351  recv_pos = recv_pos + recv_cnt(n)
352  endif
353  enddo
354 
355  call mpp_alltoall(send_buffer, send_cnt, send_buffer_pos, &
356  recv_buffer, recv_cnt, recv_buffer_pos)
357 
358  nsend = count( recv_cnt(:) > 0 )
359  ug_domain%SG2UG%nsend = nsend
360  if (associated(ug_domain%SG2UG%send)) deallocate(ug_domain%SG2UG%send) !< Check if allocated
361  allocate(ug_domain%SG2UG%send(nsend))
362  nsend = 0
363  isc = sg_domain%x(1)%compute%begin
364  jsc = sg_domain%y(1)%compute%begin
365  do list = 0,nlist-1
366  m = mod( sg_domain%pos+list, nlist )
367  if( recv_cnt(m) > 0 ) then
368  nsend = nsend+1
369  ug_domain%SG2UG%send(nsend)%count = recv_cnt(m)
370  ug_domain%SG2UG%send(nsend)%pe = ug_domain%list(m)%pe
371  allocate(ug_domain%SG2UG%send(nsend)%i(recv_cnt(m)))
372  allocate(ug_domain%SG2UG%send(nsend)%j(recv_cnt(m)))
373  pos = recv_buffer_pos(m)
374  do l = 1, recv_cnt(m)
375  grid_index = recv_buffer(pos+l)
376  ug_domain%SG2UG%send(nsend)%i(l) = mod(grid_index-1,nxg) + 1
377  ug_domain%SG2UG%send(nsend)%j(l) = (grid_index-1)/nxg + 1
378  enddo
379  endif
380  enddo
381  deallocate(send_buffer, recv_buffer, index_list, buffer_pos)
382 
383 return
384 
385  end subroutine compute_overlap_sg2ug
386 
387  !####################################################################
388  subroutine compute_overlap_ug2sg(UG_domain)
389  type(domainug), intent(inout) :: UG_domain
390 
391  !--- UG2SG is the reverse of SG2UG
392  ug_domain%UG2SG%nsend = ug_domain%SG2UG%nrecv
393  ug_domain%UG2SG%send => ug_domain%SG2UG%recv
394  ug_domain%UG2SG%nrecv = ug_domain%SG2UG%nsend
395  ug_domain%UG2SG%recv => ug_domain%SG2UG%send
396 
397  return
398 
399  end subroutine compute_overlap_ug2sg
400 
401  !####################################################################
402  subroutine mpp_get_ug_sg_domain(UG_domain,SG_domain)
403  type(domainug), intent(inout) :: UG_domain
404  type(domain2d), pointer :: SG_domain
405 
406  sg_domain => ug_domain%SG_domain
407 
408  return
409 
410  end subroutine mpp_get_ug_sg_domain
411 
412  !####################################################################
413  function mpp_get_ug_io_domain(domain)
414  type(domainug), intent(in) :: domain
415  type(domainug), pointer :: mpp_get_UG_io_domain
416 
417  if(ASSOCIATED(domain%io_domain)) then
418  mpp_get_ug_io_domain => domain%io_domain
419  else
420  call mpp_error(fatal, "mpp_get_UG_io_domain: io_domain is not defined, contact developer")
421  endif
422 
423  end function mpp_get_ug_io_domain
424 
425  !#####################################################################
426  subroutine mpp_get_ug_compute_domain( domain, begin, end, size)
427  type(domainug), intent(in) :: domain
428  integer, intent(out), optional :: begin, end, size
429 
430  if( PRESENT(begin) )begin = domain%compute%begin
431  if( PRESENT(end) )end = domain%compute%end
432  if( PRESENT(size) )size = domain%compute%size
433  return
434  end subroutine mpp_get_ug_compute_domain
435 
436  !#####################################################################
437  subroutine mpp_get_ug_global_domain( domain, begin, end, size)
438  type(domainug), intent(in) :: domain
439  integer, intent(out), optional :: begin, end, size
440 
441  if( PRESENT(begin) )begin = domain%global%begin
442  if( PRESENT(end) )end = domain%global%end
443  if( PRESENT(size) )size = domain%global%size
444  return
445  end subroutine mpp_get_ug_global_domain
446 
447  !#####################################################################
448  subroutine mpp_get_ug_compute_domains( domain, begin, end, size )
449  type(domainug), intent(in) :: domain
450  integer, intent(out), optional, dimension(:) :: begin, end, size
451 
452  !we use shape instead of size for error checks because size is used as an argument
453  if( PRESENT(begin) )then
454  if( any(shape(begin).NE.shape(domain%list)) ) &
455  call mpp_error( fatal, 'mpp_get_UG_compute_domains: begin array size does not match domain.' )
456  begin(:) = domain%list(:)%compute%begin
457  end if
458  if( PRESENT(end) )then
459  if( any(shape(end).NE.shape(domain%list)) ) &
460  call mpp_error( fatal, 'mpp_get_UG_compute_domains: end array size does not match domain.' )
461  end(:) = domain%list(:)%compute%end
462  end if
463  if( PRESENT(size) )then
464  if( any(shape(size).NE.shape(domain%list)) ) &
465  call mpp_error( fatal, 'mpp_get_UG_compute_domains: size array size does not match domain.' )
466  size(:) = domain%list(:)%compute%size
467  end if
468  return
469  end subroutine mpp_get_ug_compute_domains
470 
471  !#####################################################################
472  subroutine mpp_get_ug_domains_index( domain, begin, end)
473  type(domainug), intent(in) :: domain
474  integer, intent(out), dimension(:) :: begin, end
475 
476  !we use shape instead of size for error checks because size is used as an argument
477  if( any(shape(begin).NE.shape(domain%list)) ) &
478  call mpp_error( fatal, 'mpp_get_UG_compute_domains: begin array size does not match domain.' )
479  begin(:) = domain%list(:)%compute%begin_index
480  if( any(shape(end).NE.shape(domain%list)) ) &
481  call mpp_error( fatal, 'mpp_get_UG_compute_domains: end array size does not match domain.' )
482  end(:) = domain%list(:)%compute%end_index
483  return
484  end subroutine mpp_get_ug_domains_index
485 
486  !#####################################################################
487  function mpp_get_ug_domain_ntiles(domain)
488  type(domainug), intent(in) :: domain
489  integer :: mpp_get_UG_domain_ntiles
490 
491  mpp_get_ug_domain_ntiles = domain%ntiles
492  return
493  end function mpp_get_ug_domain_ntiles
494 
495  !#######################################################################
496  subroutine mpp_get_ug_domain_tile_list(domain, tiles)
497  type(domainug), intent(in) :: domain
498  integer, intent(inout) :: tiles(:)
499  integer :: i
500 
501  if( size(tiles(:)).NE.size(domain%list(:)) ) &
502  call mpp_error( fatal, 'mpp_get_ug_domain_tile_list: tiles array size does not match domain.' )
503  do i = 1, size(tiles(:))
504  tiles(i) = domain%list(i-1)%tile_id
505  end do
506 
507  end subroutine mpp_get_ug_domain_tile_list
508 
509  !#####################################################################
510  function mpp_get_ug_domain_tile_id(domain)
511  type(domainug), intent(in) :: domain
512  integer :: mpp_get_UG_domain_tile_id
513 
514  mpp_get_ug_domain_tile_id = domain%tile_id
515  return
516  end function mpp_get_ug_domain_tile_id
517 
518  !####################################################################
519  function mpp_get_ug_domain_npes(domain)
520  type(domainug), intent(in) :: domain
521  integer :: mpp_get_UG_domain_npes
522 
523  mpp_get_ug_domain_npes = size(domain%list(:))
524  return
525 
526  end function mpp_get_ug_domain_npes
527 
528 
529  !####################################################################
530  subroutine mpp_get_ug_domain_pelist( domain, pelist)
531  type(domainug), intent(in) :: domain
532  integer, intent(out) :: pelist(:)
533 
534  if( size(pelist(:)).NE.size(domain%list(:)) ) &
535  call mpp_error( fatal, 'mpp_get_UG_domain_pelist: pelist array size does not match domain.' )
536 
537  pelist(:) = domain%list(:)%pe
538  return
539 
540  end subroutine mpp_get_ug_domain_pelist
541 
542  !###################################################################
543  subroutine mpp_get_ug_domain_tile_pe_inf( domain, root_pe, npes, pelist)
544  type(domainug), intent(in) :: domain
545  integer, optional, intent(out) :: root_pe, npes
546  integer, optional, intent(out) :: pelist(:)
547 
548  if(present(root_pe)) root_pe = domain%tile_root_pe
549  if(present(npes)) npes = domain%tile_npes
550 
551  if(present(pelist)) then
552  if( size(pelist(:)).NE. domain%tile_npes ) &
553  call mpp_error( fatal, 'mpp_get_UG_domain_tile_pe_inf: pelist array size does not match domain.' )
554  pelist(:) = domain%list(domain%pos:domain%pos+domain%tile_npes-1)%pe
555  endif
556  return
557 
558  end subroutine mpp_get_ug_domain_tile_pe_inf
559 
560 
561  !####################################################################
562  subroutine mpp_get_ug_domain_grid_index( domain, grid_index)
563  type(domainug), intent(in) :: domain
564  integer, intent(out) :: grid_index(:)
565 
566  if( size(grid_index(:)).NE.size(domain%grid_index(:)) ) &
567  call mpp_error( fatal, 'mpp_get_UG_domain_grid_index: grid_index array size does not match domain.' )
568 
569  grid_index(:) = domain%grid_index(:)
570  return
571 
572  end subroutine mpp_get_ug_domain_grid_index
573 
574  !###################################################################
575  subroutine mpp_define_null_ug_domain(domain)
576  type(domainug), intent(inout) :: domain
577 
578  domain%global%begin = -1; domain%global%end = -1; domain%global%size = 0
579  domain%compute%begin = -1; domain%compute%end = -1; domain%compute%size = 0
580  domain%pe = null_pe
581  domain%ntiles = -1
582  domain%pos = -1
583  domain%tile_id = -1
584  domain%tile_root_pe = -1
585 
586  end subroutine mpp_define_null_ug_domain
587 
588 !##############################################################################
589  !> @brief Broadcast domain (useful only outside the context of its own pelist)
 subroutine mpp_broadcast_domain_ug( domain )
!> Broadcast an unstructured domain across the current pelist so that PEs
!! outside the domain's own pelist obtain its per-PE decomposition list.
!! Every PE broadcasts a 7-integer info record in pelist order; non-native
!! PEs collect the records from native PEs into domain%list.
 type(domainug), intent(inout) :: domain !< domain to broadcast/receive
 integer, allocatable :: pes(:)
 logical :: native !true if I'm on the pelist of this domain
 integer :: listsize, listpos
 integer :: n
 integer, dimension(7) :: msg, info !pe and compute domain of each item in list
 integer :: errunit

 errunit = stderr()
 if( .NOT.module_is_initialized ) &
 call mpp_error( fatal, 'MPP_BROADCAST_DOMAIN_ug: You must first call mpp_domains_init.' )

!get the current pelist
 allocate( pes(0:mpp_npes()-1) )
 call mpp_get_current_pelist(pes)

!am I part of this domain?
 native = ASSOCIATED(domain%list)

!set local list size
 if( native )then
 listsize = size(domain%list(:))
 else
 listsize = 0
 end if
 ! collective max: all PEs agree on the domain's list size (non-native
 ! PEs contributed 0)
 call mpp_max(listsize)

 if( .NOT.native )then
!initialize domain%list and set null values in message
 if (associated(domain%list)) deallocate(domain%list) !< Check if allocated
 allocate( domain%list(0:listsize-1) )
 domain%pe = null_pe
 domain%pos = -1
 domain%ntiles = -1
 domain%compute%begin = 1
 domain%compute%end = -1
 domain%compute%begin_index = 1
 domain%compute%end_index = -1
 domain%global %begin = -1
 domain%global %end = -1
 domain%tile_id = -1
 domain%tile_root_pe = -1
 end if
!initialize values in info
 info(1) = domain%pe
 info(2) = domain%pos
 info(3) = domain%tile_id
 ! info(4)/info(5) = compute begin/end (size is derivable, not sent)
 call mpp_get_ug_compute_domain( domain, info(4), info(5))
 info(6) = domain%compute%begin_index
 info(7) = domain%compute%end_index
!broadcast your info across current pelist and unpack if needed
 listpos = 0
 do n = 0,mpp_npes()-1
 msg = info
 if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
 ! collective: every PE participates with pes(n) as the broadcast root
 call mpp_broadcast( msg, 7, pes(n) )
!no need to unpack message if native
!no need to unpack message from non-native PE
 if( .NOT.native .AND. msg(1).NE.null_pe )then
 domain%list(listpos)%pe = msg(1)
 domain%list(listpos)%pos = msg(2)
 domain%list(listpos)%tile_id = msg(3)
 domain%list(listpos)%compute%begin = msg(4)
 domain%list(listpos)%compute%end = msg(5)
 domain%list(listpos)%compute%begin_index = msg(6)
 domain%list(listpos)%compute%end_index = msg(7)
 listpos = listpos + 1
 if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'ls,le=', msg(4:5)
 end if
 end do

 end subroutine mpp_broadcast_domain_ug
663 
664 !------------------------------------------------------------------------------
!> @brief Return true if the calling PE is the root PE of its tile in the
!! unstructured domain.
function mpp_domain_ug_is_tile_root_pe(domain) result(is_root)

 type(domainug), intent(in) :: domain !< unstructured domain to query
 logical(l8_kind) :: is_root

 ! direct comparison replaces the if/else ladder
 is_root = (domain%pe .eq. domain%tile_root_pe)

end function mpp_domain_ug_is_tile_root_pe
679 
680 !------------------------------------------------------------------------------
!> @brief Return the io_layout (number of io groups per tile) of an
!! unstructured domain.
!! NOTE(review): accessor added because no other routine exposed io_layout
!! for unstructured domains — confirm this matches the intended API.
function mpp_get_io_domain_ug_layout(domain) result(io_layout)

 type(domainug), intent(in) :: domain !< unstructured domain to query
 integer(i4_kind) :: io_layout

 io_layout = domain%io_layout

end function mpp_get_io_domain_ug_layout
694 
695 
696 !------------------------------------------------------------------
!> @brief Release the i/j index arrays of one unstructured overlap entry,
!! if they are associated.
subroutine deallocate_unstruct_overlap_type(overlap)
 type(unstruct_overlap_type), intent(inout) :: overlap !< overlap entry to release

 if(associated(overlap%i)) then
    deallocate(overlap%i)
 endif
 if(associated(overlap%j)) then
    deallocate(overlap%j)
 endif

end subroutine deallocate_unstruct_overlap_type
704 
705 !------------------------------------------------------------------
!> @brief Release all UG<->SG pass (overlap) storage of an unstructured
!! domain. Frees each overlap entry's index arrays, then deallocates the
!! overlap arrays themselves, taking care of the SG2UG/UG2SG aliasing.
subroutine deallocate_unstruct_pass_type(domain)
 type(domainug), intent(inout) :: domain !< domain whose pass tables are released
 integer :: n

 ! free the per-entry index arrays first (UG2SG aliases SG2UG, so this
 ! covers both directions)
 do n = 1, domain%UG2SG%nsend
 call deallocate_unstruct_overlap_type(domain%UG2SG%send(n))
 enddo
 do n = 1, domain%UG2SG%nrecv
 call deallocate_unstruct_overlap_type(domain%UG2SG%recv(n))
 enddo

 ! SG2UG%{send,recv} point to the same memory as UG2SG%{send,recv}
 ! respectively. Thus, we only need to `deallocate` one, and nullify
 ! the other set.
 if(associated(domain%UG2SG%send)) then
 deallocate(domain%UG2SG%send)
 nullify(domain%UG2SG%send)
 nullify(domain%SG2UG%recv) ! alias of the memory just freed
 end if
 if(associated(domain%UG2SG%recv)) then
 deallocate(domain%UG2SG%recv)
 nullify(domain%UG2SG%recv)
 nullify(domain%SG2UG%send) ! alias of the memory just freed
 end if
end subroutine deallocate_unstruct_pass_type
731 
732 !------------------------------------------------------------------
!> @brief Release all storage owned by an unstructured domain: the PE list,
!! the io_domain (and its list), the pass/overlap tables, and the grid
!! index array. The SG_domain pointer is only disassociated, not freed,
!! since the structured domain is not owned by this object.
subroutine mpp_deallocate_domainug(domain)

 type(domainug),intent(inout) :: domain !< domain to tear down

 if (associated(domain%list)) then
    deallocate(domain%list)
    nullify(domain%list)
 endif

 if (associated(domain%io_domain)) then
    ! free the io_domain's own PE list before the io_domain itself
    if (associated(domain%io_domain%list)) then
       deallocate(domain%io_domain%list)
       nullify(domain%io_domain%list)
    endif
    deallocate(domain%io_domain)
    nullify(domain%io_domain)
 endif

 call deallocate_unstruct_pass_type(domain)

 if (associated(domain%grid_index)) then
    deallocate(domain%grid_index)
    nullify(domain%grid_index)
 endif

 ! not owned: just drop the reference
 if (associated(domain%SG_domain)) then
    nullify(domain%SG_domain)
 endif

end subroutine mpp_deallocate_domainug
765 
766  !###################################################################
767  !> Overload the .eq. for UG
 !> Overload the .eq. for UG: two unstructured domains are equal when their
 !! SG domains (if any) compare equal and their scalar identity fields,
 !! compute/global extents and overlap counts all match.
 function mpp_domainug_eq( a, b )
 logical :: mpp_domainug_eq
 type(domainug), intent(in) :: a, b !< domains to compare

 ! SG_domain comparison: unequal targets, or one associated and the
 ! other not, means the domains differ; both unassociated is a match
 if (associated(a%SG_domain) .and. associated(b%SG_domain)) then
 if (a%SG_domain .ne. b%SG_domain) then
 mpp_domainug_eq = .false.
 return
 endif
 elseif (associated(a%SG_domain) .and. .not. associated(b%SG_domain)) then
 mpp_domainug_eq = .false.
 return
 elseif (.not. associated(a%SG_domain) .and. associated(b%SG_domain)) then
 mpp_domainug_eq = .false.
 return
 endif

 ! cheap scalar identity fields first ...
 mpp_domainug_eq = (a%npes_io_group .EQ. b%npes_io_group) .AND. &
 (a%pos .EQ. b%pos) .AND. &
 (a%ntiles .EQ. b%ntiles) .AND. &
 (a%tile_id .EQ. b%tile_id) .AND. &
 (a%tile_npes .EQ. b%tile_npes) .AND. &
 (a%tile_root_pe .EQ. b%tile_root_pe)

 ! ... short-circuit before comparing extents and overlap counts
 if(.not. mpp_domainug_eq) return

 mpp_domainug_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
 a%compute%end .EQ.b%compute%end .AND. &
 a%global%begin .EQ.b%global%begin .AND. &
 a%global%end .EQ.b%global%end .AND. &
 a%SG2UG%nsend .EQ.b%SG2UG%nsend .AND. &
 a%SG2UG%nrecv .EQ.b%SG2UG%nrecv .AND. &
 a%UG2SG%nsend .EQ.b%UG2SG%nsend .AND. &
 a%UG2SG%nrecv .EQ.b%UG2SG%nrecv &
 )

 return
 end function mpp_domainug_eq
806 
807  !> Overload the .ne. for UG
808  function mpp_domainug_ne( a, b )
809  logical :: mpp_domainug_ne
810  type(domainug), intent(in) :: a, b
811 
812  mpp_domainug_ne = .NOT. ( a.EQ.b )
813  return
814  end function mpp_domainug_ne
815 
816 #undef MPP_TYPE_
817 #define MPP_TYPE_ real(r8_kind)
818 #undef mpp_pass_SG_to_UG_2D_
819 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r8_2d
820 #undef mpp_pass_SG_to_UG_3D_
821 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r8_3d
822 #undef mpp_pass_UG_to_SG_2D_
823 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r8_2d
824 #undef mpp_pass_UG_to_SG_3D_
825 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r8_3d
826 #include <mpp_unstruct_pass_data.fh>
827 
828 #undef MPP_TYPE_
829 #define MPP_TYPE_ real(r4_kind)
830 #undef mpp_pass_SG_to_UG_2D_
831 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r4_2d
832 #undef mpp_pass_SG_to_UG_3D_
833 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r4_3d
834 #undef mpp_pass_UG_to_SG_2D_
835 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r4_2d
836 #undef mpp_pass_UG_to_SG_3D_
837 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r4_3d
838 #include <mpp_unstruct_pass_data.fh>
839 
840 #undef MPP_TYPE_
841 #define MPP_TYPE_ integer(i4_kind)
842 #undef mpp_pass_SG_to_UG_2D_
843 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_i4_2d
844 #undef mpp_pass_SG_to_UG_3D_
845 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_i4_3d
846 #undef mpp_pass_UG_to_SG_2D_
847 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_i4_2d
848 #undef mpp_pass_UG_to_SG_3D_
849 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_i4_3d
850 #include <mpp_unstruct_pass_data.fh>
851 
852 #undef MPP_TYPE_
853 #define MPP_TYPE_ logical(i4_kind)
854 #undef mpp_pass_SG_to_UG_2D_
855 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_l4_2d
856 #undef mpp_pass_SG_to_UG_3D_
857 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_l4_3d
858 #undef mpp_pass_UG_to_SG_2D_
859 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_l4_2d
860 #undef mpp_pass_UG_to_SG_3D_
861 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_l4_3d
862 #include <mpp_unstruct_pass_data.fh>
863 
864 #undef MPP_GLOBAL_FIELD_UG_2D_
865 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r8_2d
866 #undef MPP_GLOBAL_FIELD_UG_3D_
867 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r8_3d
868 #undef MPP_GLOBAL_FIELD_UG_4D_
869 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r8_4d
870 #undef MPP_GLOBAL_FIELD_UG_5D_
871 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r8_5d
872 #undef MPP_TYPE_
873 #define MPP_TYPE_ real(r8_kind)
874 #include <mpp_global_field_ug.fh>
875 
876 #undef MPP_GLOBAL_FIELD_UG_2D_
877 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i8_2d
878 #undef MPP_GLOBAL_FIELD_UG_3D_
879 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i8_3d
880 #undef MPP_GLOBAL_FIELD_UG_4D_
881 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i8_4d
882 #undef MPP_GLOBAL_FIELD_UG_5D_
883 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i8_5d
884 #undef MPP_TYPE_
885 #define MPP_TYPE_ integer(i8_kind)
886 #include <mpp_global_field_ug.fh>
887 
888 #undef MPP_GLOBAL_FIELD_UG_2D_
889 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r4_2d
890 #undef MPP_GLOBAL_FIELD_UG_3D_
891 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r4_3d
892 #undef MPP_GLOBAL_FIELD_UG_4D_
893 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r4_4d
894 #undef MPP_GLOBAL_FIELD_UG_5D_
895 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r4_5d
896 #undef MPP_TYPE_
897 #define MPP_TYPE_ real(r4_kind)
898 #include <mpp_global_field_ug.fh>
899 
900 #undef MPP_GLOBAL_FIELD_UG_2D_
901 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i4_2d
902 #undef MPP_GLOBAL_FIELD_UG_3D_
903 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i4_3d
904 #undef MPP_GLOBAL_FIELD_UG_4D_
905 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i4_4d
906 #undef MPP_GLOBAL_FIELD_UG_5D_
907 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i4_5d
908 #undef MPP_TYPE_
909 #define MPP_TYPE_ integer(i4_kind)
910 #include <mpp_global_field_ug.fh>
911 !> @}
subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, grid_index, name)
logical function mpp_domainug_ne(a, b)
Overload the .ne. for UG.
subroutine mpp_compute_extent(isg, ieg, ndivs, ibegin, iend, extent)
Computes extents for a grid decomposition with the given indices and divisions.
subroutine mpp_broadcast_domain_ug(domain)
Broadcast domain (useful only outside the context of its own pelist)
logical function mpp_domainug_eq(a, b)
Overload the .eq. for UG.
integer function stdout()
This function returns the current standard fortran unit numbers for output.
Definition: mpp_util.inc:42
integer function stderr()
This function returns the current standard fortran unit numbers for error messages.
Definition: mpp_util.inc:50
integer function mpp_npes()
Returns processor count for current pelist.
Definition: mpp_util.inc:420
integer function mpp_pe()
Returns processor ID.
Definition: mpp_util.inc:406