FMS 2025.01-dev
Flexible Modeling System
mpp_unstruct_domain.inc
1!***********************************************************************
2!* GNU Lesser General Public License
3!*
4!* This file is part of the GFDL Flexible Modeling System (FMS).
5!*
6!* FMS is free software: you can redistribute it and/or modify it under
7!* the terms of the GNU Lesser General Public License as published by
8!* the Free Software Foundation, either version 3 of the License, or (at
9!* your option) any later version.
10!*
11!* FMS is distributed in the hope that it will be useful, but WITHOUT
12!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14!* for more details.
15!*
16!* You should have received a copy of the GNU Lesser General Public
17!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
18!***********************************************************************
19!> @file
20!> @brief Routines for defining and managing unstructured grids
21
22!> @addtogroup mpp_domains_mod
23!> @{
24 !#####################################################################
25 subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, &
26 & grid_index, name)
27 type(domainUG), intent(inout) :: UG_domain
28 type(domain2d), target, intent(in) :: SG_domain
29 integer, intent(in) :: npts_tile(:) !< number of unstructured points on each tile
 30 integer, intent(in) :: grid_nlev(:) !< number of levels at each unstructured grid point.
 31 integer, intent(in) :: ndivs !< total number of divisions (one per PE) of the unstructured grid
 32 integer, intent(in) :: npes_io_group !< number of processors in an I/O group;
 33 !! only PEs with the same tile_id are
 34 !! placed in the same group
 35 integer, intent(in) :: grid_index(:) !< global 1-D structured-grid index of each unstructured point
 36 character(len=*), optional, intent(in) :: name !< optional domain name, printed with the decomposition on the root PE
37 integer, dimension(size(npts_tile(:))) :: ndivs_tile, pe_start, pe_end
38 integer, dimension(0:ndivs-1) :: ibegin, iend, costs_list
39 integer :: ntiles, ndivs_used, cur_tile
40 integer :: n, ts, te, p, pos, tile_id, ngroup, group_id, my_pos, i
41 integer :: npes_in_group, is, ie, ntotal_costs, max_cost, cur_cost, costs_left
42 integer :: npts_left, ndiv_left, cur_pos, ndiv, prev_cost, ioff
43 real :: avg_cost
44 integer :: costs(size(npts_tile(:)))
45
46 ug_domain%SG_domain => sg_domain
47 ntiles = size(npts_tile(:))
48 ug_domain%ntiles = ntiles
49
50 !--- total number of points must be no less than ndivs
51 if(sum(npts_tile)<ndivs) call mpp_error(fatal, &
52 & "mpp_define_unstruct_domain: total number of points is less than ndivs")
 53!--- Each grid point is assumed to have at least one level (grid_nlev >= 1).
54 do n = 1, size(grid_nlev(:))
55 if(grid_nlev(n) < 1) call mpp_error(fatal, &
56 & "mpp_define_unstruct_domain: grid_nlev at some point is less than 1")
57 enddo
58
59 !-- costs for each tile.
60 pos = 0
61 do n = 1, ntiles
62 costs(n) = 0
63 do i = 1, npts_tile(n)
64 pos = pos + 1
65 costs(n) = costs(n) + grid_nlev(pos)
66 enddo
67 enddo
68 ! compute number of divisions for each tile.
69 ntotal_costs = sum(costs)
70 !--- get the upper limit of ndivs for each tile.
71 do n = 1, ntiles
72 ndivs_tile(n) = ceiling(real(costs(n)*ndivs)/ntotal_costs)
73 enddo
74
75 ndivs_used = sum(ndivs_tile)
76 do while (ndivs_used > ndivs)
77 max_cost = 0
78 cur_tile = 0
79 do n = 1, ntiles
80 if( ndivs_tile(n) > 1 ) then
81 cur_cost = ceiling(real(costs(n))/(ndivs_tile(n)-1))
82 if( max_cost == 0 .OR. cur_cost<max_cost) then
83 max_cost = cur_cost
84 cur_tile = n
85 endif
86 endif
87 enddo
88 ndivs_used = ndivs_used-1
89 ndivs_tile(cur_tile) = ndivs_tile(cur_tile) - 1
90 enddo
91
92 te = -1
93 ioff = 0
94 do n = 1, ntiles
95 ts = te + 1
96 te = ts + ndivs_tile(n) - 1
97 costs_left = costs(n)
98 ndiv_left = ndivs_tile(n)
99 npts_left = npts_tile(n)
100 cur_pos = 1
101 do ndiv = 1, ndivs_tile(n)
102 cur_cost = 0
103 ibegin(ts+ndiv-1) = cur_pos
104 avg_cost = real(costs_left)/ndiv_left
105 do i = cur_pos, npts_tile(n)
106 cur_cost = cur_cost + grid_nlev(i+ioff)
107 costs_left = costs_left - grid_nlev(i+ioff)
108 if(npts_left < ndiv_left ) then
109 call mpp_error(fatal, "mpp_define_unstruct_domain: npts_left < ndiv_left")
110 else if(npts_left == ndiv_left ) then
111 cur_pos = i + 1
112 exit
113 else if(cur_cost .GE. avg_cost) then
114 prev_cost = cur_cost - grid_nlev(i+ioff)
115 if(i==cur_pos) then
116 cur_pos = i + 1
117 exit
118 else if( cur_cost - avg_cost .LE. avg_cost - prev_cost ) then
119 cur_pos = i + 1
120 exit
121 else
122 cur_pos = i
123 cur_cost = prev_cost
124 costs_left = costs_left + grid_nlev(i+ioff)
125 npts_left = npts_left+1
126 exit
127 endif
128 endif
129 npts_left = npts_left-1
130 enddo
131 iend(ts+ndiv-1) = cur_pos - 1
132 costs_list(ts+ndiv-1) = cur_cost
133 ndiv_left = ndiv_left-1
134 npts_left = npts_left-1
135 enddo
136 pe_start(n) = ts
137 pe_end(n) = te
138 ioff = ioff+ npts_tile(n)
139 enddo
140 if (associated(ug_domain%list)) deallocate(ug_domain%list) !< Check if allocated
141 allocate(ug_domain%list(0:ndivs-1))
142 do p = 0, ndivs-1
143 ug_domain%list(p)%compute%begin = ibegin(p)
144 ug_domain%list(p)%compute%end = iend(p)
145 ug_domain%list(p)%compute%size = ug_domain%list(p)%compute%end - ug_domain%list(p)%compute%begin + 1
146 ug_domain%list(p)%compute%max_size = 0
147 ug_domain%list(p)%pos = p
148 ug_domain%list(p)%pe = p + mpp_root_pe()
149 pos = 0
150 do n = 1, ntiles
151 if( p .GE. pe_start(n) .AND. p .LE. pe_end(n) ) then
152 ug_domain%list(p)%tile_id = n
153 exit
154 endif
155 pos = pos + npts_tile(n)
156 enddo
157 is = ug_domain%list(p)%compute%begin+pos
158 ie = ug_domain%list(p)%compute%end+pos
159 ug_domain%list(p)%compute%begin_index = minval(grid_index(is:ie))
160 ug_domain%list(p)%compute%end_index = maxval(grid_index(is:ie))
161 enddo
162
163 !--- write out domain decomposition from root pe
164 if(mpp_pe() == mpp_root_pe() .and. present(name)) then
165 write(stdout(),*) "unstruct domain name = ", trim(name)
166 write(stdout(),*) ug_domain%list(:)%compute%size
167 endif
168
169 pos = mpp_pe() - mpp_root_pe()
170 ug_domain%pe = mpp_pe()
171 ug_domain%pos = pos
172 ug_domain%tile_id = ug_domain%list(pos)%tile_id
173 p = pe_start(ug_domain%tile_id)
174 ug_domain%tile_root_pe = ug_domain%list(p)%pe
175 ug_domain%tile_npes = pe_end(ug_domain%tile_id) - pe_start(ug_domain%tile_id) + 1
176 ug_domain%compute = ug_domain%list(pos)%compute
177 ug_domain%compute%max_size = maxval( ug_domain%list(:)%compute%size )
178 ug_domain%global%begin = 1
179 ug_domain%global%end = npts_tile(ug_domain%tile_id)
180 ug_domain%global%size = ug_domain%global%end - ug_domain%global%begin + 1
181 ug_domain%global%max_size = -1 ! currently this is not supposed to be used.
182 pos = 0
183 do n = 1, ug_domain%tile_id-1
184 pos = pos + npts_tile(n)
185 enddo
186 ug_domain%global%begin_index = grid_index(pos+1)
187 ug_domain%global%end_index = grid_index(pos+npts_tile(n))
188
189 if (associated(ug_domain%grid_index)) deallocate(ug_domain%grid_index) !< Check if allocated
190 allocate(ug_domain%grid_index(ug_domain%compute%size))
191 do n = 1, ug_domain%compute%size
192 ug_domain%grid_index(n) = grid_index(pos+ug_domain%compute%begin+n-1)
193 enddo
194
195 !--- define io_domain
196 if (associated(ug_domain%io_domain)) deallocate(ug_domain%io_domain) !< Check if allocated
197 allocate(ug_domain%io_domain)
198 tile_id = ug_domain%tile_id
199 ug_domain%io_domain%pe = ug_domain%pe
 200 !--- figure out the number of I/O groups for the current tile
201 if(npes_io_group == 0) then
202 ngroup = 1
203 else
204 ngroup = ceiling(real(ndivs_tile(tile_id))/ npes_io_group)
205 endif
206
207!----------
208!ug support
209 ug_domain%npes_io_group = npes_io_group
210 ug_domain%io_layout = ngroup
211!----------
212
213 call mpp_compute_extent(1, ndivs_tile(tile_id), ngroup, ibegin(0:ngroup-1), iend(0:ngroup-1))
214 my_pos = ug_domain%pe - ug_domain%tile_root_pe + 1
215 do n = 0, ngroup-1
216 if( my_pos .GE. ibegin(n) .AND. my_pos .LE. iend(n) ) then
217 group_id = n
218 exit
219 endif
220 enddo
221
222 ug_domain%io_domain%tile_id = group_id+1
223 ug_domain%io_domain%compute = ug_domain%compute
224 ug_domain%io_domain%pe = ug_domain%pe
225 ug_domain%io_domain%pos = my_pos - ibegin(group_id) + 1
226 ug_domain%io_domain%tile_root_pe = ibegin(group_id) + ug_domain%tile_root_pe - 1
227 pos = ug_domain%io_domain%tile_root_pe - mpp_root_pe()
228 ug_domain%io_domain%global%begin = ug_domain%list(pos)%compute%begin
229 ug_domain%io_domain%global%begin_index = ug_domain%list(pos)%compute%begin_index
230 pos = iend(group_id) + ug_domain%tile_root_pe - mpp_root_pe() - 1
231 ug_domain%io_domain%global%end = ug_domain%list(pos)%compute%end
232 ug_domain%io_domain%global%end_index = ug_domain%list(pos)%compute%end_index
233 ug_domain%io_domain%global%size = ug_domain%io_domain%global%end - ug_domain%io_domain%global%begin + 1
234
235 npes_in_group = iend(group_id) - ibegin(group_id) + 1
236 if (associated(ug_domain%io_domain%list)) deallocate(ug_domain%io_domain%list) !< Check if allocated
237 allocate(ug_domain%io_domain%list(0:npes_in_group-1))
238 do n = 0, npes_in_group-1
239 pos = ug_domain%io_domain%tile_root_pe - mpp_root_pe() + n
240 ug_domain%io_domain%list(n)%compute = ug_domain%list(pos)%compute
241 ug_domain%io_domain%list(n)%pos = n
242 ug_domain%io_domain%list(n)%pe = ug_domain%list(pos)%pe
243 ug_domain%io_domain%list(n)%tile_id = group_id+1
244 enddo
245
246 call compute_overlap_sg2ug(ug_domain, sg_domain)
247 call compute_overlap_ug2sg(ug_domain)
248
249 return
250
251 end subroutine mpp_define_unstruct_domain
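 !> Illustrative usage sketch (not part of this file): how a land-type component might
 !! build an unstructured domain from an existing structured 2-D domain. The names
 !! SG_domain, npts_per_tile, nlev_per_pt and land_index are hypothetical, and the
 !! choice of ndivs and npes_io_group is just one plausible configuration.
 !!
 !!   type(domainUG) :: UG_domain
 !!   integer :: npts_per_tile(ntiles)        ! unstructured points on each tile
 !!   integer, allocatable :: nlev_per_pt(:)  ! number of levels at each point (>= 1)
 !!   integer, allocatable :: land_index(:)   ! global (j-1)*nxg+i index of each point
 !!
 !!   call mpp_define_unstruct_domain(UG_domain, SG_domain, npts_per_tile, nlev_per_pt, &
 !!                                   ndivs=mpp_npes(), npes_io_group=0, &
 !!                                   grid_index=land_index, name="land unstruct domain")
 !!
 !! With npes_io_group = 0, each tile forms a single I/O group (io_layout = 1).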
252
253
254 !####################################################################
255 subroutine compute_overlap_sg2ug(UG_domain, SG_domain)
256 type(domainUG), intent(inout) :: UG_domain
257 type(domain2d), intent(in) :: SG_domain
258 integer, dimension(0:size(SG_domain%list(:))-1) :: send_cnt, recv_cnt
259 integer, dimension(0:size(SG_domain%list(:))-1) :: send_buffer_pos, recv_buffer_pos
260 integer, dimension(:), allocatable :: send_buffer, recv_buffer, index_list
261 integer, dimension(:), allocatable :: buffer_pos
262 integer :: tile_id, nlist, nxg, begin_index, end_index, i, j
263 integer :: m, n, list, l, isc, iec, jsc, jec, ibegin, iend, grid_index
264 integer :: nrecv, nsend, send_pos, recv_pos, pos
265
266 !--- figure out the recv index information.
267 tile_id = ug_domain%tile_id
268 nlist = size(sg_domain%list(:))
269 nxg = sg_domain%x(1)%global%size
270 begin_index = ug_domain%compute%begin_index
271 end_index = ug_domain%compute%end_index
272 pos = 0
273 recv_cnt = 0
274 allocate(index_list(ug_domain%compute%size))
275 allocate(send_buffer(ug_domain%compute%size))
276 index_list = -1
277 do n = 0, nlist-1
278 if(sg_domain%list(n)%tile_id(1) .NE. tile_id) cycle
279 isc = sg_domain%list(n)%x(1)%compute%begin; iec = sg_domain%list(n)%x(1)%compute%end
280 jsc = sg_domain%list(n)%y(1)%compute%begin; jec = sg_domain%list(n)%y(1)%compute%end
281 ibegin = (jsc-1)*nxg + isc
282 iend = (jec-1)*nxg + iec
283 if(ibegin > end_index .OR. iend < begin_index) cycle
284 do l = 1, ug_domain%compute%size
285 grid_index = ug_domain%grid_index(l)
286 i = mod((grid_index-1), nxg) + 1
287 j = (grid_index-1)/nxg + 1
288 if( i .GE. isc .AND. i .LE. iec .and. j .GE. jsc .AND. j .LE. jec ) then
289 recv_cnt(n) = recv_cnt(n) + 1
290 pos = pos + 1
291 if(pos > ug_domain%compute%size) call mpp_error(fatal, &
292 'compute_overlap_SG2UG: pos > UG_domain%compute%size')
293 index_list(pos) = l
294 send_buffer(pos) = grid_index
295 endif
296 enddo
297 enddo
298
299 !--- make sure sum(recv_cnt) == UG_domain%compute%size
300 if( ug_domain%compute%size .NE. sum(recv_cnt) ) then
301 print*,"pe=", mpp_pe(), ug_domain%compute%size, sum(recv_cnt)
 302 call mpp_error(fatal, &
 303 "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(recv_cnt)")
304 endif
305 allocate(buffer_pos(0:nlist-1))
306 pos = 0
307 do list = 0,nlist-1
308 buffer_pos(list) = pos
309 pos = pos + recv_cnt(list)
310 enddo
311
312 nrecv = count( recv_cnt > 0 )
313 ug_domain%SG2UG%nrecv = nrecv
314 if (associated(ug_domain%SG2UG%recv)) deallocate(ug_domain%SG2UG%recv) !< Check if allocated
315 allocate(ug_domain%SG2UG%recv(nrecv))
316 nrecv = 0
317 pos = 0
318 do list = 0,nlist-1
319 m = mod( sg_domain%pos+nlist-list, nlist )
320 if( recv_cnt(m) > 0 ) then
321 nrecv = nrecv+1
322 ug_domain%SG2UG%recv(nrecv)%count = recv_cnt(m)
323 ug_domain%SG2UG%recv(nrecv)%pe = ug_domain%list(m)%pe
324 allocate(ug_domain%SG2UG%recv(nrecv)%i(recv_cnt(m)))
325 pos = buffer_pos(m)
326 do l = 1, recv_cnt(m)
327 pos = pos + 1
328 ug_domain%SG2UG%recv(nrecv)%i(l) = index_list(pos)
329 enddo
330 endif
331 enddo
332
333 !--- figure out the send index information.
334 send_cnt = recv_cnt
335 recv_cnt = 0
336 call mpp_alltoall(send_cnt,1,recv_cnt,1)
337 !--- make sure sum(send_cnt) == UG_domain%compute%size
 338 if( ug_domain%compute%size .NE. sum(send_cnt) ) call mpp_error(fatal, &
 339 "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(send_cnt)")
340 allocate(recv_buffer(sum(recv_cnt)))
341 send_buffer_pos = 0; recv_buffer_pos = 0
342 send_pos = 0; recv_pos = 0
343 do n = 0, nlist-1
344 if(send_cnt(n) > 0) then
345 send_buffer_pos(n) = send_pos
346 send_pos = send_pos + send_cnt(n)
347 endif
348 if(recv_cnt(n) > 0) then
349 recv_buffer_pos(n) = recv_pos
350 recv_pos = recv_pos + recv_cnt(n)
351 endif
352 enddo
353
354 call mpp_alltoall(send_buffer, send_cnt, send_buffer_pos, &
355 recv_buffer, recv_cnt, recv_buffer_pos)
356
357 nsend = count( recv_cnt(:) > 0 )
358 ug_domain%SG2UG%nsend = nsend
359 if (associated(ug_domain%SG2UG%send)) deallocate(ug_domain%SG2UG%send) !< Check if allocated
360 allocate(ug_domain%SG2UG%send(nsend))
361 nsend = 0
362 isc = sg_domain%x(1)%compute%begin
363 jsc = sg_domain%y(1)%compute%begin
364 do list = 0,nlist-1
365 m = mod( sg_domain%pos+list, nlist )
366 if( recv_cnt(m) > 0 ) then
367 nsend = nsend+1
368 ug_domain%SG2UG%send(nsend)%count = recv_cnt(m)
369 ug_domain%SG2UG%send(nsend)%pe = ug_domain%list(m)%pe
370 allocate(ug_domain%SG2UG%send(nsend)%i(recv_cnt(m)))
371 allocate(ug_domain%SG2UG%send(nsend)%j(recv_cnt(m)))
372 pos = recv_buffer_pos(m)
373 do l = 1, recv_cnt(m)
374 grid_index = recv_buffer(pos+l)
375 ug_domain%SG2UG%send(nsend)%i(l) = mod(grid_index-1,nxg) + 1
376 ug_domain%SG2UG%send(nsend)%j(l) = (grid_index-1)/nxg + 1
377 enddo
378 endif
379 enddo
380 deallocate(send_buffer, recv_buffer, index_list, buffer_pos)
381
382return
383
384 end subroutine compute_overlap_sg2ug
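 !> Worked example of the index mapping used above: each unstructured point carries a
 !! global 1-D structured-grid index grid_index = (j-1)*nxg + i, which is inverted as
 !!   i = mod(grid_index-1, nxg) + 1
 !!   j = (grid_index-1)/nxg + 1
 !! For example, with an assumed global x-size nxg = 96, grid_index = 200 gives
 !! i = mod(199,96)+1 = 8 and j = 199/96+1 = 3, and (3-1)*96 + 8 = 200 recovers the
 !! original index.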
385
386 !####################################################################
387 subroutine compute_overlap_ug2sg(UG_domain)
388 type(domainUG), intent(inout) :: UG_domain
389
390 !--- UG2SG is the reverse of SG2UG
391 ug_domain%UG2SG%nsend = ug_domain%SG2UG%nrecv
392 ug_domain%UG2SG%send => ug_domain%SG2UG%recv
393 ug_domain%UG2SG%nrecv = ug_domain%SG2UG%nsend
394 ug_domain%UG2SG%recv => ug_domain%SG2UG%send
395
396 return
397
398 end subroutine compute_overlap_ug2sg
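 !> Note: because UG2SG%send/recv above point directly at SG2UG%recv/send, the two pass
 !! types alias the same memory; deallocate_unstruct_pass_type below relies on this by
 !! deallocating only the UG2SG pointers and nullifying their SG2UG aliases.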
399
400 !####################################################################
401 subroutine mpp_get_ug_sg_domain(UG_domain,SG_domain)
402 type(domainUG), intent(inout) :: UG_domain
403 type(domain2d), pointer :: SG_domain
404
405 sg_domain => ug_domain%SG_domain
406
407 return
408
409 end subroutine mpp_get_ug_sg_domain
410
411 !####################################################################
412 function mpp_get_ug_io_domain(domain)
413 type(domainUG), intent(in) :: domain
414 type(domainUG), pointer :: mpp_get_UG_io_domain
415
416 if(ASSOCIATED(domain%io_domain)) then
417 mpp_get_ug_io_domain => domain%io_domain
418 else
419 call mpp_error(fatal, "mpp_get_UG_io_domain: io_domain is not defined, contact developer")
420 endif
421
422 end function mpp_get_ug_io_domain
423
424 !#####################################################################
425 subroutine mpp_get_ug_compute_domain( domain, begin, end, size)
426 type(domainUG), intent(in) :: domain
427 integer, intent(out), optional :: begin, end, size
428
429 if( PRESENT(begin) )begin = domain%compute%begin
430 if( PRESENT(end) )end = domain%compute%end
431 if( PRESENT(size) )size = domain%compute%size
432 return
433 end subroutine mpp_get_ug_compute_domain
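 !> Illustrative usage sketch (hypothetical caller code): the compute extent returned by
 !! this routine is what a component would typically use to size its unstructured fields.
 !!
 !!   integer :: is, ie, npts
 !!   real, allocatable :: field(:)
 !!   call mpp_get_ug_compute_domain(UG_domain, begin=is, end=ie, size=npts)
 !!   allocate(field(npts))   ! one value per unstructured point owned by this PE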
434
435 !#####################################################################
436 subroutine mpp_get_ug_global_domain( domain, begin, end, size)
437 type(domainUG), intent(in) :: domain
438 integer, intent(out), optional :: begin, end, size
439
440 if( PRESENT(begin) )begin = domain%global%begin
441 if( PRESENT(end) )end = domain%global%end
442 if( PRESENT(size) )size = domain%global%size
443 return
444 end subroutine mpp_get_ug_global_domain
445
446 !#####################################################################
447 subroutine mpp_get_ug_compute_domains( domain, begin, end, size )
448 type(domainUG), intent(in) :: domain
449 integer, intent(out), optional, dimension(:) :: begin, end, size
450
451 !we use shape instead of size for error checks because size is used as an argument
452 if( PRESENT(begin) )then
453 if( any(shape(begin).NE.shape(domain%list)) ) &
454 call mpp_error( fatal, 'mpp_get_UG_compute_domains: begin array size does not match domain.' )
455 begin(:) = domain%list(:)%compute%begin
456 end if
457 if( PRESENT(end) )then
458 if( any(shape(end).NE.shape(domain%list)) ) &
459 call mpp_error( fatal, 'mpp_get_UG_compute_domains: end array size does not match domain.' )
460 end(:) = domain%list(:)%compute%end
461 end if
462 if( PRESENT(size) )then
463 if( any(shape(size).NE.shape(domain%list)) ) &
464 call mpp_error( fatal, 'mpp_get_UG_compute_domains: size array size does not match domain.' )
465 size(:) = domain%list(:)%compute%size
466 end if
467 return
468 end subroutine mpp_get_ug_compute_domains
469
470 !#####################################################################
471 subroutine mpp_get_ug_domains_index( domain, begin, end)
472 type(domainUG), intent(in) :: domain
473 integer, intent(out), dimension(:) :: begin, end
474
475 !we use shape instead of size for error checks because size is used as an argument
476 if( any(shape(begin).NE.shape(domain%list)) ) &
 477 call mpp_error( fatal, 'mpp_get_UG_domains_index: begin array size does not match domain.' )
478 begin(:) = domain%list(:)%compute%begin_index
479 if( any(shape(end).NE.shape(domain%list)) ) &
 480 call mpp_error( fatal, 'mpp_get_UG_domains_index: end array size does not match domain.' )
481 end(:) = domain%list(:)%compute%end_index
482 return
483 end subroutine mpp_get_ug_domains_index
484
485 !#####################################################################
486 function mpp_get_ug_domain_ntiles(domain)
487 type(domainUG), intent(in) :: domain
488 integer :: mpp_get_UG_domain_ntiles
489
490 mpp_get_ug_domain_ntiles = domain%ntiles
491 return
492 end function mpp_get_ug_domain_ntiles
493
494 !#######################################################################
495 subroutine mpp_get_ug_domain_tile_list(domain, tiles)
496 type(domainUG), intent(in) :: domain
497 integer, intent(inout) :: tiles(:)
498 integer :: i
499
500 if( size(tiles(:)).NE.size(domain%list(:)) ) &
501 call mpp_error( fatal, 'mpp_get_ug_domain_tile_list: tiles array size does not match domain.' )
502 do i = 1, size(tiles(:))
503 tiles(i) = domain%list(i-1)%tile_id
504 end do
505
506 end subroutine mpp_get_ug_domain_tile_list
507
508 !#####################################################################
509 function mpp_get_ug_domain_tile_id(domain)
510 type(domainUG), intent(in) :: domain
511 integer :: mpp_get_UG_domain_tile_id
512
513 mpp_get_ug_domain_tile_id = domain%tile_id
514 return
515 end function mpp_get_ug_domain_tile_id
516
517 !####################################################################
518 function mpp_get_ug_domain_npes(domain)
519 type(domainUG), intent(in) :: domain
520 integer :: mpp_get_UG_domain_npes
521
522 mpp_get_ug_domain_npes = size(domain%list(:))
523 return
524
525 end function mpp_get_ug_domain_npes
526
527
528 !####################################################################
529 subroutine mpp_get_ug_domain_pelist( domain, pelist)
530 type(domainUG), intent(in) :: domain
531 integer, intent(out) :: pelist(:)
532
533 if( size(pelist(:)).NE.size(domain%list(:)) ) &
534 call mpp_error( fatal, 'mpp_get_UG_domain_pelist: pelist array size does not match domain.' )
535
536 pelist(:) = domain%list(:)%pe
537 return
538
539 end subroutine mpp_get_ug_domain_pelist
540
541 !###################################################################
542 subroutine mpp_get_ug_domain_tile_pe_inf( domain, root_pe, npes, pelist)
543 type(domainUG), intent(in) :: domain
544 integer, optional, intent(out) :: root_pe, npes
545 integer, optional, intent(out) :: pelist(:)
546
547 if(present(root_pe)) root_pe = domain%tile_root_pe
548 if(present(npes)) npes = domain%tile_npes
549
550 if(present(pelist)) then
551 if( size(pelist(:)).NE. domain%tile_npes ) &
552 call mpp_error( fatal, 'mpp_get_UG_domain_tile_pe_inf: pelist array size does not match domain.' )
553 pelist(:) = domain%list(domain%pos:domain%pos+domain%tile_npes-1)%pe
554 endif
555 return
556
557 end subroutine mpp_get_ug_domain_tile_pe_inf
558
559
560 !####################################################################
561 subroutine mpp_get_ug_domain_grid_index( domain, grid_index)
562 type(domainUG), intent(in) :: domain
563 integer, intent(out) :: grid_index(:)
564
565 if( size(grid_index(:)).NE.size(domain%grid_index(:)) ) &
566 call mpp_error( fatal, 'mpp_get_UG_domain_grid_index: grid_index array size does not match domain.' )
567
568 grid_index(:) = domain%grid_index(:)
569 return
570
571 end subroutine mpp_get_ug_domain_grid_index
572
573 !###################################################################
574 subroutine mpp_define_null_ug_domain(domain)
575 type(domainUG), intent(inout) :: domain
576
577 domain%global%begin = -1; domain%global%end = -1; domain%global%size = 0
578 domain%compute%begin = -1; domain%compute%end = -1; domain%compute%size = 0
579 domain%pe = null_pe
580 domain%ntiles = -1
581 domain%pos = -1
582 domain%tile_id = -1
583 domain%tile_root_pe = -1
584
585 end subroutine mpp_define_null_ug_domain
586
587!##############################################################################
588 !> @brief Broadcast domain (useful only outside the context of its own pelist)
589 subroutine mpp_broadcast_domain_ug( domain )
590 type(domainUG), intent(inout) :: domain
591 integer, allocatable :: pes(:)
592 logical :: native !true if I'm on the pelist of this domain
593 integer :: listsize, listpos
594 integer :: n
595 integer, dimension(7) :: msg, info !pe and compute domain of each item in list
596 integer :: errunit
597
598 errunit = stderr()
599 if( .NOT.module_is_initialized ) &
600 call mpp_error( fatal, 'MPP_BROADCAST_DOMAIN_ug: You must first call mpp_domains_init.' )
601
602!get the current pelist
603 allocate( pes(0:mpp_npes()-1) )
604 call mpp_get_current_pelist(pes)
605
606!am I part of this domain?
607 native = ASSOCIATED(domain%list)
608
609!set local list size
610 if( native )then
611 listsize = size(domain%list(:))
612 else
613 listsize = 0
614 end if
615 call mpp_max(listsize)
616
617 if( .NOT.native )then
618!initialize domain%list and set null values in message
619 if (associated(domain%list)) deallocate(domain%list) !< Check if allocated
620 allocate( domain%list(0:listsize-1) )
621 domain%pe = null_pe
622 domain%pos = -1
623 domain%ntiles = -1
624 domain%compute%begin = 1
625 domain%compute%end = -1
626 domain%compute%begin_index = 1
627 domain%compute%end_index = -1
628 domain%global %begin = -1
629 domain%global %end = -1
630 domain%tile_id = -1
631 domain%tile_root_pe = -1
632 end if
633!initialize values in info
634 info(1) = domain%pe
635 info(2) = domain%pos
636 info(3) = domain%tile_id
637 call mpp_get_ug_compute_domain( domain, info(4), info(5))
638 info(6) = domain%compute%begin_index
639 info(7) = domain%compute%end_index
640!broadcast your info across current pelist and unpack if needed
641 listpos = 0
642 do n = 0,mpp_npes()-1
643 msg = info
644 if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
645 call mpp_broadcast( msg, 7, pes(n) )
646!no need to unpack message if native
647!no need to unpack message from non-native PE
648 if( .NOT.native .AND. msg(1).NE.null_pe )then
649 domain%list(listpos)%pe = msg(1)
650 domain%list(listpos)%pos = msg(2)
651 domain%list(listpos)%tile_id = msg(3)
652 domain%list(listpos)%compute%begin = msg(4)
653 domain%list(listpos)%compute%end = msg(5)
654 domain%list(listpos)%compute%begin_index = msg(6)
655 domain%list(listpos)%compute%end_index = msg(7)
656 listpos = listpos + 1
657 if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'ls,le=', msg(4:5)
658 end if
659 end do
660
661 end subroutine mpp_broadcast_domain_ug
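 !> Illustrative usage sketch (hypothetical caller code): this broadcast is typically done
 !! after switching to a pelist that is a superset of the domain's own pelist, so that PEs
 !! outside the domain learn its decomposition. union_pelist and my_pelist are assumed,
 !! caller-built pelists.
 !!
 !!   call mpp_set_current_pelist(union_pelist)
 !!   call mpp_broadcast_domain_ug(UG_domain)
 !!   call mpp_set_current_pelist(my_pelist)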
662
663!------------------------------------------------------------------------------
664function mpp_domain_ug_is_tile_root_pe(domain) result(is_root)
665
666 !<Inputs/Outputs
667 type(domainUG),intent(in) :: domain
668 logical(l8_kind) :: is_root
669
670 if (domain%pe .eq. domain%tile_root_pe) then
671 is_root = .true.
672 else
673 is_root = .false.
674 endif
675
676 return
677end function mpp_domain_ug_is_tile_root_pe
678
679!------------------------------------------------------------------------------
 680!> @brief Returns the io_layout of an unstructured domain.
 681!! Note: this accessor was added because none previously existed for unstructured
 682!! domains; its correctness should still be verified.
683function mpp_get_io_domain_ug_layout(domain) result(io_layout)
684
685 !<Inputs/Outputs
686 type(domainUG),intent(in) :: domain
687 integer(i4_kind) :: io_layout
688
689 io_layout = domain%io_layout
690
691 return
 692 end function mpp_get_io_domain_ug_layout
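 !> Note: for an unstructured domain, io_layout holds the number of I/O groups on the
 !! current tile, i.e. 1 when npes_io_group == 0 and
 !! ceiling(real(ndivs_tile(tile_id))/npes_io_group) otherwise, as set in
 !! mpp_define_unstruct_domain above.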
693
694
695!------------------------------------------------------------------
696subroutine deallocate_unstruct_overlap_type(overlap)
697 type(unstruct_overlap_type), intent(inout) :: overlap
698
699 if(associated(overlap%i)) deallocate(overlap%i)
700 if(associated(overlap%j)) deallocate(overlap%j)
701
702end subroutine deallocate_unstruct_overlap_type
703
704!------------------------------------------------------------------
705subroutine deallocate_unstruct_pass_type(domain)
706 type(domainUG), intent(inout) :: domain
707 integer :: n
708
709 do n = 1, domain%UG2SG%nsend
710 call deallocate_unstruct_overlap_type(domain%UG2SG%send(n))
711 enddo
712 do n = 1, domain%UG2SG%nrecv
713 call deallocate_unstruct_overlap_type(domain%UG2SG%recv(n))
714 enddo
715
716 ! SG2UG%{send,recv} point to the same memory as UG2SG%{send,recv}
717 ! respectively. Thus, we only need to `deallocate` one, and nullify
718 ! the other set.
719 if(associated(domain%UG2SG%send)) then
720 deallocate(domain%UG2SG%send)
721 nullify(domain%UG2SG%send)
722 nullify(domain%SG2UG%recv)
723 end if
724 if(associated(domain%UG2SG%recv)) then
725 deallocate(domain%UG2SG%recv)
726 nullify(domain%UG2SG%recv)
727 nullify(domain%SG2UG%send)
728 end if
729end subroutine deallocate_unstruct_pass_type
730
731!------------------------------------------------------------------
732subroutine mpp_deallocate_domainug(domain)
733
734 !<Inputs/Outputs
735 type(domainUG),intent(inout) :: domain
736
737 if (associated(domain%list)) then
738 deallocate(domain%list)
739 domain%list => null()
740 endif
741
742 if (associated(domain%io_domain)) then
743 if (associated(domain%io_domain%list)) then
744 deallocate(domain%io_domain%list)
745 domain%io_domain%list => null()
746 endif
747 deallocate(domain%io_domain)
748 domain%io_domain => null()
749 endif
750
751 call deallocate_unstruct_pass_type(domain)
752
753 if (associated(domain%grid_index)) then
754 deallocate(domain%grid_index)
755 domain%grid_index => null()
756 endif
757
758 if (associated(domain%SG_domain)) then
759 domain%SG_domain => null()
760 endif
761
762 return
763end subroutine mpp_deallocate_domainug
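 !> Illustrative usage sketch (hypothetical caller code): releasing an unstructured domain
 !! when a component shuts down.
 !!
 !!   call mpp_deallocate_domainUG(UG_domain)   ! frees list, io_domain, overlaps and grid_index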
764
765 !###################################################################
766 !> Overload the .eq. for UG
767 function mpp_domainug_eq( a, b )
768 logical :: mpp_domainUG_eq
769 type(domainUG), intent(in) :: a, b
770
771 if (associated(a%SG_domain) .and. associated(b%SG_domain)) then
772 if (a%SG_domain .ne. b%SG_domain) then
773 mpp_domainug_eq = .false.
774 return
775 endif
776 elseif (associated(a%SG_domain) .and. .not. associated(b%SG_domain)) then
777 mpp_domainug_eq = .false.
778 return
779 elseif (.not. associated(a%SG_domain) .and. associated(b%SG_domain)) then
780 mpp_domainug_eq = .false.
781 return
782 endif
783
784 mpp_domainug_eq = (a%npes_io_group .EQ. b%npes_io_group) .AND. &
785 (a%pos .EQ. b%pos) .AND. &
786 (a%ntiles .EQ. b%ntiles) .AND. &
787 (a%tile_id .EQ. b%tile_id) .AND. &
788 (a%tile_npes .EQ. b%tile_npes) .AND. &
789 (a%tile_root_pe .EQ. b%tile_root_pe)
790
791 if(.not. mpp_domainug_eq) return
792
793 mpp_domainug_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
794 a%compute%end .EQ.b%compute%end .AND. &
795 a%global%begin .EQ.b%global%begin .AND. &
796 a%global%end .EQ.b%global%end .AND. &
797 a%SG2UG%nsend .EQ.b%SG2UG%nsend .AND. &
798 a%SG2UG%nrecv .EQ.b%SG2UG%nrecv .AND. &
799 a%UG2SG%nsend .EQ.b%UG2SG%nsend .AND. &
800 a%UG2SG%nrecv .EQ.b%UG2SG%nrecv &
801 )
802
803 return
804 end function mpp_domainug_eq
805
806 !> Overload the .ne. for UG
807 function mpp_domainug_ne( a, b )
808 logical :: mpp_domainUG_ne
809 type(domainUG), intent(in) :: a, b
810
811 mpp_domainug_ne = .NOT. ( a.EQ.b )
812 return
813 end function mpp_domainug_ne
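 !> Illustrative usage sketch: assuming mpp_domainUG_eq and mpp_domainUG_ne are bound to
 !! the .EQ. and .NE. operators for domainUG in mpp_domains_mod (as the comments above
 !! indicate), two unstructured domains can be compared directly:
 !!
 !!   if (UG_domain_a .NE. UG_domain_b) call mpp_error(FATAL, "unstructured domains differ")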
814
815#undef MPP_TYPE_
816#define MPP_TYPE_ real(r8_kind)
817#undef mpp_pass_SG_to_UG_2D_
818#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r8_2d
819#undef mpp_pass_SG_to_UG_3D_
820#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r8_3d
821#undef mpp_pass_UG_to_SG_2D_
822#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r8_2d
823#undef mpp_pass_UG_to_SG_3D_
824#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r8_3d
825#include <mpp_unstruct_pass_data.fh>
826
827#undef MPP_TYPE_
828#define MPP_TYPE_ real(r4_kind)
829#undef mpp_pass_SG_to_UG_2D_
830#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r4_2d
831#undef mpp_pass_SG_to_UG_3D_
832#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r4_3d
833#undef mpp_pass_UG_to_SG_2D_
834#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r4_2d
835#undef mpp_pass_UG_to_SG_3D_
836#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r4_3d
837#include <mpp_unstruct_pass_data.fh>
838
839#undef MPP_TYPE_
840#define MPP_TYPE_ integer(i4_kind)
841#undef mpp_pass_SG_to_UG_2D_
842#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_i4_2d
843#undef mpp_pass_SG_to_UG_3D_
844#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_i4_3d
845#undef mpp_pass_UG_to_SG_2D_
846#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_i4_2d
847#undef mpp_pass_UG_to_SG_3D_
848#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_i4_3d
849#include <mpp_unstruct_pass_data.fh>
850
851#undef MPP_TYPE_
852#define MPP_TYPE_ logical(i4_kind)
853#undef mpp_pass_SG_to_UG_2D_
854#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_l4_2d
855#undef mpp_pass_SG_to_UG_3D_
856#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_l4_3d
857#undef mpp_pass_UG_to_SG_2D_
858#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_l4_2d
859#undef mpp_pass_UG_to_SG_3D_
860#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_l4_3d
861#include <mpp_unstruct_pass_data.fh>
862
863#undef MPP_GLOBAL_FIELD_UG_2D_
864#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r8_2d
865#undef MPP_GLOBAL_FIELD_UG_3D_
866#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r8_3d
867#undef MPP_GLOBAL_FIELD_UG_4D_
868#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r8_4d
869#undef MPP_GLOBAL_FIELD_UG_5D_
870#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r8_5d
871#undef MPP_TYPE_
872#define MPP_TYPE_ real(r8_kind)
873#include <mpp_global_field_ug.fh>
874
875#undef MPP_GLOBAL_FIELD_UG_2D_
876#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i8_2d
877#undef MPP_GLOBAL_FIELD_UG_3D_
878#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i8_3d
879#undef MPP_GLOBAL_FIELD_UG_4D_
880#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i8_4d
881#undef MPP_GLOBAL_FIELD_UG_5D_
882#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i8_5d
883#undef MPP_TYPE_
884#define MPP_TYPE_ integer(i8_kind)
885#include <mpp_global_field_ug.fh>
886
887#undef MPP_GLOBAL_FIELD_UG_2D_
888#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r4_2d
889#undef MPP_GLOBAL_FIELD_UG_3D_
890#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r4_3d
891#undef MPP_GLOBAL_FIELD_UG_4D_
892#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r4_4d
893#undef MPP_GLOBAL_FIELD_UG_5D_
894#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r4_5d
895#undef MPP_TYPE_
896#define MPP_TYPE_ real(r4_kind)
897#include <mpp_global_field_ug.fh>
898
899#undef MPP_GLOBAL_FIELD_UG_2D_
900#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i4_2d
901#undef MPP_GLOBAL_FIELD_UG_3D_
902#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i4_3d
903#undef MPP_GLOBAL_FIELD_UG_4D_
904#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i4_4d
905#undef MPP_GLOBAL_FIELD_UG_5D_
906#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i4_5d
907#undef MPP_TYPE_
908#define MPP_TYPE_ integer(i4_kind)
909#include <mpp_global_field_ug.fh>
910!> @}