drifters_comm.F90
!***********************************************************************
!* Apache License 2.0
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* Licensed under the Apache License, Version 2.0 (the "License");
!* you may not use this file except in compliance with the License.
!* You may obtain a copy of the License at
!*
!*     http://www.apache.org/licenses/LICENSE-2.0
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied;
!* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
!* PARTICULAR PURPOSE. See the License for the specific language
!* governing permissions and limitations under the License.
!***********************************************************************
#include "fms_switches.h"

!> @defgroup drifters_comm_mod drifters_comm_mod
!> @ingroup drifters
!> @brief Routines and types to update drifter positions across processor domains

module drifters_comm_mod
#ifdef use_drifters

#ifdef _SERIAL

#define _TYPE_DOMAIN2D integer
#define _NULL_PE 0

#else

  use mpp_mod,         only : null_pe, fatal, note, mpp_error, mpp_pe, mpp_npes
  use mpp_mod,         only : mpp_root_pe
  use mpp_mod,         only : mpp_send, mpp_recv, mpp_sync_self
  use mpp_mod,         only : comm_tag_1, comm_tag_2, comm_tag_3, comm_tag_4
  use mpp_domains_mod, only : domain2d
  use mpp_domains_mod, only : mpp_get_neighbor_pe, mpp_define_domains, mpp_get_layout
  use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain
  use mpp_domains_mod, only : north, south, east, west, cyclic_global_domain
  use mpp_domains_mod, only : north_east, south_east, south_west, north_west

#define _TYPE_DOMAIN2D type(domain2d)
#define _NULL_PE NULL_PE

#endif

  use drifters_core_mod, only : drifters_core_type, drifters_core_remove_and_add, drifters_core_set_positions

  implicit none
  private

  public :: drifters_comm_type, drifters_comm_new, drifters_comm_del, drifters_comm_set_pe_neighbors
  public :: drifters_comm_set_domain, drifters_comm_update, drifters_comm_gather

  !> Type for drifter communication between PEs
  !> @ingroup drifters_comm_mod
  type :: drifters_comm_type
    real :: xcmin !< compute domain
    real :: xcmax !< compute domain
    real :: ycmin !< compute domain
    real :: ycmax !< compute domain
    real :: xdmin !< data domain
    real :: xdmax !< data domain
    real :: ydmin !< data domain
    real :: ydmax !< data domain
    real :: xgmin !< global valid min/max
    real :: xgmax !< global valid min/max
    real :: ygmin !< global valid min/max
    real :: ygmax !< global valid min/max
    logical :: xperiodic !< whether the domain is periodic in x (the period can be nearly infinite)
    logical :: yperiodic !< whether the domain is periodic in y (the period can be nearly infinite)
    integer :: pe_N  !< neighbor domains
    integer :: pe_S  !< neighbor domains
    integer :: pe_E  !< neighbor domains
    integer :: pe_W  !< neighbor domains
    integer :: pe_NE !< neighbor domains
    integer :: pe_SE !< neighbor domains
    integer :: pe_SW !< neighbor domains
    integer :: pe_NW !< neighbor domains
    integer :: pe_beg !< starting/ending pe, set to a value /= 0 if running concurrently
    integer :: pe_end !< starting/ending pe, set to a value /= 0 if running concurrently
  end type drifters_comm_type
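
  ! Illustrative lifecycle sketch ("dcomm", "domain", "x", "y", "drfts" and
  ! "new_positions" are hypothetical caller-side names, not part of this module):
  !
  !   type(drifters_comm_type) :: dcomm
  !   call drifters_comm_new(dcomm)                             ! set defaults
  !   call drifters_comm_set_domain(dcomm, domain, x, y, 0, 0)  ! bounds + neighbors
  !   ! ... every time step:
  !   call drifters_comm_update(dcomm, drfts, new_positions)
  !   ! ... at the end:
  !   call drifters_comm_del(dcomm)                             ! reset to defaults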

contains

!> @addtogroup drifters_comm_mod
!> @{
!===============================================================================
  !> @brief Initializes default values for @ref drifters_comm_type in self
  subroutine drifters_comm_new(self)
    type(drifters_comm_type) :: self !< A new @ref drifters_comm_type

    self%xcmin = -huge(1.); self%xcmax = +huge(1.)
    self%ycmin = -huge(1.); self%ycmax = +huge(1.)

    self%xdmin = -huge(1.); self%xdmax = +huge(1.)
    self%ydmin = -huge(1.); self%ydmax = +huge(1.)

    self%xgmin = -huge(1.); self%xgmax = +huge(1.)
    self%ygmin = -huge(1.); self%ygmax = +huge(1.)

    self%xperiodic = .false.; self%yperiodic = .false.

    self%pe_N  = _NULL_PE
    self%pe_S  = _NULL_PE
    self%pe_E  = _NULL_PE
    self%pe_W  = _NULL_PE
    self%pe_NE = _NULL_PE
    self%pe_SE = _NULL_PE
    self%pe_SW = _NULL_PE
    self%pe_NW = _NULL_PE

    self%pe_beg = 0
    self%pe_end = -1

  end subroutine drifters_comm_new

!===============================================================================
  !> @brief Reset data in a given @ref drifters_comm_type to defaults
  subroutine drifters_comm_del(self)
    type(drifters_comm_type) :: self !< A @ref drifters_comm_type to reset

    ! nothing to deallocate
    call drifters_comm_new(self)

  end subroutine drifters_comm_del

!===============================================================================
  !> @brief Set data domain bounds.
  subroutine drifters_comm_set_data_bounds(self, xmin, ymin, xmax, ymax)
    type(drifters_comm_type) :: self
    real, intent(in) :: xmin, ymin, xmax, ymax

    self%xdmin = max(xmin, self%xdmin)
    self%xdmax = min(xmax, self%xdmax)
    self%ydmin = max(ymin, self%ydmin)
    self%ydmax = min(ymax, self%ydmax)

  end subroutine drifters_comm_set_data_bounds

!===============================================================================
  !> @brief Set compute domain bounds.
  subroutine drifters_comm_set_comp_bounds(self, xmin, ymin, xmax, ymax)
    type(drifters_comm_type) :: self
    real, intent(in) :: xmin, ymin, xmax, ymax

    self%xcmin = max(xmin, self%xcmin)
    self%xcmax = min(xmax, self%xcmax)
    self%ycmin = max(ymin, self%ycmin)
    self%ycmax = min(ymax, self%ycmax)

  end subroutine drifters_comm_set_comp_bounds

!===============================================================================
  !> @brief Set neighboring pe numbers.
  subroutine drifters_comm_set_pe_neighbors(self, domain)
    type(drifters_comm_type) :: self !< Drifters communication type to set pe numbers for
    _TYPE_DOMAIN2D, intent(inout) :: domain !< domain to get neighboring PEs from

#ifndef _SERIAL
! parallel code

    integer :: pe_N, pe_S, pe_E, pe_W, pe_NE, pe_SE, pe_SW, pe_NW
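
    ! Compass layout of the eight neighbor PEs around this compute domain,
    ! as retrieved by the mpp_get_neighbor_pe calls below:
    !
    !     pe_NW  pe_N  pe_NE
    !     pe_W   (me)  pe_E
    !     pe_SW  pe_S  pe_SE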

    call mpp_get_neighbor_pe(domain, north,      pe_N )
    call mpp_get_neighbor_pe(domain, north_east, pe_NE)
    call mpp_get_neighbor_pe(domain, east,       pe_E )
    call mpp_get_neighbor_pe(domain, south_east, pe_SE)
    call mpp_get_neighbor_pe(domain, south,      pe_S )
    call mpp_get_neighbor_pe(domain, south_west, pe_SW)
    call mpp_get_neighbor_pe(domain, west,       pe_W )
    call mpp_get_neighbor_pe(domain, north_west, pe_NW)

    if(pe_N /= self%pe_N .and. self%pe_N == _NULL_PE) then
      self%pe_N = pe_N
    else if(pe_N /= self%pe_N) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: NORTH PE changed!')
    endif
    if(pe_NE /= self%pe_NE .and. self%pe_NE == _NULL_PE) then
      self%pe_NE = pe_NE
    else if(pe_NE /= self%pe_NE) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: NORTH-EAST PE changed!')
    endif
    if(pe_E /= self%pe_E .and. self%pe_E == _NULL_PE) then
      self%pe_E = pe_E
    else if(pe_E /= self%pe_E) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: EAST PE changed!')
    endif
    if(pe_SE /= self%pe_SE .and. self%pe_SE == _NULL_PE) then
      self%pe_SE = pe_SE
    else if(pe_SE /= self%pe_SE) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: SOUTH-EAST PE changed!')
    endif
    if(pe_S /= self%pe_S .and. self%pe_S == _NULL_PE) then
      self%pe_S = pe_S
    else if(pe_S /= self%pe_S) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: SOUTH PE changed!')
    endif
    if(pe_SW /= self%pe_SW .and. self%pe_SW == _NULL_PE) then
      self%pe_SW = pe_SW
    else if(pe_SW /= self%pe_SW) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: SOUTH-WEST PE changed!')
    endif
    if(pe_W /= self%pe_W .and. self%pe_W == _NULL_PE) then
      self%pe_W = pe_W
    else if(pe_W /= self%pe_W) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: WEST PE changed!')
    endif
    if(pe_NW /= self%pe_NW .and. self%pe_NW == _NULL_PE) then
      self%pe_NW = pe_NW
    else if(pe_NW /= self%pe_NW) then
      call mpp_error(fatal, 'drifters_comm_set_pe_neighbors: NORTH-WEST PE changed!')
    endif

#endif
! end of parallel code

  end subroutine drifters_comm_set_pe_neighbors

!===============================================================================
  !> @brief Set boundaries of domain and compute neighbors. This method can be called
  !! multiple times; the data domain will just be the intersection (overlap) of
  !! all domains (e.g. domain_u, domain_v, etc.), as illustrated in the sketch
  !! after this subroutine.
  subroutine drifters_comm_set_domain(self, domain, x, y, backoff_x, backoff_y)
    type(drifters_comm_type) :: self
    _TYPE_DOMAIN2D, intent(inout) :: domain
    real, intent(in) :: x(:) !< global x axis
    real, intent(in) :: y(:) !< global y axis
    integer, intent(in) :: backoff_x !< >= 0; reduce the data domain by this many indices in x
    integer, intent(in) :: backoff_y !< >= 0; reduce the data domain by this many indices in y

    ! compute/data domain start/end indices
    integer :: isc !< compute domain start/end indices
    integer :: iec !< compute domain start/end indices
    integer :: jsc !< compute domain start/end indices
    integer :: jec !< compute domain start/end indices
    integer :: isd !< data domain start/end indices
    integer :: ied !< data domain start/end indices
    integer :: jsd !< data domain start/end indices
    integer :: jed !< data domain start/end indices
    integer :: nx, ny, hx, hy, bckf_x, bckf_y, halox, haloy
    real :: dx, dy, xdmin, xdmax, ydmin, ydmax

#ifdef _SERIAL
    integer :: ibnds(1)

    ibnds = lbound(x); isc = ibnds(1)
    ibnds = ubound(x); iec = ibnds(1) - 1
    ibnds = lbound(y); jsc = ibnds(1)
    ibnds = ubound(y); jec = ibnds(1) - 1
#else
    call mpp_get_compute_domain(domain, isc, iec, jsc, jec)
#endif

    self%xcmin = max(x(isc), self%xcmin)
    self%xcmax = min(x(iec), self%xcmax)
    self%ycmin = max(y(jsc), self%ycmin)
    self%ycmax = min(y(jec), self%ycmax)

    nx = iec - isc + 1
    ny = jec - jsc + 1

#ifdef _SERIAL
    isd = 1; ied = size(x); jsd = 1; jed = size(y)
#else
    call mpp_get_data_domain(domain, isd, ied, jsd, jed)
#endif

    hx = max(ied-iec, isc-isd)
    hy = max(jed-jec, jsc-jsd)
    bckf_x = min(backoff_x, hx)
    bckf_y = min(backoff_y, hy)

    halox = max(0, hx - bckf_x)
    haloy = max(0, hy - bckf_y)

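    ! For each side of the domain: if the data-domain index runs past the
    ! global axis (isd < 1, ied > nx, etc., e.g. across a cyclic boundary),
    ! extrapolate the bound from the compute-domain edge using the local
    ! grid spacing; otherwise take it straight from the axis, backed off
    ! by bckf_x/bckf_y points.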
    if(isd < 1) then
      dx = x(2) - x(1)
      xdmin = self%xcmin - dx*halox
    else
      xdmin = x(isd+bckf_x)
    endif

    if(ied > nx) then
      dx = x(nx) - x(nx-1)
      xdmax = self%xcmax + dx*halox
    else
      xdmax = x(ied-bckf_x)
    endif

    if(jsd < 1) then
      dy = y(2) - y(1)
      ydmin = self%ycmin - dy*haloy
    else
      ydmin = y(jsd+bckf_y)
    endif

    if(jed > ny) then
      dy = y(ny) - y(ny-1)
      ydmax = self%ycmax + dy*haloy
    else
      ydmax = y(jed-bckf_y)
    endif

    self%xdmin = max(xdmin, self%xdmin)
    self%ydmin = max(ydmin, self%ydmin)
    self%xdmax = min(xdmax, self%xdmax)
    self%ydmax = min(ydmax, self%ydmax)

    call drifters_comm_set_pe_neighbors(self, domain)

  end subroutine drifters_comm_set_domain
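
  ! Illustrative repeated-call sketch (caller-side names dcomm, domain_u,
  ! domain_v, xu, yu, xv, yv and the backoff values are hypothetical): the
  ! data domain stored in dcomm ends up as the intersection of all the
  ! domains passed in.
  !
  !   call drifters_comm_set_domain(dcomm, domain_u, xu, yu, 0, 0)
  !   call drifters_comm_set_domain(dcomm, domain_v, xv, yv, 1, 1)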

!===============================================================================
  !> @brief Update drifter positions across processor domains, handing off
  !! drifters that crossed a compute-domain boundary and removing those that
  !! left the data domain.
  subroutine drifters_comm_update(self, drfts, new_positions, &
       & comm, remove, max_add_remove)
#ifndef _SERIAL
    use mpi
#endif

    type(drifters_comm_type) :: self
    type(drifters_core_type) :: drfts
    real, intent(inout) :: new_positions(:,:)
    integer, intent(in), optional :: comm !< MPI communicator
    logical, intent(in), optional :: remove(:) !< set to .true. for drifters that should be removed
    integer, intent(in), optional :: max_add_remove !< max number of drifters to add/remove

#ifdef _SERIAL
! serial code

    drfts%positions(:, 1:drfts%np) = new_positions(:, 1:drfts%np)
    return

#else
! parallel code
    integer :: nd, np, nar_est, ip, neigh_pe, irem, pe, npes, ntuples
    integer :: ntuples_tot, ndata, mycomm
#ifdef _USE_MPI
    integer :: ier
#endif
    integer, allocatable :: iadd(:)
    integer, allocatable :: table_recv(:), table_send(:)
    real,    allocatable :: data_recv(:,:), data_send(:,:)
    integer, allocatable :: indices_to_remove(:)
    integer, allocatable :: ids_to_add(:)
    real,    allocatable :: positions_to_add(:,:)
    real :: x, y, xold, yold
    character(len=128) :: ermsg, notemsg
    logical :: is_present
    integer :: id, j, k, m, n, el
    logical :: crossed_W, crossed_E, crossed_S, crossed_N
    logical :: was_in_compute_domain, left_domain

    mycomm = MPI_COMM_WORLD
    if( present(comm) ) mycomm = comm

    nd = drfts%nd
    np = size(new_positions, 2)
    if(np > 0 .and. nd < 2) call mpp_error(fatal, &
         & 'drifters_comm_update: number of dimensions must be 2 or higher.')

    nar_est = 100
    if(present(max_add_remove)) nar_est = max(1, max_add_remove)

    pe   = mpp_pe()
    npes = mpp_npes()

    ! assume the pe list is contiguous, self%pe_beg...self%pe_end
    allocate(iadd(self%pe_beg:self%pe_end))
    allocate(table_recv(self%pe_beg:self%pe_end))
    allocate(table_send(self%pe_beg:self%pe_end))
    allocate(data_recv(nar_est*(1+nd), self%pe_beg:self%pe_end))
    allocate(data_send(nar_est*(1+nd), self%pe_beg:self%pe_end))
    allocate(indices_to_remove(nar_est))

    table_send = 0
    table_recv = 0
    data_send  = 0
    data_recv  = 0

    iadd = 0
    irem = 0
    do ip = 1, np
      x = new_positions(1, ip)
      y = new_positions(2, ip)
      xold = drfts%positions(1, ip)
      yold = drfts%positions(2, ip)

      if( xold<self%xcmin .or. xold>self%xcmax .or. &
           & yold<self%ycmin .or. yold>self%ycmax ) then
        was_in_compute_domain = .false.
      else
        was_in_compute_domain = .true.
      endif

      ! check if the drifter crossed a compute domain boundary

      crossed_W = .false.
      crossed_E = .false.
      crossed_S = .false.
      crossed_N = .false.
      if( was_in_compute_domain .and. &
           & (x<self%xcmin) .and. (xold>self%xcmin) ) crossed_W = .true.
      if( was_in_compute_domain .and. &
           & (x>self%xcmax) .and. (xold<self%xcmax) ) crossed_E = .true.
      if( was_in_compute_domain .and. &
           & (y<self%ycmin) .and. (yold>self%ycmin) ) crossed_S = .true.
      if( was_in_compute_domain .and. &
           & (y>self%ycmax) .and. (yold<self%ycmax) ) crossed_N = .true.

      neigh_pe = _NULL_PE
      if(crossed_N .and. .not. crossed_E .and. .not. crossed_W) neigh_pe = self%pe_N
      if(crossed_N .and.       crossed_E                      ) neigh_pe = self%pe_NE
      if(crossed_E .and. .not. crossed_N .and. .not. crossed_S) neigh_pe = self%pe_E
      if(crossed_S .and.       crossed_E                      ) neigh_pe = self%pe_SE
      if(crossed_S .and. .not. crossed_E .and. .not. crossed_W) neigh_pe = self%pe_S
      if(crossed_S .and.       crossed_W                      ) neigh_pe = self%pe_SW
      if(crossed_W .and. .not. crossed_S .and. .not. crossed_N) neigh_pe = self%pe_W
      if(crossed_N .and.       crossed_W                      ) neigh_pe = self%pe_NW

      if(neigh_pe /= _NULL_PE) then
        iadd(neigh_pe) = iadd(neigh_pe) + 1
        if(iadd(neigh_pe) > nar_est) then
          write(notemsg, '(a,i4,a,i4,a)') 'drifters_comm_update: exceeded nar_est (', &
               & iadd(neigh_pe), '>', nar_est, ').'
          call mpp_error(fatal, notemsg)
        endif
        table_send(neigh_pe) = table_send(neigh_pe) + 1
        k = ( iadd(neigh_pe)-1 )*(1+nd) + 1
        data_send(k,        neigh_pe) = drfts%ids(ip)
        data_send(k+1:k+nd, neigh_pe) = new_positions(:, ip)
      endif

      ! check if the drifter left the data domain

      left_domain = .false.
      if( (x<self%xdmin .and. (self%pe_W/=pe)) .or. &
           & (x>self%xdmax .and. (self%pe_E/=pe)) .or. &
           & (y<self%ydmin .and. (self%pe_S/=pe)) .or. &
           & (y>self%ydmax .and. (self%pe_N/=pe)) ) then
        left_domain = .true.
      endif

      ! remove the drifter if it was tagged as such

      if(present(remove)) then
        if(remove(ip)) left_domain = .true.
      endif

      if(left_domain) then
        irem = irem + 1
        if(irem > nar_est) then
          write(notemsg, '(a,i4,a,i4,a)') 'drifters_comm_update: exceeded nar_est (', &
               & irem, '>', nar_est, ').'
          call mpp_error(fatal, notemsg)
        endif
        indices_to_remove(irem) = ip
      endif

    enddo

    ! update the drifters' positions (whatever needs to be removed is removed later)
    call drifters_core_set_positions(drfts, new_positions, ermsg)
    if(ermsg /= '') call mpp_error(fatal, ermsg)

    ! fill in table_recv from table_send. table_send contains the
    ! number of tuples that will be sent to another pe; table_recv
    ! will contain the number of tuples to be received. The indices
    ! of table_send refer to the pe the tuples should be sent to;
    ! the indices of table_recv refer to the pe number
    ! (self%pe_beg..self%pe_end) the tuples should be received from.
    !
    ! table_send(to_pe) = ntuples; table_recv(from_pe) = ntuples

    ! the following is a transpose operation
    ! table_send(m)[pe] -> table_recv(pe)[m]
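    !
    ! Worked example (hypothetical, two PEs numbered 0 and 1): if PE 0 has
    ! table_send = (/0, 3/) and PE 1 has table_send = (/2, 0/), then after
    ! the exchange PE 0 holds table_recv = (/0, 2/) and PE 1 holds
    ! table_recv = (/3, 0/); entry m of table_recv on PE p equals entry p
    ! of table_send on PE m.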
    do m = self%pe_beg, self%pe_end
#ifdef _USE_MPI
      call mpi_scatter(table_send,    1, mpi_integer, &
           &           table_recv(m), 1, mpi_integer, &
           &           m, mycomm, ier)
#else
      if(pe == m) then
        do k = self%pe_beg, self%pe_end
          call mpp_send(table_send(k), plen=1, to_pe=k, tag=comm_tag_1)
        enddo
      endif
      call mpp_recv(table_recv(m), glen=1, from_pe=m, tag=comm_tag_1)
#endif
    enddo

    ! communicate the new positions. data_send is an array of nar_est*(1+nd)
    ! rows by npes columns. Column j of data_send contains the tuples
    ! (id, x, y, ...) to be sent to pe=j; inversely, column j of data_recv
    ! contains the tuples (id, x, y, ...) received from pe=j.
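    !
    ! Layout sketch for one column of data_send/data_recv, assuming nd = 2
    ! for illustration (the stride per drifter is 1+nd):
    !
    !   (/ id_1, x_1, y_1,  id_2, x_2, y_2,  ...,  0, 0, 0 /)
    !
    ! i.e. table_send(m) tuples of (id, position), zero-padded out to
    ! nar_est*(1+nd) elements.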
    do m = self%pe_beg, self%pe_end
      ntuples = table_send(m)
      ndata = ntuples*(nd+1)
      ! note: only the first ndata elements of each column are meaningful,
      ! but the full nar_est*(1+nd) buffer is exchanged for simplicity
#ifdef _USE_MPI
      call mpi_scatter(data_send,      nar_est*(1+nd), mpi_real8, &
           &           data_recv(1,m), nar_est*(1+nd), mpi_real8, &
           &           m, mycomm, ier)
#else
      if(pe == m) then
        do k = self%pe_beg, self%pe_end
          call mpp_send(data_send(1,k), plen=nar_est*(1+nd), to_pe=k, tag=comm_tag_2)
        enddo
      endif
      call mpp_recv(data_recv(1,m), glen=nar_est*(1+nd), from_pe=m, tag=comm_tag_2)
#endif
    enddo

    ! the total number of tuples determines the size of ids_to_add/positions_to_add
    ntuples_tot = 0
    do m = self%pe_beg, self%pe_end
      ntuples_tot = ntuples_tot + table_recv(m)
    enddo

    allocate(positions_to_add(nd, ntuples_tot))
    allocate(ids_to_add(ntuples_tot))

    ! fill positions_to_add and ids_to_add
    k = 0
    do m = self%pe_beg, self%pe_end
      ! get the ids/positions coming from all pes
      do n = 1, table_recv(m)
        ! iterate over all ids/positions coming from pe=m
        el = (n-1)*(nd+1) + 1
        id = int(data_recv(el, m))
        ! only add the drifter if its id is not already present in drfts;
        ! a duplicate can occur when a drifter meanders about the compute
        ! domain boundary
        is_present = .false.
        do j = 1, drfts%np
          if(id == drfts%ids(j)) then
            is_present = .true.
            write(notemsg, '(a,i4,a)') 'Drifter ', id, ' already advected (will not be added).'
            call mpp_error(note, notemsg)
            exit
          endif
        enddo
        if(.not. is_present) then
          k = k + 1
          ids_to_add(k) = id
          positions_to_add(1:nd, k) = data_recv(el+1:el+nd, m)
        endif
      enddo
    enddo

    ! remove and add drifters
    if(irem > 0 .or. k > 0) then
      write(notemsg, '(i4,a,i4,a)') irem, ' drifter(s) will be removed, ', k, ' will be added'
      call mpp_error(note, notemsg)
!!$   if(k>0)    print *, 'positions to add ', positions_to_add(:,1:k)
!!$   if(irem>0) print *, 'ids to remove: ', indices_to_remove(1:irem)
    endif
    call drifters_core_remove_and_add(drfts, indices_to_remove(1:irem), &
         & ids_to_add(1:k), positions_to_add(:,1:k), ermsg)
    if(ermsg /= '') call mpp_error(fatal, ermsg)

#ifndef _USE_MPI
    ! make sure the unbuffered mpp_send calls have returned before deallocating
    call mpp_sync_self()
#endif

    deallocate(ids_to_add)
    deallocate(positions_to_add)

    deallocate(iadd)
    deallocate(table_recv)
    deallocate(table_send)
    deallocate(data_recv)
    deallocate(data_send)
    deallocate(indices_to_remove)

#endif
! end of parallel code

  end subroutine drifters_comm_update
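
  ! Illustrative per-time-step call (a sketch; "dcomm", "beached" and the
  ! value 64 are hypothetical caller-side choices):
  !
  !   ! ... advect drifters to obtain new_positions(1:nd, 1:np) ...
  !   call drifters_comm_update(dcomm, drfts, new_positions, &
  !        & remove=beached, max_add_remove=64)
  !
  ! "beached" is a caller-owned logical mask over the local drifters;
  ! max_add_remove bounds how many drifters may cross a boundary or be
  ! removed in one step (the default estimate is 100).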

!===============================================================================
  !> @brief Gather drifter data on the root PE and save it to a restart file.
  subroutine drifters_comm_gather(self, drfts, dinp, &
       & lons, lats, do_save_lonlat, &
       & filename, &
       & root, mycomm)

#ifndef _SERIAL
    use mpi
#endif
    use drifters_input_mod, only : drifters_input_type, drifters_input_save

    type(drifters_comm_type)  :: self
    type(drifters_core_type)  :: drfts
    type(drifters_input_type) :: dinp
    real, intent(in) :: lons(:), lats(:)
    logical, intent(in) :: do_save_lonlat
    character(len=*), intent(in) :: filename
    integer, intent(in), optional :: root   !< root pe
    integer, intent(in), optional :: mycomm !< MPI communicator

    character(len=128) :: ermesg

#ifdef _SERIAL
! serial code

    dinp%ids(1:drfts%np) = drfts%ids(1:drfts%np)
    dinp%positions(:, 1:drfts%np) = drfts%positions(:, 1:drfts%np)

    if(do_save_lonlat) then

      call drifters_input_save(dinp, filename=filename, &
           & geolon=lons, geolat=lats, ermesg=ermesg)

    else

      call drifters_input_save(dinp, filename=filename, ermesg=ermesg)

    endif

#else
! parallel code

    integer :: npf, ip, comm, root_pe, pe, npes, nd, np, npdim, npmax, ier, nptot
    integer :: i, j, k, kk
    integer, allocatable :: nps(:)
    real :: x, y
    real, allocatable :: lons0(:), lats0(:), recvbuf(:,:)
    real :: com_data(drfts%nd+3, drfts%np) ! communication data

    comm = MPI_COMM_WORLD
    if(present(mycomm)) comm = mycomm

    root_pe = mpp_root_pe()
    if(present(root)) root_pe = root

    pe   = mpp_pe()
    npes = mpp_npes()

    nd    = drfts%nd
    np    = drfts%np
    npdim = drfts%npdim

    allocate(nps(self%pe_beg:self%pe_end))
    nps = 0

    ! npf = number of drifters in the compute domain

    npf = 0
    do ip = 1, np
      x = drfts%positions(1, ip)
      y = drfts%positions(2, ip)
      if( x <= self%xcmax .and. x >= self%xcmin .and. &
           & y <= self%ycmax .and. y >= self%ycmin ) then
        npf = npf + 1
        com_data(1,        npf) = real(drfts%ids(ip))
        com_data(1+1:1+nd, npf) = drfts%positions(:, ip)
        com_data(2+nd,     npf) = lons(ip)
        com_data(3+nd,     npf) = lats(ip)
      endif
    enddo

    ! gather the number of drifters
#ifdef _USE_MPI
    call mpi_gather(npf, 1, mpi_int, &
         &          nps, 1, mpi_int, &
         &          root_pe, comm, ier)
    !!if(ier/=0) ermesg = 'drifters_write_restart: ERROR while gathering "npf"'
#else
    call mpp_send(npf, plen=1, to_pe=root_pe, tag=comm_tag_3)
    if(pe == root_pe) then
      do i = self%pe_beg, self%pe_end
        call mpp_recv(nps(i), glen=1, from_pe=i, tag=comm_tag_3)
      enddo
    endif
#endif

    ! Now we know the max number of drifters to expect from each PE, so allocate
    ! recvbuf (the first dimension will be zero on all PEs except root).

    ! allocate recvbuf to receive all the data on the root PE, strided by npmax*(nd+3)
    npmax = maxval(nps)
    allocate(recvbuf(npmax*(nd+3), self%pe_beg:self%pe_end))
    recvbuf = -1

    ! each PE sends its data to recvbuf on root_pe
#ifdef _USE_MPI
    call mpi_gather(com_data, npf*(nd+3),   mpi_real8, &
         &          recvbuf,  npmax*(nd+3), mpi_real8, &
         &          root_pe, comm, ier)
    !!if(ier/=0) ermesg = 'drifters_write_restart: ERROR while gathering "data"'
#else
    if(npf > 0) call mpp_send(com_data(1,1), plen=npf*(nd+3), to_pe=root_pe, tag=comm_tag_4)
    if(pe == root_pe) then
      do i = self%pe_beg, self%pe_end
        if(nps(i) > 0) call mpp_recv(recvbuf(1,i), glen=nps(i)*(nd+3), from_pe=i, tag=comm_tag_4)
      enddo
    endif
#endif

    ! set positions and ids
    if(pe == root_pe) then
      ! check dims
      nptot = sum(nps) ! total number of drifters, across all PEs
      if(nptot /= size(dinp%ids)) then
        deallocate(dinp%ids,       stat=ier)
        deallocate(dinp%positions, stat=ier)
        allocate(dinp%ids(nptot))
        allocate(dinp%positions(nd, nptot))
        dinp%ids = -1
        dinp%positions = -huge(1.)
      endif

      allocate(lons0(nptot), lats0(nptot))

      ! Set the new positions/ids in dinp on the root PE. Also set
      ! lons0/lats0; these arrays hold garbage if the caller did not supply
      ! meaningful lons/lats, which is fine because in that case they are
      ! not saved.
      j = 1
      do i = self%pe_beg, self%pe_end
        do k = 1, nps(i)
          kk = (nd+3)*(k-1)
          dinp%ids(j) = int(recvbuf(kk+1, i))
          dinp%positions(1:nd, j) = recvbuf(kk+1+1:kk+1+nd, i)
          lons0(j) = recvbuf(kk+2+nd, i)
          lats0(j) = recvbuf(kk+3+nd, i)
          j = j + 1
        enddo
      enddo

      if(do_save_lonlat) then

        call drifters_input_save(dinp, filename=filename, &
             & geolon=lons0, geolat=lats0, ermesg=ermesg)

      else

        call drifters_input_save(dinp, filename=filename, ermesg=ermesg)

      endif

      deallocate(lons0, lats0)

    endif

#ifndef _USE_MPI
    call mpp_sync_self()
#endif
    deallocate(nps,     stat=ier)
    deallocate(recvbuf, stat=ier)

#endif
! end of parallel code

  end subroutine drifters_comm_gather
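
  ! Illustrative restart-save sketch (called on all PEs; "dcomm" and the
  ! file name are hypothetical):
  !
  !   call drifters_comm_gather(dcomm, drfts, dinp, lons, lats, &
  !        & .true., 'drifters_restart.nc')
  !
  ! Every PE sends the drifters inside its compute domain; only the root
  ! PE fills dinp and writes the file.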

#endif
end module drifters_comm_mod

!===============================================================================
!===============================================================================