drifters_comm.F90
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
#include "fms_switches.h"

!> @defgroup drifters_comm_mod drifters_comm_mod
!> @ingroup drifters
!> @brief Routines and types to update drifter positions across processor domains

module drifters_comm_mod
#ifdef use_drifters

#ifdef _SERIAL

#define _TYPE_DOMAIN2D integer
#define _NULL_PE 0

#else

  use mpp_mod, only : NULL_PE, FATAL, NOTE, mpp_error, mpp_pe, mpp_npes
  use mpp_mod, only : mpp_root_pe
  use mpp_mod, only : mpp_send, mpp_recv, mpp_sync_self
  use mpp_mod, only : COMM_TAG_1, COMM_TAG_2, COMM_TAG_3, COMM_TAG_4
  use mpp_domains_mod, only : domain2d
  use mpp_domains_mod, only : mpp_get_neighbor_pe, mpp_define_domains, mpp_get_layout
  use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain
  use mpp_domains_mod, only : NORTH, SOUTH, EAST, WEST, CYCLIC_GLOBAL_DOMAIN
  use mpp_domains_mod, only : NORTH_EAST, SOUTH_EAST, SOUTH_WEST, NORTH_WEST

#define _TYPE_DOMAIN2D type(domain2d)
#define _NULL_PE NULL_PE

#endif

  use drifters_core_mod, only: drifters_core_type, drifters_core_remove_and_add, drifters_core_set_positions

  implicit none
  private

  public :: drifters_comm_type, drifters_comm_new, drifters_comm_del, drifters_comm_set_pe_neighbors
  public :: drifters_comm_set_domain, drifters_comm_update, drifters_comm_gather

  !> Type for drifter communication between PEs
  !> @ingroup drifters_comm_mod
  type :: drifters_comm_type
    real :: xcmin !< compute domain x min
    real :: xcmax !< compute domain x max
    real :: ycmin !< compute domain y min
    real :: ycmax !< compute domain y max
    real :: xdmin !< data domain x min
    real :: xdmax !< data domain x max
    real :: ydmin !< data domain y min
    real :: ydmax !< data domain y max
    real :: xgmin !< global valid x min
    real :: xgmax !< global valid x max
    real :: ygmin !< global valid y min
    real :: ygmax !< global valid y max
    logical :: xperiodic !< x period (can be nearly infinite)
    logical :: yperiodic !< y period (can be nearly infinite)
    integer :: pe_N  !< northern neighbor PE
    integer :: pe_S  !< southern neighbor PE
    integer :: pe_E  !< eastern neighbor PE
    integer :: pe_W  !< western neighbor PE
    integer :: pe_NE !< north-eastern neighbor PE
    integer :: pe_SE !< south-eastern neighbor PE
    integer :: pe_SW !< south-western neighbor PE
    integer :: pe_NW !< north-western neighbor PE
    integer :: pe_beg !< starting PE; set to a value /= 0 if running concurrently
    integer :: pe_end !< ending PE; set to a value /= 0 if running concurrently
  end type drifters_comm_type
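
  ! A minimal lifecycle sketch of how this type is used (hedged: "domain",
  ! "xg", "yg", "drfts", "dinp", "lons", "lats" and "xyz_new" are
  ! hypothetical caller-side variables, not provided by this module):
  !
  !   type(drifters_comm_type) :: dcomm
  !   call drifters_comm_new(dcomm)
  !   ! intersect with one or more decomposed domains (e.g. u- and v-grids)
  !   call drifters_comm_set_domain(dcomm, domain, xg, yg, 0, 0)
  !   ! each step: advect drifters, then migrate them across PE boundaries
  !   call drifters_comm_update(dcomm, drfts, xyz_new)
  !   ! gather everything on the root PE and save a restart
  !   call drifters_comm_gather(dcomm, drfts, dinp, lons, lats, .true., 'drifters_res.nc')
  !   call drifters_comm_del(dcomm)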

contains

!> @addtogroup drifters_comm_mod
!> @{
!===============================================================================
  !> @brief Initializes default values for @ref drifters_comm_type in self
  subroutine drifters_comm_new(self)
    type(drifters_comm_type) :: self !< A new @ref drifters_comm_type

    self%xcmin = -huge(1.); self%xcmax = +huge(1.)
    self%ycmin = -huge(1.); self%ycmax = +huge(1.)

    self%xdmin = -huge(1.); self%xdmax = +huge(1.)
    self%ydmin = -huge(1.); self%ydmax = +huge(1.)

    self%xgmin = -huge(1.); self%xgmax = +huge(1.)
    self%ygmin = -huge(1.); self%ygmax = +huge(1.)

    self%xperiodic = .false.; self%yperiodic = .false.

    self%pe_N  = _NULL_PE
    self%pe_S  = _NULL_PE
    self%pe_E  = _NULL_PE
    self%pe_W  = _NULL_PE
    self%pe_NE = _NULL_PE
    self%pe_SE = _NULL_PE
    self%pe_SW = _NULL_PE
    self%pe_NW = _NULL_PE

    self%pe_beg =  0
    self%pe_end = -1

  end subroutine drifters_comm_new

!===============================================================================
  !> @brief Reset data in a given @ref drifters_comm_type to defaults
  subroutine drifters_comm_del(self)
    type(drifters_comm_type) :: self !< A @ref drifters_comm_type to reset

    ! nothing to deallocate
    call drifters_comm_new(self)

  end subroutine drifters_comm_del

!===============================================================================
  !> @brief Set data domain bounds.
  subroutine drifters_comm_set_data_bounds(self, xmin, ymin, xmax, ymax)
    type(drifters_comm_type) :: self
    real, intent(in) :: xmin, ymin, xmax, ymax

    self%xdmin = max(xmin, self%xdmin)
    self%xdmax = min(xmax, self%xdmax)
    self%ydmin = max(ymin, self%ydmin)
    self%ydmax = min(ymax, self%ydmax)

  end subroutine drifters_comm_set_data_bounds

!===============================================================================
  !> @brief Set compute domain bounds.
  subroutine drifters_comm_set_comp_bounds(self, xmin, ymin, xmax, ymax)
    type(drifters_comm_type) :: self
    real, intent(in) :: xmin, ymin, xmax, ymax

    self%xcmin = max(xmin, self%xcmin)
    self%xcmax = min(xmax, self%xcmax)
    self%ycmin = max(ymin, self%ycmin)
    self%ycmax = min(ymax, self%ycmax)

  end subroutine drifters_comm_set_comp_bounds
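
  ! Note: both bound setters intersect the new box with whatever was set
  ! before, so repeated calls can only shrink the domain. A hedged sketch
  ! with invented values:
  !
  !   call drifters_comm_set_comp_bounds(dcomm, 0., 0., 10., 10.)
  !   call drifters_comm_set_comp_bounds(dcomm, 2., -5., 8., 20.)
  !   ! now dcomm%xcmin=2., xcmax=8., ycmin=0., ycmax=10.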

!===============================================================================
  !> @brief Set neighboring pe numbers.
  subroutine drifters_comm_set_pe_neighbors(self, domain)
    type(drifters_comm_type) :: self !< Drifters communication type to set pe numbers for
    _TYPE_DOMAIN2D, intent(inout) :: domain !< domain to get neighboring PEs from

#ifndef _SERIAL
! parallel code

    integer :: pe_N, pe_S, pe_E, pe_W, pe_NE, pe_SE, pe_SW, pe_NW

    call mpp_get_neighbor_pe(domain, NORTH     , pe_N )
    call mpp_get_neighbor_pe(domain, NORTH_EAST, pe_NE)
    call mpp_get_neighbor_pe(domain, EAST      , pe_E )
    call mpp_get_neighbor_pe(domain, SOUTH_EAST, pe_SE)
    call mpp_get_neighbor_pe(domain, SOUTH     , pe_S )
    call mpp_get_neighbor_pe(domain, SOUTH_WEST, pe_SW)
    call mpp_get_neighbor_pe(domain, WEST      , pe_W )
    call mpp_get_neighbor_pe(domain, NORTH_WEST, pe_NW)

    if(pe_N /= self%pe_N .and. self%pe_N == _NULL_PE) then
       self%pe_N = pe_N
    else if(pe_N /= self%pe_N) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: NORTH PE changed!')
    endif
    if(pe_NE /= self%pe_NE .and. self%pe_NE == _NULL_PE) then
       self%pe_NE = pe_NE
    else if(pe_NE /= self%pe_NE) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: NORTH-EAST PE changed!')
    endif
    if(pe_E /= self%pe_E .and. self%pe_E == _NULL_PE) then
       self%pe_E = pe_E
    else if(pe_E /= self%pe_E) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: EAST PE changed!')
    endif
    if(pe_SE /= self%pe_SE .and. self%pe_SE == _NULL_PE) then
       self%pe_SE = pe_SE
    else if(pe_SE /= self%pe_SE) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: SOUTH-EAST PE changed!')
    endif
    if(pe_S /= self%pe_S .and. self%pe_S == _NULL_PE) then
       self%pe_S = pe_S
    else if(pe_S /= self%pe_S) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: SOUTH PE changed!')
    endif
    if(pe_SW /= self%pe_SW .and. self%pe_SW == _NULL_PE) then
       self%pe_SW = pe_SW
    else if(pe_SW /= self%pe_SW) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: SOUTH-WEST PE changed!')
    endif
    if(pe_W /= self%pe_W .and. self%pe_W == _NULL_PE) then
       self%pe_W = pe_W
    else if(pe_W /= self%pe_W) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: WEST PE changed!')
    endif
    if(pe_NW /= self%pe_NW .and. self%pe_NW == _NULL_PE) then
       self%pe_NW = pe_NW
    else if(pe_NW /= self%pe_NW) then
       call mpp_error(FATAL, 'drifters_comm_set_pe_neighbors: NORTH-WEST PE changed!')
    endif

#endif
! end of parallel code

  end subroutine drifters_comm_set_pe_neighbors

!===============================================================================
  !> @brief Set boundaries of domain and compute neighbors. This method can be called
  !! multiple times; the data domain will just be the intersection (overlap) of
  !! all domains (e.g. domain_u, domain_v, etc.).
  subroutine drifters_comm_set_domain(self, domain, x, y, backoff_x, backoff_y)
    type(drifters_comm_type) :: self
    _TYPE_DOMAIN2D, intent(inout) :: domain
    real, intent(in) :: x(:) !< global x axis
    real, intent(in) :: y(:) !< global y axis
    integer, intent(in) :: backoff_x !< >= 0; the data domain is reduced by backoff_x indices in x
    integer, intent(in) :: backoff_y !< >= 0; the data domain is reduced by backoff_y indices in y

    ! compute/data domain start/end indices
    integer :: isc !< compute domain start index in x
    integer :: iec !< compute domain end index in x
    integer :: jsc !< compute domain start index in y
    integer :: jec !< compute domain end index in y
    integer :: isd !< data domain start index in x
    integer :: ied !< data domain end index in x
    integer :: jsd !< data domain start index in y
    integer :: jed !< data domain end index in y
    integer :: nx, ny, hx, hy, bckf_x, bckf_y, halox, haloy
    real    :: dx, dy, xdmin, xdmax, ydmin, ydmax

#ifdef _SERIAL
    integer :: ibnds(1)

    ibnds = lbound(x); isc = ibnds(1)
    ibnds = ubound(x); iec = ibnds(1) - 1
    ibnds = lbound(y); jsc = ibnds(1)
    ibnds = ubound(y); jec = ibnds(1) - 1
#else
    call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
#endif

    self%xcmin = max(x(isc), self%xcmin)
    self%xcmax = min(x(iec), self%xcmax)
    self%ycmin = max(y(jsc), self%ycmin)
    self%ycmax = min(y(jec), self%ycmax)

    nx = iec - isc + 1
    ny = jec - jsc + 1

#ifdef _SERIAL
    isd = 1; ied = size(x); jsd = 1; jed = size(y)
#else
    call mpp_get_data_domain( domain, isd, ied, jsd, jed )
#endif

    hx = max(ied-iec, isc-isd)
    hy = max(jed-jec, jsc-jsd)
    bckf_x = min(backoff_x, hx)
    bckf_y = min(backoff_y, hy)

    halox = max(0, hx - bckf_x)
    haloy = max(0, hy - bckf_y)

    if(isd < 1) then
       dx = x(2) - x(1)
       xdmin = self%xcmin - dx*halox
    else
       xdmin = x(isd+bckf_x)
    endif

    if(ied > nx) then
       dx = x(nx) - x(nx-1)
       xdmax = self%xcmax + dx*halox
    else
       xdmax = x(ied-bckf_x)
    endif

    if(jsd < 1) then
       dy = y(2) - y(1)
       ydmin = self%ycmin - dy*haloy
    else
       ydmin = y(jsd+bckf_y)
    endif

    if(jed > ny) then
       dy = y(ny) - y(ny-1)
       ydmax = self%ycmax + dy*haloy
    else
       ydmax = y(jed-bckf_y)
    endif

    self%xdmin = max(xdmin, self%xdmin)
    self%ydmin = max(ydmin, self%ydmin)
    self%xdmax = min(xdmax, self%xdmax)
    self%ydmax = min(ydmax, self%ydmax)

    call drifters_comm_set_pe_neighbors(self, domain)

  end subroutine drifters_comm_set_domain
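
  ! Hedged usage sketch: intersecting the drifter domains with two staggered
  ! grids ("domain_u"/"domain_v" and the axis arrays "xu, yu, xv, yv" are
  ! hypothetical caller-side variables). A backoff of 1 trims the data domain
  ! by one index on that axis, e.g. to stay clear of halo points that are
  ! never valid:
  !
  !   call drifters_comm_set_domain(dcomm, domain_u, xu, yu, 0, 1)
  !   call drifters_comm_set_domain(dcomm, domain_v, xv, yv, 1, 0)
  !   ! dcomm now holds the intersection of both compute/data boxes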

!===============================================================================
  !> Update drifter positions, migrating drifters that crossed compute-domain
  !! boundaries to the neighboring PE and applying removals
  subroutine drifters_comm_update(self, drfts, new_positions, &
       & comm, remove, max_add_remove)
#ifndef _SERIAL
    use mpi
#endif

    type(drifters_comm_type) :: self
    type(drifters_core_type) :: drfts
    real, intent(inout) :: new_positions(:,:)
    integer, intent(in), optional :: comm !< MPI communicator
    logical, intent(in), optional :: remove(:) !< set to .true. for particles that should be removed
    integer, intent(in), optional :: max_add_remove !< max number of particles to add/remove

#ifdef _SERIAL
! serial code

    drfts%positions(:, 1:drfts%np) = new_positions(:, 1:drfts%np)
    return

#else
! parallel code
    integer :: nd, np, nar_est, ip, neigh_pe, irem, pe, npes, ntuples
    integer :: ntuples_tot, ndata, mycomm
#ifdef _USE_MPI
    integer :: ier
#endif
    integer, allocatable :: iadd(:)
    integer, allocatable :: table_recv(:), table_send(:)
    real   , allocatable :: data_recv(:,:), data_send(:,:)
    integer, allocatable :: indices_to_remove(:)
    integer, allocatable :: ids_to_add(:)
    real   , allocatable :: positions_to_add(:,:)
    real :: x, y, xold, yold
    character(len=128) :: ermsg, notemsg
    logical :: is_present
    integer :: id, j, k, m, n, el
    logical :: crossed_W, crossed_E, crossed_S, crossed_N
    logical :: was_in_compute_domain, left_domain

    mycomm = MPI_COMM_WORLD
    if( present(comm) ) mycomm = comm

    nd = drfts%nd
    np = size(new_positions, 2)
    if(np > 0 .and. nd < 2) call mpp_error( FATAL, &
         & 'drifters_comm_update: number of dimensions must be 2 or higher.' )

    nar_est = 100
    if(present(max_add_remove)) nar_est = max(1, max_add_remove)

    pe   = mpp_pe()
    npes = mpp_npes()

    ! assume pe list is contiguous, self%pe_beg...self%pe_end
    allocate(iadd(self%pe_beg:self%pe_end))
    allocate(table_recv(self%pe_beg:self%pe_end))
    allocate(table_send(self%pe_beg:self%pe_end))
    allocate(data_recv(nar_est*(1+nd), self%pe_beg:self%pe_end))
    allocate(data_send(nar_est*(1+nd), self%pe_beg:self%pe_end))
    allocate(indices_to_remove(nar_est))

    table_send = 0
    table_recv = 0
    data_send  = 0
    data_recv  = 0

    iadd = 0
    irem = 0
    do ip = 1, np
       x = new_positions(1, ip)
       y = new_positions(2, ip)
       xold = drfts%positions(1, ip)
       yold = drfts%positions(2, ip)

       if( xold<self%xcmin .or. xold>self%xcmax .or. &
            & yold<self%ycmin .or. yold>self%ycmax ) then
          was_in_compute_domain = .false.
       else
          was_in_compute_domain = .true.
       endif

       ! check if drifters crossed compute domain boundary

       crossed_W = .false.
       crossed_E = .false.
       crossed_S = .false.
       crossed_N = .false.
       if( was_in_compute_domain .and. &
            & (x<self%xcmin) .and. (xold>self%xcmin) ) crossed_W = .true.
       if( was_in_compute_domain .and. &
            & (x>self%xcmax) .and. (xold<self%xcmax) ) crossed_E = .true.
       if( was_in_compute_domain .and. &
            & (y<self%ycmin) .and. (yold>self%ycmin) ) crossed_S = .true.
       if( was_in_compute_domain .and. &
            & (y>self%ycmax) .and. (yold<self%ycmax) ) crossed_N = .true.

       ! pick the neighbor PE that should receive this drifter
       neigh_pe = _NULL_PE
       if(crossed_N .and. .not. crossed_E .and. .not. crossed_W) neigh_pe = self%pe_N
       if(crossed_N .and.       crossed_E                      ) neigh_pe = self%pe_NE
       if(crossed_E .and. .not. crossed_N .and. .not. crossed_S) neigh_pe = self%pe_E
       if(crossed_S .and.       crossed_E                      ) neigh_pe = self%pe_SE
       if(crossed_S .and. .not. crossed_E .and. .not. crossed_W) neigh_pe = self%pe_S
       if(crossed_S .and.       crossed_W                      ) neigh_pe = self%pe_SW
       if(crossed_W .and. .not. crossed_S .and. .not. crossed_N) neigh_pe = self%pe_W
       if(crossed_N .and.       crossed_W                      ) neigh_pe = self%pe_NW

       if(neigh_pe /= _NULL_PE) then
          iadd(neigh_pe) = iadd(neigh_pe) + 1
          if(iadd(neigh_pe) > nar_est) then
             write(notemsg, '(a,i4,a,i4,a)') 'drifters_comm_update: exceeded nar_est (', &
                  & iadd(neigh_pe), '>', nar_est, ').'
             call mpp_error( FATAL, notemsg)
          endif
          table_send(neigh_pe) = table_send(neigh_pe) + 1
          k = ( iadd(neigh_pe)-1 )*(1+nd) + 1
          data_send(k       , neigh_pe) = drfts%ids(ip)
          data_send(k+1:k+nd, neigh_pe) = new_positions(:, ip)
       endif

       ! check if drifters left data domain

       left_domain = .false.
       if( (x<self%xdmin .and. (self%pe_W/=pe)) .or. &
            & (x>self%xdmax .and. (self%pe_E/=pe)) .or. &
            & (y<self%ydmin .and. (self%pe_S/=pe)) .or. &
            & (y>self%ydmax .and. (self%pe_N/=pe)) ) then
          left_domain = .true.
       endif

       ! remove if particle was tagged as such

       if(present(remove)) then
          if(remove(ip)) left_domain = .true.
       endif

       if(left_domain) then
          irem = irem + 1
          if(irem > nar_est) then
             write(notemsg, '(a,i4,a,i4,a)') 'drifters_comm_update: exceeded nar_est (', &
                  & irem, '>', nar_est, ').'
             call mpp_error( FATAL, notemsg)
          endif
          indices_to_remove(irem) = ip
       endif

    enddo

    ! update drifters' positions (remove whatever needs to be removed later)
    call drifters_core_set_positions(drfts, new_positions, ermsg)
    if(ermsg /= '') call mpp_error( FATAL, ermsg)

    ! fill in table_recv from table_send. table_send contains the
    ! number of tuples that will be sent to another pe; table_recv
    ! will contain the number of tuples to be received. The indices
    ! of table_send refer to the pe the tuples are sent to; the
    ! indices of table_recv refer to the pe number
    ! (self%pe_beg..self%pe_end) the tuples are received from.
    !
    ! table_send(to_pe) = ntuples; table_recv(from_pe) = ntuples

    ! the following is a transpose operation
    ! table_send(m)[pe] -> table_recv(pe)[m]
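    !
    ! An invented three-PE illustration (PEs 0..2): if
    !   table_send on pe 0 = (0,2,1), on pe 1 = (0,0,0), on pe 2 = (3,0,0)
    ! then after the exchange
    !   table_recv on pe 0 = (0,0,3), on pe 1 = (2,0,0), on pe 2 = (1,0,0)
    ! i.e. table_recv(m) on pe p equals table_send(p) as set on pe m.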
    do m = self%pe_beg, self%pe_end
#ifdef _USE_MPI
       call mpi_scatter (table_send   , 1, MPI_INTEGER, &
            &            table_recv(m), 1, MPI_INTEGER, &
            &            m, mycomm, ier )
#else
       if(pe == m) then
          do k = self%pe_beg, self%pe_end
             call mpp_send(table_send(k), plen=1, to_pe=k, tag=COMM_TAG_1)
          enddo
       endif
       call mpp_recv(table_recv(m), glen=1, from_pe=m, tag=COMM_TAG_1)
#endif
    enddo

    ! communicate new positions. data_send is an array of size nar_est*(1+nd) by npes.
    ! Each column j of data_send contains the tuples (id, x, y, ...) to be sent to pe=j;
    ! inversely, column j of data_recv contains the tuples (id, x, y, ...) received from pe=j.
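    !
    ! Packing-layout sketch for nd=2: each tuple takes 1+nd = 3 slots, so
    ! tuple n bound for pe j occupies rows 3*(n-1)+1 .. 3*n of column j:
    !
    !   data_send(:, j) = ( id_1, x_1, y_1,  id_2, x_2, y_2, ... )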
    do m = self%pe_beg, self%pe_end
       ntuples = table_send(m)
       ndata = ntuples*(nd+1)
       ! should be able to send ndata?
#ifdef _USE_MPI
       call mpi_scatter (data_send     , nar_est*(1+nd), MPI_REAL8, &
            &            data_recv(1,m), nar_est*(1+nd), MPI_REAL8, &
            &            m, mycomm, ier )
#else
       if(pe == m) then
          do k = self%pe_beg, self%pe_end
             call mpp_send(data_send(1,k), plen=nar_est*(1+nd), to_pe=k, tag=COMM_TAG_2)
          enddo
       endif
       call mpp_recv(data_recv(1,m), glen=nar_est*(1+nd), from_pe=m, tag=COMM_TAG_2)
#endif
    enddo

    ! total number of tuples will determine size of ids_to_add/positions_to_add
    ntuples_tot = 0
    do m = self%pe_beg, self%pe_end
       ntuples_tot = ntuples_tot + table_recv(m)
    enddo

    allocate(positions_to_add(nd, ntuples_tot))
    allocate( ids_to_add(        ntuples_tot))

    ! fill positions_to_add and ids_to_add
    k = 0
    do m = self%pe_beg, self%pe_end
       ! get ids/positions coming from all pes
       do n = 1, table_recv(m)
          ! iterate over all ids/positions coming from pe=m
          el = (n-1)*(nd+1) + 1
          id = int(data_recv(el, m))
          ! only add if id is not already present in drfts;
          ! this can happen if a drifter meanders about
          ! the compute domain boundary
          is_present = .false.
          do j = 1, drfts%np
             if(id == drfts%ids(j)) then
                is_present = .true.
                write(notemsg, '(a,i4,a)') 'Drifter ', id, ' already advected (will not be added).'
                call mpp_error(NOTE, notemsg)
                exit
             endif
          enddo
          if(.not. is_present) then
             k = k + 1
             ids_to_add(k) = id
             positions_to_add(1:nd, k) = data_recv(el+1:el+nd, m)
          endif
       enddo
    enddo

    ! remove and add
    if(irem > 0 .or. k > 0) then
       write(notemsg, '(i4,a,i4,a)') irem, ' drifter(s) will be removed, ', k, ' will be added'
       call mpp_error(NOTE, notemsg)
!!$       if(k>0)    print *, 'positions to add ', positions_to_add(:,1:k)
!!$       if(irem>0) print *, 'ids to remove: ', indices_to_remove(1:irem)
    endif
    call drifters_core_remove_and_add(drfts, indices_to_remove(1:irem), &
         & ids_to_add(1:k), positions_to_add(:,1:k), ermsg)
    if(ermsg /= '') call mpp_error( FATAL, ermsg)

#ifndef _USE_MPI
    ! make sure the unbuffered mpp_send calls have completed before deallocating
    call mpp_sync_self()
#endif

    deallocate(ids_to_add)
    deallocate(positions_to_add)

    deallocate(iadd)
    deallocate(table_recv)
    deallocate(table_send)
    deallocate(data_recv)
    deallocate(data_send)
    deallocate(indices_to_remove)

#endif
! end of parallel code

  end subroutine drifters_comm_update
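
  ! Hedged call sketch for one time step ("dcomm", "drfts", "xyz_new" and
  ! "dead" are hypothetical caller-side variables; xyz_new has shape (nd,np)):
  !
  !   call drifters_comm_update(dcomm, drfts, xyz_new, &
  !        &                    remove=dead, max_add_remove=256)
  !
  ! Drifters that crossed a compute-domain edge are handed to the matching
  ! neighbor PE; drifters that left the data domain, or have dead(ip)=.true.,
  ! are removed locally.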

!===============================================================================
  !> Gathers all drifter data on the root PE and saves it to a restart file
  subroutine drifters_comm_gather(self, drfts, dinp, &
       & lons, lats, do_save_lonlat, &
       & filename, &
       & root, mycomm)

#ifndef _SERIAL
    use mpi
#endif
    use drifters_input_mod, only : drifters_input_type, drifters_input_save

    type(drifters_comm_type) :: self
    type(drifters_core_type) :: drfts
    type(drifters_input_type) :: dinp
    real, intent(in) :: lons(:), lats(:)
    logical, intent(in) :: do_save_lonlat
    character(len=*), intent(in) :: filename
    integer, intent(in), optional :: root !< root pe
    integer, intent(in), optional :: mycomm !< MPI communicator

    character(len=128) :: ermesg

#ifdef _SERIAL
! serial code

    dinp%ids(1:drfts%np) = drfts%ids(1:drfts%np)
    dinp%positions(:, 1:drfts%np) = drfts%positions(:, 1:drfts%np)

    if(do_save_lonlat) then

       call drifters_input_save(dinp, filename=filename, &
            & geolon=lons, geolat=lats, ermesg=ermesg)

    else

       call drifters_input_save(dinp, filename=filename, ermesg=ermesg)

    endif

#else
! parallel code

    integer :: npf, ip, comm, root_pe, pe, npes, nd, np, npdim, npmax, ier, nptot
    integer :: i, j, k, kk
    integer, allocatable :: nps(:)
    real :: x, y
    real, allocatable :: lons0(:), lats0(:), recvbuf(:,:)
    real :: com_data(drfts%nd+3, drfts%np) ! communication data

    comm = MPI_COMM_WORLD
    if(present(mycomm)) comm = mycomm

    root_pe = mpp_root_pe()
    if(present(root)) root_pe = root

    pe   = mpp_pe()
    npes = mpp_npes()

    nd    = drfts%nd
    np    = drfts%np
    npdim = drfts%npdim

    allocate(nps(self%pe_beg:self%pe_end))
    nps = 0

    ! npf = number of drifters in compute domain

    npf = 0
    do ip = 1, np
       x = drfts%positions(1, ip)
       y = drfts%positions(2, ip)
       if( x <= self%xcmax .and. x >= self%xcmin .and. &
            & y <= self%ycmax .and. y >= self%ycmin) then
          npf = npf + 1
          com_data(1       , npf) = real(drfts%ids(ip))
          com_data(1+1:1+nd, npf) = drfts%positions(:, ip)
          com_data(    2+nd, npf) = lons(ip)
          com_data(    3+nd, npf) = lats(ip)
       endif
    enddo

    ! gather number of drifters
#ifdef _USE_MPI
    call mpi_gather(npf, 1, MPI_INTEGER, &
         &          nps, 1, MPI_INTEGER, &
         &          root_pe, comm, ier)
    !!if(ier/=0) ermesg = 'drifters_write_restart: ERROR while gathering "npf"'
#else
    call mpp_send(npf, plen=1, to_pe=root_pe, tag=COMM_TAG_3)
    if(pe == root_pe) then
       do i = self%pe_beg, self%pe_end
          call mpp_recv(nps(i), glen=1, from_pe=i, tag=COMM_TAG_3)
       enddo
    endif
#endif

    ! Now we know the max number of drifters to expect from each PE, so allocate
    ! recvbuf (the first dim will be zero on all PEs except root).

    ! allocate recvbuf to receive all the data on the root PE, strided by npmax*(nd+3)
    npmax = maxval(nps)
    allocate(recvbuf(npmax*(nd+3), self%pe_beg:self%pe_end))
    recvbuf = -1

    ! Each PE sends its data to recvbuf on root_pe.
#ifdef _USE_MPI
    call mpi_gather( com_data, npf*(nd+3), MPI_REAL8, &
         &           recvbuf, npmax*(nd+3), MPI_REAL8, &
         &           root_pe, comm, ier)
    !!if(ier/=0) ermesg = 'drifters_write_restart: ERROR while gathering "data"'
#else
    if(npf > 0) call mpp_send(com_data(1,1), plen=npf*(nd+3), to_pe=root_pe, tag=COMM_TAG_4)
    if(pe == root_pe) then
       do i = self%pe_beg, self%pe_end
          if(nps(i) > 0) call mpp_recv(recvbuf(1, i), glen=nps(i)*(nd+3), from_pe=i, tag=COMM_TAG_4)
       enddo
    endif
#endif

    ! Set positions and ids
    if(pe == root_pe) then
       ! check dims
       nptot = sum(nps) ! total number of drifters, across all PEs
       if(nptot /= size(dinp%ids)) then
          deallocate(dinp%ids      , stat=ier)
          deallocate(dinp%positions, stat=ier)
          allocate(dinp%ids(nptot))
          allocate(dinp%positions(nd, nptot))
          dinp%ids = -1
          dinp%positions = -huge(1.)
       endif

       allocate(lons0(nptot), lats0(nptot))

       ! Set the new positions/ids in dinp, on PE=root. Also set
       ! lons/lats; these arrays will hold garbage if x1, y1, etc. were
       ! not passed as subroutine arguments, but that's ok because we
       ! won't save them in that case.
       j = 1
       do i = self%pe_beg, self%pe_end
          do k = 1, nps(i)
             kk = (nd+3)*(k-1)
             dinp%ids(j) = int(recvbuf(kk+1, i))
             dinp%positions(1:nd, j) = recvbuf(kk+1+1:kk+1+nd, i)
             lons0(j) = recvbuf(kk+2+nd, i)
             lats0(j) = recvbuf(kk+3+nd, i)
             j = j + 1
          enddo
       enddo

       if(do_save_lonlat) then

          call drifters_input_save(dinp, filename=filename, &
               & geolon=lons0, geolat=lats0, ermesg=ermesg)

       else

          call drifters_input_save(dinp, filename=filename, ermesg=ermesg)

       endif

       deallocate(lons0, lats0)

    endif

#ifndef _USE_MPI
    call mpp_sync_self()
#endif
    deallocate(nps    , stat=ier)
    deallocate(recvbuf, stat=ier)

#endif
! end of parallel code

  end subroutine drifters_comm_gather
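
  ! Hedged call sketch ("dcomm", "drfts", "dinp", "lons" and "lats" are
  ! hypothetical caller-side variables); every PE participates, but only
  ! the root PE writes the restart file:
  !
  !   call drifters_comm_gather(dcomm, drfts, dinp, lons, lats, &
  !        &                    do_save_lonlat=.true., filename='drifters_res.nc')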

#endif
end module drifters_comm_mod

!===============================================================================
!===============================================================================