MODFLOW 6  version 6.7.0.dev1
USGS Modular Hydrologic Model
MpiRouter.f90
module MpiRouterModule
  use RouterBaseModule
  use KindModule, only: I4B, LGP
  use ProfilerModule, only: g_prof
  use STLVecIntModule
  use SimVariablesModule, only: proc_id, nr_procs
  use SimStagesModule, only: STG_TO_STR, NR_SIM_STAGES
  use VirtualDataListsModule, only: virtual_model_list, virtual_exchange_list
  use VirtualBaseModule, only: NR_VDC_ELEMENT_MAPS
  use VirtualDataContainerModule
  use VirtualExchangeModule, only: VirtualExchangeType
  use VirtualSolutionModule
  use MpiMessageBuilderModule
  use MpiMessageCacheModule
  use MpiWorldModule
  use mpi
  implicit none
  private

  public :: create_mpi_router

  type, public, extends(RouterBaseType) :: MpiRouterType
    integer(I4B), dimension(:), pointer :: model_proc_ids
    type(STLVecInt) :: senders !< the process ids to receive data from
    type(STLVecInt) :: receivers !< the process ids to send data to
    type(VdcPtrType), dimension(:), pointer :: all_models => null() !< all virtual models from the global list
    type(VdcPtrType), dimension(:), pointer :: all_exchanges => null() !< all virtual exchanges from the global list
    type(VdcPtrType), dimension(:), pointer :: rte_models => null() !< the currently active models to be routed
    type(VdcPtrType), dimension(:), pointer :: rte_exchanges => null() !< the currently active exchanges to be routed
    type(MpiMessageBuilderType) :: message_builder
    type(MpiMessageCacheType) :: msg_cache
    type(MpiWorldType), pointer :: mpi_world => null()
    integer(I4B) :: imon !< the output file unit for the MPI monitor
    logical(LGP) :: enable_monitor !< when true, log diagnostics
    integer(I4B), dimension(:, :), allocatable :: tmr_mpi_wait !< array with timer handles for MPI_Wait calls
  contains
    procedure :: initialize => mr_initialize
    procedure :: route_all => mr_route_all
    procedure :: route_sln => mr_route_sln
    procedure :: finalize => mr_finalize
    procedure :: destroy => mr_destroy
    ! private
    procedure, private :: activate
    procedure, private :: deactivate
    procedure, private :: update_senders
    procedure, private :: update_receivers
    procedure, private :: route_active
    procedure, private :: is_cached
    procedure, private :: compose_messages
    procedure, private :: load_messages
  end type MpiRouterType

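  ! Note: rte_models and rte_exchanges are views rather than copies:
  ! route_all points them at the global lists (all_models, all_exchanges),
  ! route_sln at the lists of a single solution (see activate/deactivate).
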
contains

  !> Factory method to create MPI router
  !<
  function create_mpi_router() result(router)
    class(RouterBaseType), pointer :: router
    ! local
    class(MpiRouterType), pointer :: mpi_router

    allocate (mpi_router)
    router => mpi_router

  end function create_mpi_router

  subroutine mr_initialize(this)
    use InputOutputModule, only: getunit
    use ConstantsModule, only: LINELENGTH
    class(MpiRouterType) :: this
    ! local
    integer :: ierr
    integer(I4B) :: i
    integer(I4B) :: nr_models, nr_exchanges
    class(VirtualDataContainerType), pointer :: vdc
    character(len=LINELENGTH) :: monitor_file

    ! allocate timer handles: global + nr. of solutions
    allocate (this%tmr_mpi_wait(NR_SIM_STAGES, this%nr_virt_solutions + 1))
    this%tmr_mpi_wait = -1

    ! routing over all when starting
    this%halo_activated = .false.

    ! to log or not to log
    this%enable_monitor = .false.

    ! initialize the MPI message builder and cache
    call this%message_builder%init()
    call this%msg_cache%init()

    ! get mpi world for our process
    this%mpi_world => get_mpi_world()

    ! init address lists
    call this%senders%init()
    call this%receivers%init()

    ! find out where the models are
    nr_models = virtual_model_list%Count()
    nr_exchanges = virtual_exchange_list%Count()
    allocate (this%model_proc_ids(nr_models))
    allocate (this%all_models(nr_models))
    allocate (this%all_exchanges(nr_exchanges))

    do i = 1, nr_models
      vdc => get_vdc_from_list(virtual_model_list, i)
      this%all_models(i)%ptr => vdc
      if (vdc%is_local) then
        this%model_proc_ids(i) = proc_id
      else
        this%model_proc_ids(i) = 0
      end if
    end do

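    ! Summing element-wise over all processes fills in the complete
    ! ownership table: each rank contributed its own id for its local
    ! models and zero for all others.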
    call MPI_Allreduce(MPI_IN_PLACE, this%model_proc_ids, nr_models, &
                       MPI_INTEGER, MPI_SUM, this%mpi_world%comm, ierr)
    call check_mpi(ierr)

    ! set the process id to the models and exchanges
    do i = 1, nr_models
      vdc => this%all_models(i)%ptr
      call vdc%set_orig_rank(this%model_proc_ids(i))
    end do

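    ! An exchange is routed from the rank of its first model, unless
    ! that model is local to this process, in which case the rank of
    ! the second model is used.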
    do i = 1, nr_exchanges
      vdc => get_vdc_from_list(virtual_exchange_list, i)
      this%all_exchanges(i)%ptr => vdc
      select type (vex => vdc)
      class is (VirtualExchangeType)
        call vex%set_orig_rank(vex%v_model1%orig_rank)
        if (vex%v_model1%is_local) then
          call vex%set_orig_rank(vex%v_model2%orig_rank)
        end if
      end select
    end do

    ! open log file
    if (this%enable_monitor) then
      this%imon = getunit()
      write (monitor_file, '(a,i0,a)') "mpi.p", proc_id, ".log"
      open (unit=this%imon, file=monitor_file)
      call this%message_builder%set_monitor(this%imon)

      ! write initial info
      write (this%imon, '(a,/)') ">> initialize MPI Router:"
      write (this%imon, '(2x,a,i0)') "process id: ", proc_id
      write (this%imon, '(2x,a,i0)') "nr. of processes: ", nr_procs
      write (this%imon, '(2x,a,i0)') "nr. of models: ", nr_models
      write (this%imon, '(2x,a,i0)') "nr. of exchanges: ", nr_exchanges
      write (this%imon, '(2x,a)') "model id, processor id:"
      do i = 1, nr_models
        write (this%imon, '(4x,2i8)') i, this%model_proc_ids(i)
      end do
      write (this%imon, '(a,/)') "<< initialize done"
    end if

  end subroutine mr_initialize

  !> @brief Activate models and exchanges for routing
  !<
  subroutine activate(this, models, exchanges)
    class(MpiRouterType) :: this
    type(VdcPtrType), dimension(:), pointer :: models
    type(VdcPtrType), dimension(:), pointer :: exchanges

    this%rte_models => models
    this%rte_exchanges => exchanges
    call this%message_builder%attach_data(models, exchanges)

  end subroutine activate

  !> @brief Deactivate data after routing
  !<
  subroutine deactivate(this)
    class(MpiRouterType) :: this

    this%rte_models => null()
    this%rte_exchanges => null()
    call this%message_builder%release_data()

  end subroutine deactivate

  !> @brief This will route all remote data from the
  !! global models and exchanges over MPI, for a
  !< given stage
  subroutine mr_route_all(this, stage)
    use MemoryManagerModule, only: mem_print_detailed
    class(MpiRouterType) :: this
    integer(I4B) :: stage

    if (this%enable_monitor) then
      write (this%imon, '(/,2a)') ">> routing all: ", STG_TO_STR(stage)
    end if

    ! route all
    call this%activate(this%all_models, this%all_exchanges)
    call this%route_active(0, stage)
    call this%deactivate()

    if (this%enable_monitor) then
      write (this%imon, '(a,/)') "<< end routing all"
      !call mem_print_detailed(this%imon)
    end if

  end subroutine mr_route_all

  !> @brief This will route all remote data from models
  !! and exchanges in a particular solution over MPI,
  !< for a given stage
  subroutine mr_route_sln(this, virtual_sol, stage)
    class(MpiRouterType) :: this
    type(VirtualSolutionType) :: virtual_sol
    integer(I4B) :: stage

    if (this%enable_monitor) then
      write (this%imon, '(/,a,i0,2a)') ">> routing solution: ", &
        virtual_sol%solution_id, ", ", STG_TO_STR(stage)
    end if

    ! route for this solution
    call this%activate(virtual_sol%models, virtual_sol%exchanges)
    call this%route_active(virtual_sol%solution_id, stage)
    call this%deactivate()

    if (this%enable_monitor) then
      write (this%imon, '(a)') "<< end routing solution"
    end if

  end subroutine mr_route_sln

  !> @brief Routes the models and exchanges over MPI,
  !! either constructing the message bodies in a sequence
  !! of communication steps, or by loading them from cache
  !< for efficiency
  subroutine route_active(this, unit, stage)
    use SimModule, only: store_error
    class(MpiRouterType) :: this !< this mpi router
    integer(I4B) :: unit !< the solution id, or equal to 0 when global
    integer(I4B) :: stage !< the stage to route
    ! local
    integer(I4B) :: i
    integer(I4B) :: rnk
    integer :: ierr, msg_size
    logical(LGP) :: from_cache
    ! mpi handles
    integer, dimension(:), allocatable :: rcv_req
    integer, dimension(:), allocatable :: snd_req
    integer, dimension(:, :), allocatable :: rcv_stat

    ! message body
    integer, dimension(:), allocatable :: body_rcv_t
    integer, dimension(:), allocatable :: body_snd_t

    ! update address list
    call this%update_senders()
    call this%update_receivers()

    ! allocate body data
    allocate (body_rcv_t(this%senders%size))
    allocate (body_snd_t(this%receivers%size))

    ! allocate handles
    allocate (rcv_req(this%senders%size))
    allocate (snd_req(this%receivers%size))
    allocate (rcv_stat(MPI_STATUS_SIZE, this%senders%size))

    ! always initialize request handles
    rcv_req = MPI_REQUEST_NULL
    snd_req = MPI_REQUEST_NULL

    if (this%enable_monitor) then
      write (this%imon, '(2x,a,*(i3))') "process ids sending data: ", &
        this%senders%get_values()
      write (this%imon, '(2x,a,*(i3))') "process ids receiving data: ", &
        this%receivers%get_values()
    end if

    ! from cache or build
    from_cache = this%is_cached(unit, stage)
    if (.not. from_cache) then
      call this%compose_messages(unit, stage, body_snd_t, body_rcv_t)
    else
      call this%load_messages(unit, stage, body_snd_t, body_rcv_t)
    end if

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== communicating bodies =="
    end if

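    ! Each body is transferred as a single derived datatype anchored at
    ! MPI_BOTTOM: the datatype encodes the absolute addresses of the
    ! virtual data items, so no intermediate pack/unpack buffer is needed.
    ! The stage id doubles as the MPI message tag.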
    ! recv bodies
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "receiving from process: ", rnk
      end if

      call MPI_Type_size(body_rcv_t(i), msg_size, ierr)
      if (msg_size > 0) then
        call MPI_Irecv(MPI_BOTTOM, 1, body_rcv_t(i), rnk, stage, &
                       this%mpi_world%comm, rcv_req(i), ierr)
        call check_mpi(ierr)
      end if

      if (this%enable_monitor) then
        write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
      end if
    end do

    ! send bodies
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "sending to process: ", rnk
      end if

      call MPI_Type_size(body_snd_t(i), msg_size, ierr)
      if (msg_size > 0) then
        call MPI_Isend(MPI_BOTTOM, 1, body_snd_t(i), rnk, stage, &
                       this%mpi_world%comm, snd_req(i), ierr)
        call check_mpi(ierr)
      end if

      if (this%enable_monitor) then
        write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
        call flush (this%imon)
      end if
    end do

    ! wait for exchange of all messages
    call g_prof%start("MPI_WaitAll ("//trim(STG_TO_STR(stage))//")", &
                      this%tmr_mpi_wait(stage, unit + 1))
    call MPI_Waitall(this%senders%size, rcv_req, rcv_stat, ierr)
    call g_prof%stop(this%tmr_mpi_wait(stage, unit + 1))
    call check_mpi(ierr)

    deallocate (rcv_req, snd_req, rcv_stat)
    deallocate (body_rcv_t, body_snd_t)

  end subroutine route_active

  !> @brief Constructs the message bodies' MPI datatypes.
  !!
  !! Constructs the message bodies' MPI datatypes for a
  !! unit (a solution) and a given stage. This is done in
  !! a sequence of 6 steps (distributed over 3 phases):
  !!
  !! == synchronizing headers (VdcHeaderType) ==
  !!
  !! step 1:
  !! Receive headers from remote addresses requesting data
  !! from virtual data containers (models, exchanges, ...) local to this process
  !! step 2:
  !! Send headers to remote addresses to indicate for which
  !! virtual data containers (models, exchanges, ...) data is requested
  !!
  !! == synchronizing maps (VdcReceiverMapsType) ==
  !!
  !! step 3:
  !! Based on the received headers, receive element maps (which elements
  !! are to be sent from a contiguous array) for outgoing data
  !! step 4:
  !! Send element maps to remote addresses to specify incoming data
  !!
  !! == construct message body data types (VirtualDataContainerType) ==
  !!
  !! step 5:
  !! Construct the message receive body based on the virtual
  !! data items in the virtual data containers, and cache it
  !!
  !! step 6:
  !! Construct the message send body, based on the received header data
  !! and maps, from the virtual data containers, and cache it
  !<
  subroutine compose_messages(this, unit, stage, body_snd_t, body_rcv_t)
    class(MpiRouterType) :: this
    integer(I4B) :: unit
    integer(I4B) :: stage
    integer, dimension(:) :: body_snd_t
    integer, dimension(:) :: body_rcv_t
    ! local
    integer(I4B) :: i, j, k
    integer(I4B) :: rnk
    integer :: ierr
    ! mpi handles
    integer, dimension(:), allocatable :: rcv_req
    integer, dimension(:), allocatable :: snd_req
    integer, dimension(:, :), allocatable :: rcv_stat
    ! message header
    integer(I4B) :: max_headers
    type(VdcHeaderType), dimension(:, :), allocatable :: headers
    integer, dimension(:), allocatable :: hdr_rcv_t
    integer, dimension(:), allocatable :: hdr_snd_t
    integer, dimension(:), allocatable :: hdr_rcv_cnt
    ! maps
    type(VdcReceiverMapsType), dimension(:, :), allocatable :: rcv_maps
    integer, dimension(:), allocatable :: map_rcv_t
    integer, dimension(:), allocatable :: map_snd_t

    ! allocate handles
    allocate (rcv_req(this%receivers%size))
    allocate (snd_req(this%senders%size))
    allocate (rcv_stat(MPI_STATUS_SIZE, this%receivers%size))

    ! init handles
    rcv_req = MPI_REQUEST_NULL
    snd_req = MPI_REQUEST_NULL

    ! allocate header data
    max_headers = size(this%rte_models) + size(this%rte_exchanges)
    allocate (hdr_rcv_t(this%receivers%size))
    allocate (hdr_snd_t(this%senders%size))
    allocate (headers(max_headers, this%receivers%size))
    allocate (hdr_rcv_cnt(this%receivers%size))

    ! allocate map data
    allocate (map_snd_t(this%senders%size))
    allocate (map_rcv_t(this%receivers%size))
    allocate (rcv_maps(max_headers, this%receivers%size)) ! for every header, we potentially need the maps

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== communicating headers =="
    end if

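    ! Note the role reversal in this phase: requests travel in the
    ! direction opposite to the data. Headers and maps come in from the
    ! ranks we will send data to (receivers) and go out to the ranks we
    ! will receive data from (senders).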
    ! first receive headers for outward data
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "Ireceive header from process: ", rnk
      end if
      call this%message_builder%create_header_rcv(hdr_rcv_t(i))
      call MPI_Irecv(headers(:, i), max_headers, hdr_rcv_t(i), rnk, stage, &
                     this%mpi_world%comm, rcv_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! send header for incoming data
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "send header to process: ", rnk
      end if
      call this%message_builder%create_header_snd(rnk, stage, hdr_snd_t(i))
      call MPI_Isend(MPI_BOTTOM, 1, hdr_snd_t(i), rnk, stage, &
                     this%mpi_world%comm, snd_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! wait for exchange of all headers
    call g_prof%start("MPI_WaitAll ("//trim(STG_TO_STR(stage))//")", &
                      this%tmr_mpi_wait(stage, unit + 1))
    call MPI_Waitall(this%receivers%size, rcv_req, rcv_stat, ierr)
    call g_prof%stop(this%tmr_mpi_wait(stage, unit + 1))
    call check_mpi(ierr)

    ! reinit handles
    rcv_req = MPI_REQUEST_NULL
    snd_req = MPI_REQUEST_NULL

    ! after WaitAll we can count incoming headers from statuses
    do i = 1, this%receivers%size
      call MPI_Get_count(rcv_stat(:, i), hdr_rcv_t(i), hdr_rcv_cnt(i), ierr)

      if (this%enable_monitor) then
        rnk = this%receivers%at(i)
        write (this%imon, '(4x,a,i0)') "received headers from process: ", rnk
        write (this%imon, '(6x,a)') "expecting data for:"
        do j = 1, hdr_rcv_cnt(i)
          write (this%imon, '(6x,a,i0,a,a)') "id: ", headers(j, i)%id, &
            " type: ", trim(VDC_TYPE_TO_STR(headers(j, i)%container_type))
          write (this%imon, '(6x,a,99i6)') "map sizes: ", headers(j, i)%map_sizes
        end do
      end if
    end do

    ! clean up types
    do i = 1, this%receivers%size
      call MPI_Type_free(hdr_rcv_t(i), ierr)
    end do
    do i = 1, this%senders%size
      call MPI_Type_free(hdr_snd_t(i), ierr)
    end do

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== communicating maps =="
    end if

    ! allocate space for receiving maps
    do i = 1, this%receivers%size
      do j = 1, hdr_rcv_cnt(i)
        call rcv_maps(j, i)%create(headers(j, i)%map_sizes)
      end do
    end do

    ! receive maps
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "Ireceive maps from process: ", rnk
      end if

      call this%message_builder%create_map_rcv(rcv_maps(:, i), hdr_rcv_cnt(i), &
                                               map_rcv_t(i))
      call MPI_Irecv(MPI_BOTTOM, 1, map_rcv_t(i), rnk, stage, &
                     this%mpi_world%comm, rcv_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! send maps
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "send map to process: ", rnk
      end if

      call this%message_builder%create_map_snd(rnk, stage, map_snd_t(i))
      call MPI_Isend(MPI_BOTTOM, 1, map_snd_t(i), rnk, stage, &
                     this%mpi_world%comm, snd_req(i), ierr)
      call check_mpi(ierr)
    end do

    ! wait on receiving maps
    call g_prof%start("MPI_WaitAll ("//trim(STG_TO_STR(stage))//")", &
                      this%tmr_mpi_wait(stage, unit + 1))
    call MPI_Waitall(this%receivers%size, rcv_req, rcv_stat, ierr)
    call g_prof%stop(this%tmr_mpi_wait(stage, unit + 1))
    call check_mpi(ierr)

    ! print maps
    if (this%enable_monitor) then
      do i = 1, this%receivers%size
        rnk = this%receivers%at(i)
        write (this%imon, '(4x,a,i0)') "received maps from process: ", rnk
        do j = 1, hdr_rcv_cnt(i)
          write (this%imon, '(6x,a,i0,a,a)') "id: ", headers(j, i)%id, &
            " type: ", trim(VDC_TYPE_TO_STR(headers(j, i)%container_type))
          do k = 1, NR_VDC_ELEMENT_MAPS
            write (this%imon, '(8x,i0,a,i0)') k, " nr. elements: ", &
              rcv_maps(j, i)%el_maps(k)%nr_virt_elems
            if (rcv_maps(j, i)%el_maps(k)%nr_virt_elems > 0) then
              write (this%imon, '(8x,*(i6))') &
                rcv_maps(j, i)%el_maps(k)%remote_elem_shift
            end if
          end do
        end do
      end do
    end if

    ! clean up types
    do i = 1, this%receivers%size
      call MPI_Type_free(map_rcv_t(i), ierr)
    end do
    do i = 1, this%senders%size
      call MPI_Type_free(map_snd_t(i), ierr)
    end do

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "== composing message bodies =="
    end if

    ! construct recv bodies and cache
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "build recv body for process: ", rnk
      end if

      call this%message_builder%create_body_rcv(rnk, stage, body_rcv_t(i))
      call this%msg_cache%put(unit, rnk, stage, MPI_BDY_RCV, body_rcv_t(i))
    end do

    ! construct send bodies and cache
    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      if (this%enable_monitor) then
        write (this%imon, '(4x,a,i0)') "build send body for process: ", rnk
      end if

      call this%message_builder%create_body_snd( &
        rnk, stage, headers(1:hdr_rcv_cnt(i), i), &
        rcv_maps(:, i), body_snd_t(i))
      call this%msg_cache%put(unit, rnk, stage, MPI_BDY_SND, body_snd_t(i))
    end do

    ! clean up element maps
    do i = 1, this%receivers%size
      do j = 1, hdr_rcv_cnt(i)
        call rcv_maps(j, i)%destroy()
      end do
    end do

    deallocate (rcv_req, snd_req, rcv_stat)
    deallocate (hdr_rcv_t, hdr_snd_t, hdr_rcv_cnt)
    deallocate (headers)
    deallocate (map_rcv_t, map_snd_t)
    deallocate (rcv_maps)

  end subroutine compose_messages

  !> @brief Load the message body MPI datatypes from cache
  !<
  subroutine load_messages(this, unit, stage, body_snd_t, body_rcv_t)
    class(MpiRouterType) :: this
    integer(I4B) :: unit
    integer(I4B) :: stage
    integer, dimension(:), allocatable :: body_snd_t
    integer, dimension(:), allocatable :: body_rcv_t
    ! local
    integer(I4B) :: i, rnk

    if (this%enable_monitor) then
      write (this%imon, '(2x,a)') "... running from cache ..."
    end if

    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      body_snd_t(i) = this%msg_cache%get(unit, rnk, stage, MPI_BDY_SND)
    end do
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      body_rcv_t(i) = this%msg_cache%get(unit, rnk, stage, MPI_BDY_RCV)
    end do

  end subroutine load_messages

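  !> @brief Update the list of ranks to receive data from: the owning
  !! ranks of all remote, active containers in the current route set
  !<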
  subroutine update_senders(this)
    class(MpiRouterType) :: this
    ! local
    integer(I4B) :: i
    class(VirtualDataContainerType), pointer :: vdc

    call this%senders%clear()

    do i = 1, size(this%rte_models)
      vdc => this%rte_models(i)%ptr
      if (.not. vdc%is_local .and. vdc%is_active) then
        call this%senders%push_back_unique(vdc%orig_rank)
      end if
    end do
    do i = 1, size(this%rte_exchanges)
      vdc => this%rte_exchanges(i)%ptr
      if (.not. vdc%is_local .and. vdc%is_active) then
        call this%senders%push_back_unique(vdc%orig_rank)
      end if
    end do

  end subroutine update_senders

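  !> @brief Update the list of ranks to send data to: mirrors the senders
  !! unless the halo is activated, in which case the ranks are taken from
  !! the containers' receiver lists
  !<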
  subroutine update_receivers(this)
    class(MpiRouterType) :: this
    ! local
    integer(I4B) :: i, j
    class(VirtualDataContainerType), pointer :: vdc

    call this%receivers%clear()

    if (.not. this%halo_activated) then
      ! assuming symmetry for now
      do i = 1, this%senders%size
        call this%receivers%push_back(this%senders%at(i))
      end do
    else
      ! get the receivers from the VDCs
      do i = 1, size(this%rte_models)
        vdc => this%rte_models(i)%ptr
        do j = 1, vdc%rcv_ranks%size
          call this%receivers%push_back_unique(vdc%rcv_ranks%at(j))
        end do
      end do
      do i = 1, size(this%rte_exchanges)
        vdc => this%rte_exchanges(i)%ptr
        do j = 1, vdc%rcv_ranks%size
          call this%receivers%push_back_unique(vdc%rcv_ranks%at(j))
        end do
      end do
    end if

  end subroutine update_receivers

  !> @brief Check if the message datatypes for this unit
  !< (solution) and stage are cached
  function is_cached(this, unit, stage) result(in_cache)
    use SimModule, only: ustop
    class(MpiRouterType) :: this
    integer(I4B) :: unit
    integer(I4B) :: stage
    logical(LGP) :: in_cache
    ! local
    integer(I4B) :: i, rnk
    integer(I4B) :: no_cache_cnt
    integer :: cached_type

    in_cache = .false.
    no_cache_cnt = 0

    do i = 1, this%receivers%size
      rnk = this%receivers%at(i)
      cached_type = this%msg_cache%get(unit, rnk, stage, MPI_BDY_SND)
      if (cached_type == NO_CACHED_VALUE) no_cache_cnt = no_cache_cnt + 1
    end do
    do i = 1, this%senders%size
      rnk = this%senders%at(i)
      cached_type = this%msg_cache%get(unit, rnk, stage, MPI_BDY_RCV)
      if (cached_type == NO_CACHED_VALUE) no_cache_cnt = no_cache_cnt + 1
    end do

    ! it should be all or nothing
    if (no_cache_cnt == this%receivers%size + this%senders%size) then
      in_cache = .false.
    else if (no_cache_cnt == 0) then
      in_cache = .true.
    else
      call ustop("Internal error: MPI message cache corrupt...")
    end if

  end function is_cached

  subroutine mr_finalize(this)
    class(MpiRouterType) :: this

    call this%msg_cache%clear()

  end subroutine mr_finalize

  subroutine mr_destroy(this)
    class(MpiRouterType) :: this

    call this%msg_cache%destroy()

    call this%senders%destroy()
    call this%receivers%destroy()

    deallocate (this%model_proc_ids)
    deallocate (this%all_models)
    deallocate (this%all_exchanges)

    deallocate (this%tmr_mpi_wait)

  end subroutine mr_destroy

end module MpiRouterModule
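
A minimal usage sketch (not part of MpiRouter.f90), assuming a parallel
driver that owns the router; the stage constant passed to route_all is
illustrative, any stage id from SimStagesModule can be used:

  class(RouterBaseType), pointer :: router

  ! create and initialize the parallel router
  router => create_mpi_router()
  call router%initialize()

  ! at a synchronization point, route all remote data for that stage;
  ! the first call for a stage composes and caches the MPI datatypes,
  ! later calls are served from the cache
  call router%route_all(STG_AFT_MDL_DF)

  ! clean up
  call router%finalize()
  call router%destroy()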