parallel_lmdz.F90
!
! $Id$
!
MODULE parallel_lmdz
  USE mod_const_mpi
#ifdef CPP_IOIPSL
  use ioipsl
#else
! if not using IOIPSL, we still need to use (a local version of) getin
  use ioipsl_getincom
#endif
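!
! This module handles the latitude-band domain decomposition of the
! LMDZ dynamical core: each MPI process owns the latitude rows
! jj_begin:jj_end (plus a halo of at most halo_max rows), and the
! *_u / *_v bounds below refer to the staggered u and v grids of the
! longitude-latitude mesh. The OpenMP settings (omp_size, omp_rank,
! omp_chunk) are also initialized here.
!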
  INTEGER,PARAMETER :: halo_max=3

  LOGICAL,SAVE :: using_mpi
  LOGICAL,SAVE :: using_omp

  integer, save :: mpi_size
  integer, save :: mpi_rank
  integer, save :: jj_begin
  integer, save :: jj_end
  integer, save :: jj_nb
  integer, save :: ij_begin
  integer, save :: ij_end
  logical, save :: pole_nord
  logical, save :: pole_sud

  integer,save :: jjb_u
  integer,save :: jje_u
  integer,save :: jjnb_u
  integer,save :: jjb_v
  integer,save :: jje_v
  integer,save :: jjnb_v

  integer,save :: ijb_u
  integer,save :: ije_u
  integer,save :: ijnb_u

  integer,save :: ijb_v
  integer,save :: ije_v
  integer,save :: ijnb_v


  integer, allocatable, save, dimension(:) :: jj_begin_para
  integer, allocatable, save, dimension(:) :: jj_end_para
  integer, allocatable, save, dimension(:) :: jj_nb_para
  integer, save :: omp_chunk
  integer, save :: omp_rank
  integer, save :: omp_size
!$OMP THREADPRIVATE(omp_rank)

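  ! The distrib derived type bundles one complete decomposition (local
  ! bounds plus the per-process jj_*_para arrays), so that an alternative
  ! load balance can be built with create_distrib(), activated with
  ! set_distrib() and queried with get_current_distrib().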
  TYPE distrib
    integer :: jj_begin
    integer :: jj_end
    integer :: jj_nb
    integer :: ij_begin
    integer :: ij_end

    integer :: jjb_u
    integer :: jje_u
    integer :: jjnb_u
    integer :: jjb_v
    integer :: jje_v
    integer :: jjnb_v

    integer :: ijb_u
    integer :: ije_u
    integer :: ijnb_u

    integer :: ijb_v
    integer :: ije_v
    integer :: ijnb_v


    integer, pointer :: jj_begin_para(:) => null()
    integer, pointer :: jj_end_para(:) => null()
    integer, pointer :: jj_nb_para(:) => null()
  END TYPE distrib

  INTERFACE assignment (=)
    MODULE PROCEDURE copy_distrib
  END INTERFACE
  TYPE(distrib),SAVE :: current_dist

contains

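  ! init_parallel: query the MPI and OpenMP environments, split the
  ! jjm+1 latitude rows into one contiguous band per MPI process, set
  ! the local jj/ij bounds and halo bounds, and (for non-zero ranks)
  ! redirect text output to a per-rank file when lunout is not a
  ! standard unit.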
  subroutine init_parallel
    USE vampir
    implicit none
#ifdef CPP_MPI
    include 'mpif.h'
#endif
#include "dimensions.h"
#include "paramet.h"
#include "iniprint.h"

    integer :: ierr
    integer :: i,j
    integer :: type_size
    integer, dimension(3) :: blocklen,type
    integer :: comp_id
    character(len=4) :: num
    character(len=20) :: filename

#ifdef CPP_OMP
    INTEGER :: OMP_GET_NUM_THREADS
    EXTERNAL omp_get_num_threads
    INTEGER :: OMP_GET_THREAD_NUM
    EXTERNAL omp_get_thread_num
#endif

#ifdef CPP_MPI
    using_mpi=.true.
#else
    using_mpi=.false.
#endif


#ifdef CPP_OMP
    using_omp=.true.
#else
    using_omp=.false.
#endif

    call initvampir

    IF (using_mpi) THEN
#ifdef CPP_MPI
      call mpi_comm_size(comm_lmdz,mpi_size,ierr)
      call mpi_comm_rank(comm_lmdz,mpi_rank,ierr)
#endif
    ELSE
      mpi_size=1
      mpi_rank=0
    ENDIF


! Open text output file with mpi_rank in suffix of file name
    IF (lunout /= 5 .and. lunout /= 6) THEN
      WRITE(num,'(I4.4)') mpi_rank
      filename='lmdz.out_'//num
      IF (mpi_rank .NE. 0) THEN
        OPEN(unit=lunout,file=trim(filename),action='write', &
             status='unknown',form='formatted',iostat=ierr)
      ENDIF
    ENDIF


    allocate(jj_begin_para(0:mpi_size-1))
    allocate(jj_end_para(0:mpi_size-1))
    allocate(jj_nb_para(0:mpi_size-1))

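    ! Distribute the jjm+1 latitude rows as evenly as possible: every
    ! process gets (jjm+1)/mpi_size rows and the first mod(jjm+1,mpi_size)
    ! processes get one extra row. For instance, 48 rows on 5 processes
    ! yields bands of 10,10,10,9,9 rows for ranks 0..4.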
    do i=0,mpi_size-1
      jj_nb_para(i)=(jjm+1)/mpi_size
      if ( i < mod((jjm+1),mpi_size) ) jj_nb_para(i)=jj_nb_para(i)+1

      if (jj_nb_para(i) <= 2 ) then

        write(lunout,*)"Abort: the number of latitude bands per process is too small (2 or fewer)."
        write(lunout,*)" ---> decrease the number of MPI processes or increase the number of latitudes"

#ifdef CPP_MPI
        IF (using_mpi) call mpi_abort(comm_lmdz,-1, ierr)
#endif
      endif

    enddo

! jj_nb_para(0)=11
! jj_nb_para(1)=25
! jj_nb_para(2)=25
! jj_nb_para(3)=12

    j=1

    do i=0,mpi_size-1

      jj_begin_para(i)=j
      jj_end_para(i)=j+jj_nb_para(i)-1
      j=j+jj_nb_para(i)

    enddo

    jj_begin=jj_begin_para(mpi_rank)
    jj_end=jj_end_para(mpi_rank)
    jj_nb=jj_nb_para(mpi_rank)

    ij_begin=(jj_begin-1)*iip1+1
    ij_end=jj_end*iip1

    if (mpi_rank.eq.0) then
      pole_nord=.true.
    else
      pole_nord=.false.
    endif

    if (mpi_rank.eq.mpi_size-1) then
      pole_sud=.true.
    else
      pole_sud=.false.
    endif

    write(lunout,*)"init_parallel: jj_begin",jj_begin
    write(lunout,*)"init_parallel: jj_end",jj_end
    write(lunout,*)"init_parallel: ij_begin",ij_begin
    write(lunout,*)"init_parallel: ij_end",ij_end
    jjb_u=max(jj_begin-halo_max,1)
    jje_u=min(jj_end+halo_max,jjp1)
    jjnb_u=jje_u-jjb_u+1

    jjb_v=max(jj_begin-halo_max,1)
    jje_v=min(jj_end+halo_max,jjm)
    jjnb_v=jje_v-jjb_v+1

    ijb_u=max(ij_begin-halo_max*iip1,1)
    ije_u=min(ij_end+halo_max*iip1,ip1jmp1)
    ijnb_u=ije_u-ijb_u+1

    ijb_v=max(ij_begin-halo_max*iip1,1)
    ije_v=min(ij_end+halo_max*iip1,ip1jm)
    ijnb_v=ije_v-ijb_v+1

!$OMP PARALLEL

#ifdef CPP_OMP
!$OMP MASTER
    omp_size=omp_get_num_threads()
!$OMP END MASTER
!$OMP BARRIER
    omp_rank=omp_get_thread_num()

!Config  Key  = omp_chunk
!Config  Desc = size of the OpenMP chunks
!Config  Def  = 1
!Config  Help = sets the size of the OpenMP iteration chunks handed to
!Config        each thread when entering a parallelized loop

!$OMP MASTER
    omp_chunk=(llm+1)/omp_size
    IF (mod(llm+1,omp_size)/=0) omp_chunk=omp_chunk+1
    CALL getin('omp_chunk',omp_chunk)
!$OMP END MASTER
!$OMP BARRIER
#else
    omp_size=1
    omp_rank=0
#endif
!$OMP END PARALLEL

  end subroutine init_parallel

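  ! create_distrib: build a distrib structure from a prescribed number
  ! of latitude rows per MPI process (jj_nb_new), recomputing the local
  ! begin/end indices and halo bounds in the same way init_parallel does
  ! for the default decomposition.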
  SUBROUTINE create_distrib(jj_nb_new,d)
    IMPLICIT NONE
    include "dimensions.h"
    include "paramet.h"

    INTEGER,INTENT(IN) :: jj_Nb_New(0:mpi_size-1)
    TYPE(distrib),INTENT(INOUT) :: d
    INTEGER :: i

    IF (.NOT. ASSOCIATED(d%jj_nb_para)) ALLOCATE(d%jj_nb_para(0:mpi_size-1))
    IF (.NOT. ASSOCIATED(d%jj_begin_para)) ALLOCATE(d%jj_begin_para(0:mpi_size-1))
    IF (.NOT. ASSOCIATED(d%jj_end_para)) ALLOCATE(d%jj_end_para(0:mpi_size-1))

    d%jj_Nb_Para=jj_nb_new

    d%jj_begin_para(0)=1
    d%jj_end_para(0)=d%jj_Nb_Para(0)

    do i=1,mpi_size-1

      d%jj_begin_para(i)=d%jj_end_para(i-1)+1
      d%jj_end_para(i)=d%jj_begin_para(i)+d%jj_Nb_para(i)-1

    enddo

    d%jj_begin = d%jj_begin_para(mpi_rank)
    d%jj_end = d%jj_end_para(mpi_rank)
    d%jj_nb = d%jj_nb_para(mpi_rank)

    d%ij_begin=(d%jj_begin-1)*iip1+1
    d%ij_end=d%jj_end*iip1

    d%jjb_u=max(d%jj_begin-halo_max,1)
    d%jje_u=min(d%jj_end+halo_max,jjp1)
    d%jjnb_u=d%jje_u-d%jjb_u+1

    d%jjb_v=max(d%jj_begin-halo_max,1)
    d%jje_v=min(d%jj_end+halo_max,jjm)
    d%jjnb_v=d%jje_v-d%jjb_v+1

    d%ijb_u=max(d%ij_begin-halo_max*iip1,1)
    d%ije_u=min(d%ij_end+halo_max*iip1,ip1jmp1)
    d%ijnb_u=d%ije_u-d%ijb_u+1

    d%ijb_v=max(d%ij_begin-halo_max*iip1,1)
    d%ije_v=min(d%ij_end+halo_max*iip1,ip1jm)
    d%ijnb_v=d%ije_v-d%ijb_v+1

  END SUBROUTINE create_distrib


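  ! set_distrib: make d the active decomposition by copying its bounds
  ! into the module-level variables used throughout the dynamics, and
  ! remember it in current_dist. A typical (hypothetical) rebalancing
  ! sequence would be:
  !   CALL create_distrib(new_jj_nb, new_dist)
  !   CALL set_distrib(new_dist)
  ! where new_jj_nb and new_dist are caller-supplied names.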
  SUBROUTINE set_distrib(d)
    IMPLICIT NONE

    include "dimensions.h"
    include "paramet.h"
    TYPE(distrib),INTENT(IN) :: d

    jj_begin = d%jj_begin
    jj_end = d%jj_end
    jj_nb = d%jj_nb
    ij_begin = d%ij_begin
    ij_end = d%ij_end

    jjb_u = d%jjb_u
    jje_u = d%jje_u
    jjnb_u = d%jjnb_u
    jjb_v = d%jjb_v
    jje_v = d%jje_v
    jjnb_v = d%jjnb_v

    ijb_u = d%ijb_u
    ije_u = d%ije_u
    ijnb_u = d%ijnb_u

    ijb_v = d%ijb_v
    ije_v = d%ije_v
    ijnb_v = d%ijnb_v


    jj_begin_para(:) = d%jj_begin_para(:)
    jj_end_para(:) = d%jj_end_para(:)
    jj_nb_para(:) = d%jj_nb_para(:)
    current_dist=d

  END SUBROUTINE set_distrib

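  ! copy_distrib: implementation of assignment(=) for distrib. Scalars
  ! are copied directly; the jj_*_para arrays are copied element-wise,
  ! so the left-hand side must already have associated pointer arrays of
  ! the right size (as is the case after create_distrib).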
  SUBROUTINE copy_distrib(dist,new_dist)
    IMPLICIT NONE

    include "dimensions.h"
    include "paramet.h"
    TYPE(distrib),INTENT(INOUT) :: dist
    TYPE(distrib),INTENT(IN) :: new_dist

    dist%jj_begin = new_dist%jj_begin
    dist%jj_end = new_dist%jj_end
    dist%jj_nb = new_dist%jj_nb
    dist%ij_begin = new_dist%ij_begin
    dist%ij_end = new_dist%ij_end

    dist%jjb_u = new_dist%jjb_u
    dist%jje_u = new_dist%jje_u
    dist%jjnb_u = new_dist%jjnb_u
    dist%jjb_v = new_dist%jjb_v
    dist%jje_v = new_dist%jje_v
    dist%jjnb_v = new_dist%jjnb_v

    dist%ijb_u = new_dist%ijb_u
    dist%ije_u = new_dist%ije_u
    dist%ijnb_u = new_dist%ijnb_u

    dist%ijb_v = new_dist%ijb_v
    dist%ije_v = new_dist%ije_v
    dist%ijnb_v = new_dist%ijnb_v


    dist%jj_begin_para(:) = new_dist%jj_begin_para(:)
    dist%jj_end_para(:) = new_dist%jj_end_para(:)
    dist%jj_nb_para(:) = new_dist%jj_nb_para(:)

  END SUBROUTINE copy_distrib


  SUBROUTINE get_current_distrib(d)
    IMPLICIT NONE

    include "dimensions.h"
    include "paramet.h"
    TYPE(distrib),INTENT(OUT) :: d

    d=current_dist

  END SUBROUTINE get_current_distrib

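  ! finalize_parallel: free the decomposition arrays and terminate the
  ! run; coupled ocean runs (type_ocean=='couple') close XIOS or call
  ! the OASIS prism termination, other runs close XIOS (when enabled)
  ! and call mpi_finalize.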
  subroutine finalize_parallel
#ifdef CPP_XIOS
    ! ug: for XIOS output
    USE wxios
#endif
#ifdef CPP_COUPLE
! Use of Oasis-MCT coupler
#if defined CPP_OMCT
    use mod_prism
#else
    use mod_prism_proto
#endif
! Ehouarn: surface_data module is in 'phylmd' ...
    use surface_data, only : type_ocean
    implicit none
#else
    implicit none
! without the surface_data module, we declare (and set) a dummy 'type_ocean'
    character(len=6),parameter :: type_ocean="dummy"
#endif
! #endif of #ifdef CPP_EARTH

    include "dimensions.h"
    include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif

    integer :: ierr
    integer :: i

    if (allocated(jj_begin_para)) deallocate(jj_begin_para)
    if (allocated(jj_end_para)) deallocate(jj_end_para)
    if (allocated(jj_nb_para)) deallocate(jj_nb_para)

    if (type_ocean == 'couple') then
#ifdef CPP_XIOS
      ! Clean shutdown of XIOS
      CALL wxios_close()
#else
#ifdef CPP_COUPLE
      call prism_terminate_proto(ierr)
      IF (ierr .ne. prism_ok) THEN
        call abort_gcm('Finalize_parallel',' Problem in prism_terminate_proto ',1)
      endif
#endif
#endif
    else
#ifdef CPP_XIOS
      ! Clean shutdown of XIOS
      CALL wxios_close()
#endif
#ifdef CPP_MPI
      IF (using_mpi) call mpi_finalize(ierr)
#endif
    end if

  end subroutine finalize_parallel

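  ! pack_data / unpack_data: copy 'row' full latitude rows (iip1
  ! longitude points each) of every one of the ll levels between a 2-D
  ! field slice and a contiguous 1-D buffer, level after level; this is
  ! the buffer layout used by the MPI exchanges below.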
  subroutine pack_data(Field,ij,ll,row,Buffer)
    implicit none

#include "dimensions.h"
#include "paramet.h"

    integer, intent(in) :: ij,ll,row
    real,dimension(ij,ll),intent(in) :: Field
    real,dimension(ll*iip1*row), intent(out) :: Buffer

    integer :: Pos
    integer :: i,l

    pos=0
    do l=1,ll
      do i=1,row*iip1
        pos=pos+1
        buffer(pos)=field(i,l)
      enddo
    enddo

  end subroutine pack_data

  subroutine unpack_data(Field,ij,ll,row,Buffer)
    implicit none

#include "dimensions.h"
#include "paramet.h"

    integer, intent(in) :: ij,ll,row
    real,dimension(ij,ll),intent(out) :: Field
    real,dimension(ll*iip1*row), intent(in) :: Buffer

    integer :: Pos
    integer :: i,l

    pos=0

    do l=1,ll
      do i=1,row*iip1
        pos=pos+1
        field(i,l)=buffer(pos)
      enddo
    enddo

  end subroutine unpack_data


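  ! barrier: MPI barrier on comm_lmdz, wrapped in the (MPI) critical
  ! section so that only one OpenMP thread at a time issues MPI calls.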
  SUBROUTINE barrier
    IMPLICIT NONE
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ierr

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
    IF (using_mpi) CALL mpi_barrier(comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)

  END SUBROUTINE barrier


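  ! exchange_hallo: halo exchange along latitude with the neighbouring
  ! MPI processes, using non-blocking mpi_issend/mpi_irecv followed by a
  ! single mpi_waitall. 'up' rows are received from rank-1 just above
  ! ij_begin and 'down' rows from rank+1 just below ij_end; a process
  ! holding a pole skips the exchange on that side.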
  subroutine exchange_hallo(Field,ij,ll,up,down)
    USE vampir
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll
    REAL, dimension(ij,ll) :: Field
    INTEGER :: up,down

    INTEGER :: ierr
    LOGICAL :: SendUp,SendDown
    LOGICAL :: RecvUp,RecvDown
    INTEGER, DIMENSION(4) :: Request
#ifdef CPP_MPI
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) :: Status
#else
    INTEGER, DIMENSION(1,4) :: Status
#endif
    INTEGER :: NbRequest
    REAL, dimension(:),allocatable :: Buffer_Send_up,Buffer_Send_down
    REAL, dimension(:),allocatable :: Buffer_Recv_up,Buffer_Recv_down
    INTEGER :: Buffer_size

    IF (using_mpi) THEN

      CALL barrier

      call vtb(vthallo)

      sendup=.true.
      senddown=.true.
      recvup=.true.
      recvdown=.true.

      IF (pole_nord) THEN
        sendup=.false.
        recvup=.false.
      ENDIF

      IF (pole_sud) THEN
        senddown=.false.
        recvdown=.false.
      ENDIF

      if (up.eq.0) then
        senddown=.false.
        recvup=.false.
      endif

      if (down.eq.0) then
        sendup=.false.
        recvdown=.false.
      endif

      nbrequest=0

      IF (sendup) THEN
        nbrequest=nbrequest+1
        buffer_size=down*iip1*ll
        allocate(buffer_send_up(buffer_size))
        call pack_data(field(ij_begin,1),ij,ll,down,buffer_send_up)
!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_issend(buffer_send_up,buffer_size,mpi_real8,mpi_rank-1,1, &
                        comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)
      ENDIF

      IF (senddown) THEN
        nbrequest=nbrequest+1

        buffer_size=up*iip1*ll
        allocate(buffer_send_down(buffer_size))
        call pack_data(field(ij_end+1-up*iip1,1),ij,ll,up,buffer_send_down)

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_issend(buffer_send_down,buffer_size,mpi_real8,mpi_rank+1,1, &
                        comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)
      ENDIF


      IF (recvup) THEN
        nbrequest=nbrequest+1
        buffer_size=up*iip1*ll
        allocate(buffer_recv_up(buffer_size))

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_irecv(buffer_recv_up,buffer_size,mpi_real8,mpi_rank-1,1, &
                       comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)


      ENDIF

      IF (recvdown) THEN
        nbrequest=nbrequest+1
        buffer_size=down*iip1*ll
        allocate(buffer_recv_down(buffer_size))

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_irecv(buffer_recv_down,buffer_size,mpi_real8,mpi_rank+1,1, &
                       comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)

      ENDIF

#ifdef CPP_MPI
      if (nbrequest > 0) call mpi_waitall(nbrequest,request,status,ierr)
#endif
      IF (recvup) call unpack_data(field(ij_begin-up*iip1,1),ij,ll,up,buffer_recv_up)
      IF (recvdown) call unpack_data(field(ij_end+1,1),ij,ll,down,buffer_recv_down)

      call vte(vthallo)
      call barrier

    ENDIF ! using_mpi

    RETURN

  end subroutine exchange_hallo


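  ! gather_field: gather a latitude-distributed field (ij=ip1jmp1 for the
  ! u grid, ij=ip1jm for the v grid) onto MPI process 'rank' with
  ! mpi_gatherv; on the other processes the field is left unchanged.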
  subroutine gather_field(Field,ij,ll,rank)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#include "iniprint.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll,rank
    REAL, dimension(ij,ll) :: Field
    REAL, dimension(:),allocatable :: Buffer_send
    REAL, dimension(:),allocatable :: Buffer_Recv
    INTEGER, dimension(0:MPI_Size-1) :: Recv_count, displ
    INTEGER :: ierr
    INTEGER :: i

    IF (using_mpi) THEN

      if (ij==ip1jmp1) then
        allocate(buffer_send(iip1*ll*(jj_end-jj_begin+1)))
        call pack_data(field(ij_begin,1),ij,ll,jj_end-jj_begin+1,buffer_send)
      else if (ij==ip1jm) then
        allocate(buffer_send(iip1*ll*(min(jj_end,jjm)-jj_begin+1)))
        call pack_data(field(ij_begin,1),ij,ll,min(jj_end,jjm)-jj_begin+1,buffer_send)
      else
        write(lunout,*)ij
        stop 'error in Gather_Field'
      endif

      if (mpi_rank==rank) then
        allocate(buffer_recv(ij*ll))

!CDIR NOVECTOR
        do i=0,mpi_size-1

          if (ij==ip1jmp1) then
            recv_count(i)=(jj_end_para(i)-jj_begin_para(i)+1)*ll*iip1
          else if (ij==ip1jm) then
            recv_count(i)=(min(jj_end_para(i),jjm)-jj_begin_para(i)+1)*ll*iip1
          else
            stop 'error in Gather_Field'
          endif

          if (i==0) then
            displ(i)=0
          else
            displ(i)=displ(i-1)+recv_count(i-1)
          endif

        enddo

      else
        ! Ehouarn: When in debug mode, ifort complains (for call MPI_GATHERV
        ! below) about Buffer_Recv() being not allocated.
        ! So make a dummy allocation.
        allocate(buffer_recv(1))
      endif ! of if (MPI_Rank==rank)

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call mpi_gatherv(buffer_send,(min(ij_end,ij)-ij_begin+1)*ll,mpi_real8, &
                       buffer_recv,recv_count,displ,mpi_real8,rank,comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)

      if (mpi_rank==rank) then

        if (ij==ip1jmp1) then
          do i=0,mpi_size-1
            call unpack_data(field((jj_begin_para(i)-1)*iip1+1,1),ij,ll, &
                             jj_end_para(i)-jj_begin_para(i)+1,buffer_recv(displ(i)+1))
          enddo
        else if (ij==ip1jm) then
          do i=0,mpi_size-1
            call unpack_data(field((jj_begin_para(i)-1)*iip1+1,1),ij,ll, &
                             min(jj_end_para(i),jjm)-jj_begin_para(i)+1,buffer_recv(displ(i)+1))
          enddo
        endif
      endif
    ENDIF ! using_mpi

  end subroutine gather_field


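  ! allgather_field: gather the field onto MPI process 0, then broadcast
  ! it, so that every process ends up with the full global field.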
  subroutine allgather_field(Field,ij,ll)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll
    REAL, dimension(ij,ll) :: Field
    INTEGER :: ierr

    IF (using_mpi) THEN
      call gather_field(field,ij,ll,0)
!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call mpi_bcast(field,ij*ll,mpi_real8,0,comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)
    ENDIF

  end subroutine allgather_field

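  ! broadcast_field: broadcast the full field from MPI process 'rank' to
  ! all other processes of comm_lmdz.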
  subroutine broadcast_field(Field,ij,ll,rank)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll
    REAL, dimension(ij,ll) :: Field
    INTEGER :: rank
    INTEGER :: ierr

    IF (using_mpi) THEN

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call mpi_bcast(field,ij*ll,mpi_real8,rank,comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)

    ENDIF
  end subroutine broadcast_field


! Subroutine verif_hallo(Field,ij,ll,up,down)
!   implicit none
!#include "dimensions.h"
!#include "paramet.h"
!   include 'mpif.h'
!
!   INTEGER :: ij,ll
!   REAL, dimension(ij,ll) :: Field
!   INTEGER :: up,down
!
!   REAL,dimension(ij,ll): NewField
!
!   NewField=0
!
!   ijb=ij_begin
!   ije=ij_end
!   if (pole_nord)
!   NewField(ij_be

end MODULE parallel_lmdz