parallel_lmdz.F90
!
! $Id: parallel.F90 1810 2013-07-24 08:06:39Z emillour $
!
  MODULE parallel_lmdz
    USE mod_const_mpi
#ifdef CPP_IOIPSL
    use ioipsl, only: getin
#else
! if not using IOIPSL, we still need to use (a local version of) getin
    use ioipsl_getincom, only: getin
#endif

    LOGICAL,SAVE :: using_mpi=.true.
    LOGICAL,SAVE :: using_omp

    integer, save :: mpi_size
    integer, save :: mpi_rank
    integer, save :: jj_begin
    integer, save :: jj_end
    integer, save :: jj_nb
    integer, save :: ij_begin
    integer, save :: ij_end
    logical, save :: pole_nord
    logical, save :: pole_sud

    integer, allocatable, save, dimension(:) :: jj_begin_para
    integer, allocatable, save, dimension(:) :: jj_end_para
    integer, allocatable, save, dimension(:) :: jj_nb_para
    integer, save :: omp_chunk
    integer, save :: omp_rank
    integer, save :: omp_size
!$OMP THREADPRIVATE(omp_rank)

! Ehouarn: add "dummy variables" (which are in dyn3d_mem/parallel_lmdz.F90)
! so that calfis_loc compiles even if using dyn3dpar
    integer,save :: jjb_u
    integer,save :: jje_u
    integer,save :: jjnb_u
    integer,save :: jjb_v
    integer,save :: jje_v
    integer,save :: jjnb_v

    integer,save :: ijb_u
    integer,save :: ije_u
    integer,save :: ijnb_u

    integer,save :: ijb_v
    integer,save :: ije_v
    integer,save :: ijnb_v

  contains

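! init_parallel: set up the parallel environment. Queries the MPI
! communicator size and rank (or falls back to a single process), opens a
! per-rank text output file, splits the jjm+1 latitude rows into bands of
! near-equal size across MPI processes, and determines the OpenMP thread
! count, thread rank and iteration chunk size.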
  subroutine init_parallel
    USE vampir
    implicit none
#ifdef CPP_MPI
    include 'mpif.h'
#endif
#include "dimensions.h"
#include "paramet.h"
#include "iniprint.h"

    integer :: ierr
    integer :: i,j
    integer :: type_size
    integer, dimension(3) :: blocklen,type
    integer :: comp_id
    character(len=4) :: num
    character(len=20) :: filename

#ifdef CPP_OMP
    INTEGER :: omp_get_num_threads
    EXTERNAL omp_get_num_threads
    INTEGER :: omp_get_thread_num
    EXTERNAL omp_get_thread_num
#endif

#ifdef CPP_MPI
    using_mpi=.true.
#else
    using_mpi=.false.
#endif


#ifdef CPP_OMP
    using_omp=.true.
#else
    using_omp=.false.
#endif

    call initvampir

    IF (using_mpi) THEN
#ifdef CPP_MPI
      call mpi_comm_size(comm_lmdz,mpi_size,ierr)
      call mpi_comm_rank(comm_lmdz,mpi_rank,ierr)
#endif
    ELSE
      mpi_size=1
      mpi_rank=0
    ENDIF


! Open text output file with mpi_rank in suffix of file name
    IF (lunout /= 5 .and. lunout /= 6) THEN
      WRITE(num,'(I4.4)') mpi_rank
      filename='lmdz.out_'//num
      IF (mpi_rank .NE. 0) THEN
        OPEN(unit=lunout,file=trim(filename),action='write', &
             status='unknown',form='formatted',iostat=ierr)
      ENDIF
    ENDIF


    allocate(jj_begin_para(0:mpi_size-1))
    allocate(jj_end_para(0:mpi_size-1))
    allocate(jj_nb_para(0:mpi_size-1))

    do i=0,mpi_size-1
      jj_nb_para(i)=(jjm+1)/mpi_size
      if ( i < mod((jjm+1),mpi_size) ) jj_nb_para(i)=jj_nb_para(i)+1

      if (jj_nb_para(i) <= 2 ) then

        write(lunout,*)"Abort: too few latitude bands per process (<=2)."
        write(lunout,*)" ---> decrease the number of CPUs or increase the latitude dimension"

#ifdef CPP_MPI
        IF (using_mpi) call mpi_abort(comm_lmdz,-1, ierr)
#endif
      endif

    enddo

! jj_nb_para(0)=11
! jj_nb_para(1)=25
! jj_nb_para(2)=25
! jj_nb_para(3)=12

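! Cumulative sweep: process i owns latitude rows jj_begin_para(i) to
! jj_end_para(i), i.e. jj_nb_para(i) rows. For example, with jjm+1=96
! rows on 4 processes this yields the ranges 1-24, 25-48, 49-72, 73-96.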
    j=1

    do i=0,mpi_size-1

      jj_begin_para(i)=j
      jj_end_para(i)=j+jj_nb_para(i)-1
      j=j+jj_nb_para(i)

    enddo

    jj_nb=jj_nb_para(mpi_rank)
    jj_begin=jj_begin_para(mpi_rank)
    jj_end=jj_end_para(mpi_rank)

    ij_begin=(jj_begin-1)*iip1+1
    ij_end=jj_end*iip1

    if (mpi_rank.eq.0) then
      pole_nord=.true.
    else
      pole_nord=.false.
    endif

    if (mpi_rank.eq.mpi_size-1) then
      pole_sud=.true.
    else
      pole_sud=.false.
    endif

    write(lunout,*)"init_parallel: jj_begin",jj_begin
    write(lunout,*)"init_parallel: jj_end",jj_end
    write(lunout,*)"init_parallel: ij_begin",ij_begin
    write(lunout,*)"init_parallel: ij_end",ij_end

!$OMP PARALLEL

#ifdef CPP_OMP
!$OMP MASTER
    omp_size=omp_get_num_threads()
!$OMP END MASTER
!$OMP BARRIER
    omp_rank=omp_get_thread_num()

!Config Key = omp_chunk
!Config Desc = size of the OpenMP blocks
!Config Def = 1
!Config Help = defines the size of the OpenMP iteration packets
!Config handed out to each task on entering a
!Config parallelized loop

!$OMP MASTER
    ! default chunk: ceiling of (llm+1)/omp_size
    omp_chunk=(llm+1)/omp_size
    IF (mod(llm+1,omp_size)/=0) omp_chunk=omp_chunk+1
    CALL getin('omp_chunk',omp_chunk)
!$OMP END MASTER
!$OMP BARRIER
#else
    omp_size=1
    omp_rank=0
#endif
!$OMP END PARALLEL

  end subroutine init_parallel


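! setdistrib: install a new latitude-band distribution (jj_Nb_New rows per
! process) and recompute the per-process bounds jj_begin/jj_end and
! ij_begin/ij_end accordingly.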
  subroutine setdistrib(jj_Nb_New)
    implicit none

#include "dimensions.h"
#include "paramet.h"

    INTEGER,dimension(0:MPI_Size-1) :: jj_Nb_New
    INTEGER :: i

    jj_nb_para=jj_nb_new

    jj_begin_para(0)=1
    jj_end_para(0)=jj_nb_para(0)

    do i=1,mpi_size-1

      jj_begin_para(i)=jj_end_para(i-1)+1
      jj_end_para(i)=jj_begin_para(i)+jj_nb_para(i)-1

    enddo

    jj_nb=jj_nb_para(mpi_rank)
    jj_begin=jj_begin_para(mpi_rank)
    jj_end=jj_end_para(mpi_rank)

    ij_begin=(jj_begin-1)*iip1+1
    ij_end=jj_end*iip1

  end subroutine setdistrib


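! finalize_parallel: release the distribution arrays and shut down the
! parallel environment; terminates the OASIS coupler in coupled-ocean runs,
! closes XIOS when enabled, and calls mpi_finalize otherwise.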
  subroutine finalize_parallel
#ifdef CPP_XIOS
! ug For XIOS output
    USE wxios
#endif
#ifdef CPP_COUPLE
! Use of Oasis-MCT coupler
#if defined CPP_OMCT
    use mod_prism
#else
    use mod_prism_proto
#endif
! Ehouarn: surface_data module is in 'phylmd' ...
    use surface_data, only : type_ocean
    implicit none
#else
    implicit none
! without the surface_data module, we declare (and set) a dummy 'type_ocean'
    character(len=6),parameter :: type_ocean="dummy"
#endif

    include "dimensions.h"
    include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif

    integer :: ierr
    integer :: i

    if (allocated(jj_begin_para)) deallocate(jj_begin_para)
    if (allocated(jj_end_para))   deallocate(jj_end_para)
    if (allocated(jj_nb_para))    deallocate(jj_nb_para)

    if (type_ocean == 'couple') then
#ifdef CPP_XIOS
      ! Clean shutdown of XIOS
      CALL wxios_close()
#else
#ifdef CPP_COUPLE
      call prism_terminate_proto(ierr)
      IF (ierr .ne. prism_ok) THEN
        call abort_gcm('Finalize_parallel',' Problem in prism_terminate_proto ',1)
      endif
#endif
#endif
    else
#ifdef CPP_XIOS
      ! Clean shutdown of XIOS
      CALL wxios_close()
#endif
#ifdef CPP_MPI
      IF (using_mpi) call mpi_finalize(ierr)
#endif
    end if

  end subroutine finalize_parallel

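! pack_data: copy 'row' latitude rows (row*iip1 points) of each of the ll
! levels of Field into the contiguous 1D Buffer, level by level.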
  subroutine pack_data(Field,ij,ll,row,Buffer)
    implicit none

#include "dimensions.h"
#include "paramet.h"

    integer, intent(in) :: ij,ll,row
    real,dimension(ij,ll),intent(in) :: Field
    real,dimension(ll*iip1*row), intent(out) :: Buffer

    integer :: Pos
    integer :: i,l

    pos=0
    do l=1,ll
      do i=1,row*iip1
        pos=pos+1
        buffer(pos)=field(i,l)
      enddo
    enddo

  end subroutine pack_data

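! unpack_data: inverse of pack_data; copy the contiguous 1D Buffer back
! into 'row' latitude rows of each of the ll levels of Field.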
  subroutine unpack_data(Field,ij,ll,row,Buffer)
    implicit none

#include "dimensions.h"
#include "paramet.h"

    integer, intent(in) :: ij,ll,row
    real,dimension(ij,ll),intent(out) :: Field
    real,dimension(ll*iip1*row), intent(in) :: Buffer

    integer :: Pos
    integer :: i,l

    pos=0

    do l=1,ll
      do i=1,row*iip1
        pos=pos+1
        field(i,l)=buffer(pos)
      enddo
    enddo

  end subroutine unpack_data


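! barrier: synchronize all MPI processes (no-op when MPI is not used).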
  SUBROUTINE barrier
    IMPLICIT NONE
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ierr

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
    IF (using_mpi) CALL mpi_barrier(comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)

  END SUBROUTINE barrier


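! exchange_hallo: exchange halo rows with the neighbouring MPI processes
! using non-blocking sends/receives followed by a waitall: the first rows
! of the local domain go to the northern neighbour (mpi_rank-1) and the
! last rows to the southern one (mpi_rank+1), while 'up' rows above
! ij_begin and 'down' rows below ij_end are received as halos. Processes
! at the poles skip the exchange across the pole.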
  subroutine exchange_hallo(Field,ij,ll,up,down)
    USE vampir
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll
    REAL, dimension(ij,ll) :: Field
    INTEGER :: up,down

    INTEGER :: ierr
    LOGICAL :: SendUp,SendDown
    LOGICAL :: RecvUp,RecvDown
    INTEGER, DIMENSION(4) :: Request
#ifdef CPP_MPI
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) :: Status
#else
    INTEGER, DIMENSION(1,4) :: Status
#endif
    INTEGER :: NbRequest
    REAL, dimension(:),allocatable :: Buffer_Send_up,Buffer_Send_down
    REAL, dimension(:),allocatable :: Buffer_Recv_up,Buffer_Recv_down
    INTEGER :: Buffer_size

    IF (using_mpi) THEN

      CALL barrier

      call vtb(vthallo)

      sendup=.true.
      senddown=.true.
      recvup=.true.
      recvdown=.true.

      IF (pole_nord) THEN
        sendup=.false.
        recvup=.false.
      ENDIF

      IF (pole_sud) THEN
        senddown=.false.
        recvdown=.false.
      ENDIF

      if (up.eq.0) then
        senddown=.false.
        recvup=.false.
      endif

      if (down.eq.0) then
        sendup=.false.
        recvdown=.false.
      endif

      nbrequest=0

      IF (sendup) THEN
        nbrequest=nbrequest+1
        buffer_size=down*iip1*ll
        allocate(buffer_send_up(buffer_size))
        call pack_data(field(ij_begin,1),ij,ll,down,buffer_send_up)
!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_issend(buffer_send_up,buffer_size,mpi_real8,mpi_rank-1,1, &
                        comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)
      ENDIF

      IF (senddown) THEN
        nbrequest=nbrequest+1

        buffer_size=up*iip1*ll
        allocate(buffer_send_down(buffer_size))
        call pack_data(field(ij_end+1-up*iip1,1),ij,ll,up,buffer_send_down)

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_issend(buffer_send_down,buffer_size,mpi_real8,mpi_rank+1,1, &
                        comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)
      ENDIF


      IF (recvup) THEN
        nbrequest=nbrequest+1
        buffer_size=up*iip1*ll
        allocate(buffer_recv_up(buffer_size))

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_irecv(buffer_recv_up,buffer_size,mpi_real8,mpi_rank-1,1, &
                       comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)

      ENDIF

      IF (recvdown) THEN
        nbrequest=nbrequest+1
        buffer_size=down*iip1*ll
        allocate(buffer_recv_down(buffer_size))

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call mpi_irecv(buffer_recv_down,buffer_size,mpi_real8,mpi_rank+1,1, &
                       comm_lmdz,request(nbrequest),ierr)
#endif
!$OMP END CRITICAL (MPI)

      ENDIF

#ifdef CPP_MPI
      if (nbrequest > 0) call mpi_waitall(nbrequest,request,status,ierr)
#endif
      IF (recvup)   call unpack_data(field(ij_begin-up*iip1,1),ij,ll,up,buffer_recv_up)
      IF (recvdown) call unpack_data(field(ij_end+1,1),ij,ll,down,buffer_recv_down)

      call vte(vthallo)
      call barrier

    ENDIF ! using_mpi

    RETURN

  end subroutine exchange_hallo


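! gather_field: collect a field distributed over latitude bands onto
! process 'rank' via mpi_gatherv. Both grid sizes are handled: ip1jmp1
! and ip1jm (the latter has one latitude row fewer, hence the
! min(...,jjm) clipping).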
  subroutine gather_field(Field,ij,ll,rank)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#include "iniprint.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll,rank
    REAL, dimension(ij,ll) :: Field
    REAL, dimension(:),allocatable :: Buffer_send
    REAL, dimension(:),allocatable :: Buffer_Recv
    INTEGER, dimension(0:MPI_Size-1) :: Recv_count, displ
    INTEGER :: ierr
    INTEGER :: i

    IF (using_mpi) THEN

      if (ij==ip1jmp1) then
        allocate(buffer_send(iip1*ll*(jj_end-jj_begin+1)))
        call pack_data(field(ij_begin,1),ij,ll,jj_end-jj_begin+1,buffer_send)
      else if (ij==ip1jm) then
        allocate(buffer_send(iip1*ll*(min(jj_end,jjm)-jj_begin+1)))
        call pack_data(field(ij_begin,1),ij,ll,min(jj_end,jjm)-jj_begin+1,buffer_send)
      else
        write(lunout,*)ij
        stop 'error in Gather_Field'
      endif

      if (mpi_rank==rank) then
        allocate(buffer_recv(ij*ll))

!CDIR NOVECTOR
        do i=0,mpi_size-1

          if (ij==ip1jmp1) then
            recv_count(i)=(jj_end_para(i)-jj_begin_para(i)+1)*ll*iip1
          else if (ij==ip1jm) then
            recv_count(i)=(min(jj_end_para(i),jjm)-jj_begin_para(i)+1)*ll*iip1
          else
            stop 'error in Gather_Field'
          endif

          if (i==0) then
            displ(i)=0
          else
            displ(i)=displ(i-1)+recv_count(i-1)
          endif

        enddo

      else
        ! Ehouarn: When in debug mode, ifort complains (for call MPI_GATHERV
        ! below) about Buffer_Recv() being not allocated.
        ! So make a dummy allocation.
        allocate(buffer_recv(1))
      endif ! of if (MPI_Rank==rank)

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call mpi_gatherv(buffer_send,(min(ij_end,ij)-ij_begin+1)*ll,mpi_real8, &
                       buffer_recv,recv_count,displ,mpi_real8,rank,comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)

      if (mpi_rank==rank) then

        if (ij==ip1jmp1) then
          do i=0,mpi_size-1
            call unpack_data(field((jj_begin_para(i)-1)*iip1+1,1),ij,ll, &
                             jj_end_para(i)-jj_begin_para(i)+1,buffer_recv(displ(i)+1))
          enddo
        else if (ij==ip1jm) then
          do i=0,mpi_size-1
            call unpack_data(field((jj_begin_para(i)-1)*iip1+1,1),ij,ll, &
                             min(jj_end_para(i),jjm)-jj_begin_para(i)+1,buffer_recv(displ(i)+1))
          enddo
        endif
      endif
    ENDIF ! using_mpi

  end subroutine gather_field


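! allgather_field: make the full field available on every process by
! gathering it onto rank 0 and then broadcasting it.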
  subroutine allgather_field(Field,ij,ll)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll
    REAL, dimension(ij,ll) :: Field
    INTEGER :: ierr

    IF (using_mpi) THEN
      call gather_field(field,ij,ll,0)
!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call mpi_bcast(field,ij*ll,mpi_real8,0,comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)
    ENDIF

  end subroutine allgather_field

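! broadcast_field: broadcast a field from process 'rank' to all others.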
  subroutine broadcast_field(Field,ij,ll,rank)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
    INTEGER :: ij,ll
    REAL, dimension(ij,ll) :: Field
    INTEGER :: rank
    INTEGER :: ierr

    IF (using_mpi) THEN

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call mpi_bcast(field,ij*ll,mpi_real8,rank,comm_lmdz,ierr)
#endif
!$OMP END CRITICAL (MPI)

    ENDIF
  end subroutine broadcast_field


! Subroutine verif_hallo(Field,ij,ll,up,down)
!   implicit none
!#include "dimensions.h"
!#include "paramet.h"
!   include 'mpif.h'
!
!   INTEGER :: ij,ll
!   REAL, dimension(ij,ll) :: Field
!   INTEGER :: up,down
!
!   REAL,dimension(ij,ll) :: NewField
!
!   NewField=0
!
!   ijb=ij_begin
!   ije=ij_end
!   if (pole_nord)
!   NewField(ij_be

  end MODULE parallel_lmdz