*CMZ : 08/23/94 15.20.00 by John Apostolakis CERN GP-MIMD 2
*-- Author :
      subroutine GPSUMHR( idh, file, chopt )
c
c     Saves histograms into a single RZ file, putting each
c     process' histogram into a subdirectory, as well as saving the
c     running total in the subdirectory 'totals'.  At the end the
c     directory 'totals' will contain the sum total of all
c     contributions from all processes.
c
c     ( A 'replacement' for hrput for parallel Geant. )
c
c.   Implementation notes:
c.
c.   Currently chopt is ignored!
c.
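c.   A hypothetical usage sketch (the file name here is invented for
c.   illustration): once every process has filled histogram idh = 10,
c.   each one calls
c.
c.       call GPSUMHR( 10, 'gphist.rzf', ' ' )
c.
c.   and 'gphist.rzf' ends up with one subdirectory per process plus
c.   a TOTALS directory holding the summed histogram.
c.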
c-------------------------------------------------------------------------
#if defined(CERNLIB_PARA)
      IMPLICIT NONE
      INTEGER idh
      CHARACTER*(*) file, chopt

      INTEGER myid
      character*13 myname
#include "geant321/mpifinc.inc"
#include "geant321/multiprox.inc"
C
      INTEGER istat, icycle, lunhist
C
      integer iquest(100), nrec, itag
      integer npstat(MPI_STATUS_SIZE), ierr
      integer idebgsvh, npnext, nfirst, indivout
      character*1 filemode
      common /quest/ iquest
      data nrec / 1024 /
      data itag / 1001 /
      data idebgsvh / 1 /
      data indivout / 1 /
      data lunhist / 29 /
      parameter (nfirst=0)
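c
c     nfirst   : rank of the node that creates the file and TOTALS.
c     nrec     : RZ record length (in words) given to HROPEN when the
c                file is created; set to 0 for the 'U' open (the print
c                after HROPEN reports the value that comes back).
c     itag     : MPI message tag of the token passed around the ring.
c     idebgsvh : when 1, list the //PAWC and //OUTPUT directories.
c     indivout : when 1, also write this node's histogram into its own
c                per-process subdirectory.
c     lunhist  : logical unit number used for the histogram file.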
c----------------------------------------------------------------------
c     J. Apostolakis: Use a directory for each process and a
c                     directory called 'totals' for the totals,
c     v0.1 February 9, 1994    using mvlock/mvunlock
c     v0.2 August 4, 1994      using mpi_{send,recv}
c
c     Node 0 creates the file, the others wait their turn (a message
c     goes around that each node receives, does its stuff and sends on)
c  [ Older idea was to use a barrier: call mpi_barrier( MPI_COMM_WORLD ) ]
c
c     Notes:
c     x The current scheme is not robust, but it has worked well.
c     x The potential problem is a node failing before it reaches this
c     x point, or during its own call to gpsumhr.  The former could be
c     x handled by replacing the token-passing scheme with one that
c     x tolerates node failures, e.g. by locking the file with
c     x lockf/unlockf ...
c     x
c     x For file creation, having all nodes try to create a new hbook
c     x file will not work, since one would overwrite another ...
c
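c     Order in which the ranks touch the file (the token is the istat
c     value passed with mpi_send/mpi_recv):
c
c        node 0           opens with 'N', creates //OUTPUT/TOTALS
c          -> node 1      opens with 'U', adds its contribution
c          -> ...
c          -> node npsize-1
c          -> node 0      final mpi_recv at the end of this routine
c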
      if( nprank .eq. nfirst ) then
         filemode= 'N'
         nrec= 1024
      else
         filemode= 'U'
         nrec= 0
c
c        Wait here until the previous node is finished !
c
         call mpi_recv( istat, 1, MPI_INTEGER,
     &                  nprank-1, itag, MPI_COMM_WORLD, npstat, ierr )
      endif

      CALL HROPEN ( LUNHIST, 'OUTPUT', file, filemode, nrec, istat)

      if(istat.ne.0) then
         print *, ' HROPEN of ', file, ' on node ', nprank,
     &            ' failed in gpsumhr.  Istat = ', istat
      else
         print *, ' HROPEN of ', file, ' on node ', nprank,
     &            ' succeeded and gave nrec=', nrec
      endif

      myid = nprank
      if( myid.ge.10000 ) then
         print *, 'Warning in gpsumhr: The id (', myid,
     &            ') is too big to be used in gpsumhr.f'
         myid = mod(myid, 10000)
      endif
      write (myname, '(a7,i6)') 'process', myid+10000
      myname(8:8)='0'
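c     myname now has the form 'process0NNNNN', NNNNN = 10000+myid,
c     a fixed-width per-process directory name (rank 7 gives
c     'process010007'); the assignment above overwrites the leading
c     blank of the I6 field with '0'.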

      if( idebgsvh .eq. 1 ) then
         call hldir ( '//PAWC', ' ' )
         call hldir ( '//OUTPUT', 't' )
      endif

      call hcdir ( '//OUTPUT', ' ' )

c     Could make 'indivout' an option: it creates subdirectories with
c     each node's output.
      if( indivout .eq. 1 ) then
         call hmdir ( myname, ' ' )
         call hcdir ( myname, ' ' ) ! if it has been created already ...
         CALL HROUT ( idh, icycle, ' ')
      endif

      if( nprank .eq. nfirst ) then
         call hmdir ( '//OUTPUT/TOTALS', 'S')
      else
         call hcdir ( '//OUTPUT/TOTALS', ' ')
         CALL HRIN ( idh, 888888, 99999)
      endif
c     ( The offset 99999 is an undocumented feature => it adds
c       the histograms read from the file to the ones in memory. )
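c     Every node after the first thus reads the current TOTALS
c     histogram and adds it to its own in-memory copy, and the hrout
c     below writes the updated running total back, so that after the
c     last node in the ring TOTALS holds the sum over all processes.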
c     ------------------------------------------------------------------
      call hrout ( idh, icycle, 'T')

      CALL HREND ('OUTPUT')
      close( LUNHIST )
c
c     Send a message to the next node, which is waiting until this one
c     is finished !
c
      npnext = nprank+1
      if ( npnext .ge. npsize ) npnext = npnext - npsize
      call mpi_send( istat, 1, MPI_INTEGER,
     &               npnext, itag, MPI_COMM_WORLD, ierr )

c
c     Finally have the first node receive the last node's message!
c
      if( nprank .eq. nfirst ) then
         call mpi_recv( istat, 1, MPI_INTEGER,
     &                  npsize-1, itag, MPI_COMM_WORLD, npstat, ierr )
      endif
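c     ( This final receive makes node 0 wait for the token to come
c       back from the last node, so the first node returns only after
c       every process has written its contribution to the file. )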
c-----------------------------------------------------------------------

      RETURN
      END
#endif