*CMZ : 11/11/94 15.39.18 by John Apostolakis CERN GP-MIMD 2
*-- Author : John Apostolakis CERN GP-MIMD 2 13/07/94
**DOC
C Parallel Geant - main routines, to be used in parallelising GEANT programs.
C Target architectures: scalable parallel processors and (for appropriate
C problems) workstation clusters
C Implementation relies on availability of:
C An implementation of the MPI (Message Passing Interface Standard) for the
C relevant ensemble of machines or parallel processor.
C
C A common file base for input and output files, such as NFS or AFS.
C
C [ NOTE: MPI implementations exist for most Unix machines, including
C the portable, public domain implementation. For more information see
C the MPI Web page at URL http://www.mcs.anl.gov/mpi/index.html ]
C ---
C First released version March 1996, John Apostolakis japost@dxcern.cern.ch
C --------------------------------------------------------------------------
C subroutine gprun
C
C Function: in some cases of static event partitioning, it
C makes sure each node only does its portion of the events
C to be simulated.
C
C called by: grun (if the CERNLIB_PARA switch is used)
C------------------------------------------------------------------------
**ENDDOC
#if defined(CERNLIB_PARA)
      subroutine gprun
c Routine is called at the start of grun, to normalize the number of events
c in the case of a "static" decomposition of events
      implicit none
*
#include "geant321/gcflag.inc"
#include "geant321/multiprox.inc"
*
*
      integer modsize
*
c The default method of partitioning events between processors is
c the static method.

c To choose the dynamic method, in which events are shared out
c to different nodes by a master, the user must call gpdynamic
c and write a multiplexing routine (modifying the routine muxread
c in the example gexam3).
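c
c For illustration only (not part of the original code): the dynamic
c method has to be selected before grun is entered, e.g. from the
c user's initialisation code (uginit in many GEANT programs):
c
c        call gpdynamic( ... )
c
c The argument list is deliberately not shown here; see the example
c gexam3 for the actual call and the matching muxread routine.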
47 | ||
48 | ||
49 | if ( npleader .eq. -1) then | |
50 | c | |
51 | c Static distribution of events, the new default | |
52 | c All nodes share the events equally. It will work well if events take | |
53 | c close to the same CPU time to simulate, ie the standard deviation | |
54 | c of the event time is small compared to the average event time. | |
55 | c | |
56 | c In this case do a static distribution of events ... | |
57 | c (assumes 1<=nprank<npsize, so it is OK for MPI, where 0<=nprank<npsize ) | |
58 | c | |
59 | c | |
60 | nevtot = nevent | |
61 | nevent = nevtot / npsize | |
62 | modsize = mod(nevtot,npsize) | |
63 | if( nprank .lt. modsize ) then | |
64 | nevent = nevent + 1 | |
65 | endif | |
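c
c For illustration only (not part of the original code): with, say,
c nevtot = 100 events and npsize = 8 nodes, nevent = 100/8 = 12 and
c modsize = mod(100,8) = 4, so ranks 0-3 simulate 13 events each and
c ranks 4-7 simulate 12 each, i.e. 4*13 + 4*12 = 100 events in total.
c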
      else
c
c Dynamic distribution of events
c
c This version does dynamic distribution of events, and terminates at
c the end of the input file. The number of events each node will
c simulate is not determined in advance. Each node requests work
c from the "master" node, which gives it out (the "farming" model).
c The number of events that each node will simulate can vary, so it
c is best to make the maximum number of events
c per node equal to the original nevent [ ... or else one node could
c terminate unexpectedly ]
c
         nevtot = nevent
c
c This code must be used in conjunction with additional code in
c the routine "gukine" to handle sharing the work load. An example
c of such a routine can be found in "muxread".
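c
c For illustration only (not part of the original code): in the
c "farming" model, the gukine-level code on a worker node might obtain
c the next event from the leader with an MPI request/reply pair, e.g.
c
c        call MPI_SEND(idummy, 1, MPI_INTEGER, npleader, itagreq,
c       +              MPI_COMM_WORLD, ierr)
c        call MPI_RECV(ievent, 1, MPI_INTEGER, npleader, itagevt,
c       +              MPI_COMM_WORLD, istatus, ierr)
c
c Here idummy, ievent, itagreq, itagevt and istatus are hypothetical
c names; the actual protocol is the one implemented in muxread (see
c the example gexam3).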
84 | ||
85 | endif | |
86 | ||
87 | return | |
88 | end | |
89 | #endif |