source: trunk/examples/extended/parallel/MPI/mpi_interface/src/G4MPImanager.cc@ 809

Last change on this file since 809 was 807, checked in by garnier, 17 years ago

update

File size: 15.6 KB
Line 
1//
2// ********************************************************************
3// * License and Disclaimer *
4// * *
5// * The Geant4 software is copyright of the Copyright Holders of *
6// * the Geant4 Collaboration. It is provided under the terms and *
7// * conditions of the Geant4 Software License, included in the file *
8// * LICENSE and available at http://cern.ch/geant4/license . These *
9// * include a list of copyright holders. *
10// * *
11// * Neither the authors of this software system, nor their employing *
12// * institutes,nor the agencies providing financial support for this *
13// * work make any representation or warranty, express or implied, *
14// * regarding this software system or assume any liability for its *
15// * use. Please see the license in the file LICENSE and URL above *
16// * for the full disclaimer and the limitation of liability. *
17// * *
18// * This code implementation is the result of the scientific and *
19// * technical work of the GEANT4 collaboration. *
20// * By using, copying, modifying or distributing the software (or *
21// * any work based on the software) you agree to acknowledge its *
22// * use in resulting scientific publications, and indicate your *
23// * acceptance of all terms of the Geant4 Software license. *
24// ********************************************************************
25//
26// $Id: G4MPImanager.cc,v 1.1 2007/11/16 14:05:41 kmura Exp $
27// $Name: $
28//
29// ====================================================================
30// G4MPImanager.cc
31//
32// 2007 Q
33// ====================================================================
34#include "G4MPImanager.hh"
35#include "G4MPImessenger.hh"
36#include "G4MPIsession.hh"
37#include "G4MPIbatch.hh"
38#include "G4MPIstatus.hh"
39#include "G4MPIrandomSeedGenerator.hh"
40#include "G4UImanager.hh"
41#include "G4RunManager.hh"
42#include "G4StateManager.hh"
43#include "G4Run.hh"
44#include <time.h>
45#include <stdio.h>
46#include <getopt.h>
47
// Singleton instance pointer; set in Initialize(), cleared in the destructor.
G4MPImanager* G4MPImanager::theManager= 0;
49
50// ====================================================================
51// wrappers for thread functions
52// ====================================================================
53
////////////////////////////////////////////////////////////////
static void thread_ExecuteThreadCommand(const G4String* command)
////////////////////////////////////////////////////////////////
{
  // pthread entry point: forwards the UI command to the singleton manager.
  // NOTE(review): assumes *command outlives this thread — the caller
  // (ExecuteBeamOnThread) uses a static G4String for exactly that reason.
  G4MPImanager::GetManager()-> ExecuteThreadCommand(*command);
}
60
61
62// ====================================================================
63//
64// class description
65//
66// ====================================================================
67
////////////////////////////
G4MPImanager::G4MPImanager()
  : verbose(0),
    qfcout(false),
    qinitmacro(false),
    qbatchmode(false),
    threadID(0),
    masterWeight(1.)
////////////////////////////
{
  // Default constructor: initializes MPI without forwarding argc/argv.
  // THREAD_SERIALIZED is requested because a separate beamOn thread may
  // issue MPI calls, but only one thread at a time does so.
  //MPI::Init();
  MPI::Init_thread(MPI::THREAD_SERIALIZED);
  Initialize();
}
82
83
/////////////////////////////////////////////////
G4MPImanager::G4MPImanager(int argc, char** argv)
  : verbose(0), qfcout(false),
    qinitmacro(false), qbatchmode(false),
    threadID(0),
    masterWeight(1.)
/////////////////////////////////////////////////
{
  // Constructor with command-line forwarding: MPI strips its own options
  // from argc/argv before ParseArguments handles the G4MPI ones.
  //MPI::Init(argc, argv);
  MPI::Init_thread(argc, argv, MPI::THREAD_SERIALIZED);
  Initialize();
  ParseArguments(argc, argv);
}
97
98
/////////////////////////////
G4MPImanager::~G4MPImanager()
/////////////////////////////
{
  // Close the per-rank output file opened in ParseArguments (slaves only).
  if(isSlave && qfcout) fscout.close();

  delete status;
  delete messenger;
  delete session;

  // Release the duplicated communicator before shutting MPI down.
  COMM_G4COMMAND.Free();

  MPI::Finalize();

  theManager= 0;
}
115
116
117////////////////////////////////////////
118G4MPImanager* G4MPImanager::GetManager()
119////////////////////////////////////////
120{
121 if(theManager==0) {
122 G4Exception("G4MPImanager is not created.");
123 }
124 return theManager;
125}
126
127
///////////////////////////////
void G4MPImanager::Initialize()
///////////////////////////////
{
  // One-time setup shared by both constructors: establishes the singleton,
  // caches rank/size, duplicates the world communicator and creates the
  // messenger/session/status helpers. Must be called exactly once.
  if(theManager != 0) {
    G4Exception("G4MPImanager is constructed twice.");
  }
  theManager= this;

  // get rank information
  size= MPI::COMM_WORLD.Get_size();
  rank= MPI::COMM_WORLD.Get_rank();
  isMaster= (rank == RANK_MASTER);
  isSlave= (rank != RANK_MASTER);

  // initialize MPI communicator: a private dup so G4MPI traffic cannot
  // collide with user communication on MPI::COMM_WORLD
  COMM_G4COMMAND= MPI::COMM_WORLD.Dup();

  // new G4MPI stuffs
  messenger= new G4MPImessenger(this);

  session= new G4MPIsession;

  status= new G4MPIstatus;

  // default seed generator is random generator;
  // every rank immediately receives its own seed
  seedGenerator= new G4MPIrandomSeedGenerator;
  DistributeSeeds();

}
158
159
160////////////////////////////////////////////////////////
161void G4MPImanager::ParseArguments(int argc, char** argv)
162////////////////////////////////////////////////////////
163{
164 G4int qhelp= 0;
165 G4String ofprefix="mpi";
166
167 G4int c;
168 while (1) {
169 G4int option_index= 0;
170 static struct option long_options[] = {
171 {"help", 0, 0, 0},
172 {"verbose", 0, 0, 0},
173 {"init", 1, 0, 0},
174 {"ofile", 2, 0, 0},
175 {0, 0, 0, 0}
176 };
177
178 opterr= 0; // suppress message
179 c= getopt_long(argc, argv, "hvi:o", long_options, &option_index);
180 opterr= 1;
181
182 if(c == -1) break;
183
184 switch (c) {
185 case 0:
186 switch(option_index) {
187 case 0 : // --help
188 qhelp= 1;
189 break;
190 case 1 : // --verbose
191 verbose= 1;
192 break;
193 case 2 : // --init
194 qinitmacro= true;
195 initFileName= optarg;
196 break;
197 case 3 : // --ofile
198 qfcout= true;
199 if(optarg) ofprefix= optarg;
200 break;
201 }
202 break;
203 case 'h' :
204 qhelp=1;
205 break;
206 case 'v' :
207 verbose= 1;
208 break;
209 case 'i' :
210 qinitmacro= true;
211 initFileName= optarg;
212 break;
213 case 'o' :
214 qfcout= true;
215 break;
216 default:
217 break;
218 }
219 }
220
221 // show help
222 if(qhelp) {
223 if(isMaster) ShowHelp();
224 MPI::Finalize();
225 exit(0);
226 }
227
228 // file output
229 if(isSlave && qfcout) {
230 G4String prefix= ofprefix+".%03d"+".cout";
231 char str[1024];
232 sprintf(str, prefix.c_str(), rank);
233 G4String fname(str);
234 fscout.open(fname.c_str(), std::ios::out);
235 }
236
237 // non-option ARGV-elements ...
238 if (optind < argc ) {
239 qbatchmode= true;
240 macroFileName= argv[optind];
241 }
242}
243
244
245//////////////////////////////////////////
246void G4MPImanager::Wait(G4int ausec) const
247//////////////////////////////////////////
248{
249 struct timespec treq, trem;
250 treq.tv_sec = 0;
251 treq.tv_nsec = ausec*1000;
252
253 nanosleep(&treq, &trem);
254}
255
256// ====================================================================
257
/////////////////////////////////
void G4MPImanager::UpdateStatus()
/////////////////////////////////
{
  // Snapshots this rank's run state (run id, event progress, G4 state)
  // into the local G4MPIstatus object.
  G4RunManager* runManager= G4RunManager::GetRunManager();
  const G4Run* run= runManager-> GetCurrentRun();

  G4int runid, eventid, neventTBP;

  if (run) { // running...
    runid= run-> GetRunID();
    neventTBP= run -> GetNumberOfEventToBeProcessed();
    eventid= run-> GetNumberOfEvent();
  } else { // no run in progress: report zeros
    runid= 0;
    eventid= 0;
    neventTBP= 0;
  }

  G4StateManager* stateManager= G4StateManager::GetStateManager();
  G4ApplicationState g4state= stateManager-> GetCurrentState();

  status-> SetStatus(rank, runid, neventTBP, eventid, g4state);
}
282
283
///////////////////////////////
void G4MPImanager::ShowStatus()
///////////////////////////////
{
  // Collective status report: every rank must call this. Slaves send their
  // packed status to the master, which prints each rank's status followed
  // by a summary line aggregating event counts and CPU time.
  G4int buff[G4MPIstatus::NSIZE];

  UpdateStatus();
  G4bool gstatus= CheckThreadStatus();

  if(isMaster) {
    status-> Print(); // for master itself

    G4int nev= status-> GetEventID();
    G4int nevtp= status-> GetNEventToBeProcessed();
    G4double cputime= status-> GetCPUTime();

    // receive from each slave
    for (G4int islave=1; islave< size; islave++) {
      COMM_G4COMMAND.Recv(buff, G4MPIstatus::NSIZE, MPI::INT,
                          islave, TAG_G4STATUS);
      status-> UnPack(buff);
      status-> Print();

      // aggregation
      nev+= status-> GetEventID();
      nevtp+= status-> GetNEventToBeProcessed();
      //mpistate= status-> GetG4State();
      cputime+= status-> GetCPUTime();
    }

    // "Run" when a beamOn thread is active on any rank, else "Idle"
    G4String strStatus;
    if(gstatus) {
      strStatus= "Run";
    } else {
      strStatus= "Idle";
    }

    G4cout << "-------------------------------------------------------"
           << G4endl
           << "* #ranks= " << size
           << " event= " << nev << "/" << nevtp
           << " state= " << strStatus
           << " time= " << cputime << "s"
           << G4endl;
  } else {
    // slaves: pack local status and ship it to the master
    status-> Pack(buff);
    COMM_G4COMMAND.Send(buff, G4MPIstatus::NSIZE, MPI::INT,
                        RANK_MASTER, TAG_G4STATUS);
  }
}
334
335// ====================================================================
336
337////////////////////////////////////
338void G4MPImanager::DistributeSeeds()
339////////////////////////////////////
340{
341 std::vector<G4long> seedList= seedGenerator-> GetSeedList();
342 CLHEP::HepRandom::setTheSeed(seedList[rank]);
343}
344
345
//////////////////////////////
void G4MPImanager::ShowSeeds()
//////////////////////////////
{
  // Collective seed report: slaves send their current CLHEP seed to the
  // master, which prints one line per rank. Every rank must call this.
  G4long buff;

  if(isMaster) {
    // print master
    G4cout << "* rank= " << rank
           << " seed= " << CLHEP::HepRandom::getTheSeed()
           << G4endl;
    // receive from each slave
    for (G4int islave=1; islave< size; islave++) {
      COMM_G4COMMAND.Recv(&buff, 1, MPI::LONG, islave, TAG_G4SEED);
      G4cout << "* rank= " << islave
             << " seed= " << buff
             << G4endl;
    }
  } else { // slaves
    buff= CLHEP::HepRandom::getTheSeed();
    COMM_G4COMMAND.Send(&buff, 1, MPI::LONG, RANK_MASTER, TAG_G4SEED);
  }
}
369
370
371////////////////////////////////////////////////////
372void G4MPImanager::SetSeed(G4int inode, G4long seed)
373////////////////////////////////////////////////////
374{
375 if(rank==inode) {
376 CLHEP::HepRandom::setTheSeed(seed);
377 }
378}
379
380// ====================================================================
381
////////////////////////////////////////
G4bool G4MPImanager::CheckThreadStatus()
////////////////////////////////////////
{
  // Returns true when a beamOn worker thread is running on ANY rank.
  // Collective call: the master gathers each slave's flag, ORs them with
  // its own, then broadcasts the combined result to everyone.
  unsigned buff;
  G4bool qstatus= false;

  if(isMaster) {
    // NOTE(review): treats the pthread_t id as a boolean flag (non-zero
    // means a worker thread exists); relies on pthread_t being an
    // arithmetic type — confirm on the target platform.
    qstatus= threadID;
    // get slave status
    for (G4int islave=1; islave< size; islave++) {
      COMM_G4COMMAND.Recv(&buff, 1, MPI::UNSIGNED, islave, TAG_G4STATUS);
      qstatus |= buff;
    }
  } else {
    buff= unsigned(threadID);
    COMM_G4COMMAND.Send(&buff, 1, MPI::UNSIGNED, RANK_MASTER, TAG_G4STATUS);
  }

  // broadcast the aggregated flag back to all ranks
  buff= qstatus; // for master
  COMM_G4COMMAND.Bcast(&buff, 1, MPI::UNSIGNED, RANK_MASTER);
  qstatus= buff; // for slave

  return qstatus;
}
408
409
////////////////////////////////////////////////////////////////
void G4MPImanager::ExecuteThreadCommand(const G4String& command)
////////////////////////////////////////////////////////////////
{
  // this method is a thread function.
  // Runs the UI command (typically "/run/beamOn ...") in the worker
  // thread and reports failures on G4cerr.
  G4UImanager* UI= G4UImanager::GetUIpointer();
  G4int rc= UI-> ApplyCommand(command);

  // keep only the command-status category (hundreds digit and above)
  G4int commandStatus = rc - (rc%100);

  switch(commandStatus) {
  case fCommandSucceeded:
    break;
  case fIllegalApplicationState:
    G4cerr << "illegal application state -- command refused" << G4endl;
    break;
  default:
    G4cerr << "command refused (" << commandStatus << ")" << G4endl;
    break;
  }

  // thread is joined
  // NOTE(review): this joins threadID from within the worker thread
  // itself, i.e. pthread_join on the calling thread — POSIX reports
  // EDEADLK for that. Verify the intended cleanup; JoinBeamOnThread
  // already performs the join from the main thread.
  if(threadID) {
    pthread_join(threadID, 0);
    threadID= 0;
  }

  return;
}
439
440
///////////////////////////////////////////////////////////////
void G4MPImanager::ExecuteBeamOnThread(const G4String& command)
///////////////////////////////////////////////////////////////
{
  // Launches the UI command in a background pthread so the session stays
  // responsive; refuses to start a second thread while one is running.
  G4bool threadStatus= CheckThreadStatus();

  if (threadStatus) {
    if(isMaster) {
      G4cout << "G4MPIsession:: beamOn is still running." << G4endl;
    }
  } else { // ok
    // static: the string must outlive this function because the new
    // thread reads it asynchronously through the pointer passed below
    static G4String cmdstr;
    cmdstr= command;
    G4int rc= pthread_create(&threadID, 0,
                             (Func_t)thread_ExecuteThreadCommand,
                             (void*)&cmdstr);
    if (rc != 0)
      G4Exception("G4MPIsession:: failed to create a beamOn thread.");
  }
}
461
462
463/////////////////////////////////////
464void G4MPImanager::JoinBeamOnThread()
465/////////////////////////////////////
466{
467 if(threadID) {
468 pthread_join(threadID, 0);
469 threadID= 0;
470 }
471}
472
473
474// ====================================================================
475
476////////////////////////////////////////////////////////////
477G4String G4MPImanager::BcastCommand(const G4String& command)
478////////////////////////////////////////////////////////////
479{
480 enum { BUFF_SIZE= 512 };
481 static char sbuff[BUFF_SIZE];
482 command.copy(sbuff,BUFF_SIZE);
483 G4int len= command.size();
484 sbuff[len]='\0'; // no boundary check
485
486 // "command" is not yet fixed in slaves at this time.
487
488 // waiting message exhausts CPU in LAM!
489 //COMM_G4COMMAND.Bcast(sbuff, BUFF_SIZE, MPI::CHAR, RANK_MASTER);
490
491 // another implementation
492 if( isMaster ) {
493 for (G4int islave=1; islave< size; islave++) {
494 COMM_G4COMMAND.Send(sbuff, BUFF_SIZE, MPI::CHAR, islave, TAG_G4COMMAND);
495 }
496 } else {
497 // try non-blocking receive
498 MPI::Request request= COMM_G4COMMAND.Irecv(sbuff, BUFF_SIZE, MPI::CHAR,
499 RANK_MASTER, TAG_G4COMMAND);
500 // polling...
501 MPI::Status status;
502 while(! request.Test(status)) {
503 Wait(100);
504 }
505 }
506
507 return G4String(sbuff);
508}
509
510// ====================================================================
511
512/////////////////////////////////////////////////////////////////////////
513void G4MPImanager::ExecuteMacroFile(const G4String& fname, G4bool qbatch)
514/////////////////////////////////////////////////////////////////////////
515{
516 G4MPIbatch* batchSession= new G4MPIbatch(fname, qbatch);
517 batchSession-> SessionStart();
518 delete batchSession;
519}
520
521
///////////////////////////////////////////////////////
void G4MPImanager::BeamOn(G4int nevent, G4bool qdivide)
///////////////////////////////////////////////////////
{
  // Starts a run on every rank. With qdivide=true the nevent events are
  // split across ranks (the master's share is scaled by masterWeight and
  // absorbs the integer-division remainder); with qdivide=false every
  // rank generates all nevent events (for test use).
  G4RunManager* runManager= G4RunManager::GetRunManager();

  if(qdivide) { // events are divided
    G4double ntot= masterWeight+size-1.;  // effective number of workers
    G4int nproc= G4int(nevent/ntot);      // events per slave
    G4int nproc0= nevent-nproc*(size-1);  // master's share incl. remainder

    if(verbose>0 && isMaster) {
      G4cout << "#events in master=" << nproc0 << " / "
             << "#events in slave=" << nproc << G4endl;
    }

    status-> StartTimer(); // start timer
    if(isMaster) runManager-> BeamOn(nproc0);
    else runManager-> BeamOn(nproc);
    status-> StopTimer(); // stop timer

  } else { // same events are generated in each node (for test use)
    if(verbose>0 && isMaster) {
      G4cout << "#events in master=" << nevent << " / "
             << "#events in slave=" << nevent << G4endl;
    }
    status-> StartTimer(); // start timer
    runManager-> BeamOn(nevent);
    status-> StopTimer(); // stop timer
  }
}
553
554
555/////////////////////////////////////////////////
556void G4MPImanager::Print(const G4String& message)
557/////////////////////////////////////////////////
558{
559 if(isMaster){
560 std::cout << message << std::flush;
561 } else {
562 if(qfcout) { // output to a file
563 fscout << message << std::flush;
564 } else { // output to stdout
565 std::cout << rank << ":" << message << std::flush;
566 }
567 }
568}
569
570
571///////////////////////////////////
572void G4MPImanager::ShowHelp() const
573///////////////////////////////////
574{
575 if(isSlave) return;
576
577 G4cout << "Geant4 MPI interface" << G4endl;
578 G4cout << "usage:" << G4endl;
579 G4cout << "<app> [options] [macro file]"
580 << G4endl << G4endl;
581 G4cout << " -h, --help show this message."
582 << G4endl;
583 G4cout << " -v, --verbose show verbose message"
584 << G4endl;
585 G4cout << " -i, --init=FNAME set an init macro file"
586 << G4endl;
587 G4cout << " -o, --ofile[=FNAME] set slave output to a flie"
588 << G4endl;
589 G4cout << G4endl;
590
591}
592
Note: See TracBrowser for help on using the repository browser.