/*
  Make an analysis/summary of the syswatch.root files created in an analysis train.

  Summary created:  Memory usage - per train/task
                    CPU usage    - per train/task
                    IO load      - per hostName

  Input:   syswatch.txt         - list of syswatch.root files
  Output:  syswatchSummary.root - file with default pictures
                                - and summary information in the form of trees

  //
  // The chain is created from the input list of syswatch files.
  // To run locally, e.g.:
  //

  .L $ALICE_ROOT/PWG1/PWG1macros/syswatchEvalTrain.C

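  A minimal usage sketch (hypothetical paths; assumes the syswatch.root files
  have been collected locally and listed, one path per line, in syswatch.txt):

    $ ls myTrainOutput/*/syswatch.root > syswatch.txt
    $ aliroot -b
    root [0] .L $ALICE_ROOT/PWG1/PWG1macros/syswatchEvalTrain.C
    root [1] syswatchEvalTrain();   // produces syswatchSummary.root in the working directory
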
*/


#if !defined(__CINT__) || defined(__MAKECINT__)
#include <fstream>
#include "TSystem.h"
#include "TFile.h"
#include "TObjArray.h"
#include "TObjString.h"
#include "TChain.h"
#include "TH1.h"
#include "TCut.h"
#include "THashList.h"
#include "TGraphErrors.h"
#include "TF1.h"
#include "TMap.h"
#include "TTreeStream.h"
#endif


TChain * chainWatch = 0;
TTreeSRedirector * pcstream = 0;
TObjArray * picArray = new TObjArray;
TObjArray * taskSummary = new TObjArray;
Int_t maxEntries = 100000;  // maximum number of points to visualize
//
// export variables
//
Double_t ftimeSlope = 0;    // time slope per event
Double_t fmemSlope  = 0;    // mem slope per event
Double_t fmemOffset = 0;    // mem offset at the initialization
Double_t fmeandVM   = 0;    // mean delta VM per task
Double_t fmeandT    = 0;    // mean delta T per task
TCut cutStep1 = "1";        // prescale cut - skip points if there are too many
//
// functions:
//
void AddToChain(TString inputList);
void SetAliasTrain();
void GetSummaryTrain();
void GetSummaryTask();
void GetSummaryHost();
void PrintPairSummary(TObjArray *array);


void syswatchEvalTrain(){
  //
  // Build the chain, create the summaries and write them to syswatchSummary.root
  //
  AddToChain("syswatch.txt");
  SetAliasTrain();
  Int_t allEntries = chainWatch->GetEntries();
  // prescale the points if the chain has more entries than maxEntries
  if (allEntries>maxEntries) cutStep1 = Form("((%s%d)==0)","Entry$%",allEntries/maxEntries+1);
  pcstream = new TTreeSRedirector("syswatchSummary.root");
  pcstream->GetFile()->cd();
  //
  GetSummaryTrain();
  GetSummaryTask();
  GetSummaryHost();
  //picArray->Write("Pictures");
  //taskSummary->Write("TaskSummary",TObjArray::kSingleKey);
  delete pcstream;
}

void PrintPairSummary(TObjArray *array){
  //
  // print the (name, value) pairs stored in the array
  //
  for (Int_t id1=0; id1<array->GetEntries(); id1++){
    TPair *pair= (TPair*)array->At(id1);
    printf("%s\t%s\n",pair->GetName(),pair->Value()->GetName());
  }
}


void AddToChain(TString inputList){
  //
  // add the files from inputList to the chain
  //
  if (!chainWatch) chainWatch = new TChain("syswatch");
  std::ifstream in;
  in.open(inputList.Data());
  Int_t counter=0;
  TString currentFile;
  while (in >> currentFile) {   // stop at the end of the list
    if (!currentFile.Contains(".root")) continue;
    chainWatch->Add(currentFile.Data());
    printf("%d\t%s\n",counter,currentFile.Data());
    counter++;
  }
}
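
// The input list is expected to contain one syswatch.root path per line.
// Hypothetical example of a syswatch.txt (local paths and root:// URLs both work with TChain::Add):
//
//   /data/train/run1/syswatch.root
//   /data/train/run2/syswatch.root
//   root://some.storage.element//path/to/syswatch.root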


void SetAliasTrain(){
  //
  // Time, memory consumption and host id are stored for each snapshot.
  // Snapshots are identified using 3 ids: id0, id1 and id2;
  // each snapshot is also identified by a snapshot name.
  //
  // The meaning of the ids depends on the type of analysis.
  //
  // Aliases according to the train conventions.
  // The id convention in the ANALYSIS train is the following:
  //   id1 - task id number
  //       - 1000 reserved for the Handlers_BeginEvent
  chainWatch->SetAlias("EventNumber","id0");
  chainWatch->SetAlias("isTask","id1<200&&id1>=0");
  chainWatch->SetAlias("isReading","id1==1000");
  chainWatch->SetAlias("isEvent","id0>10");   // skip the first events
  chainWatch->SetAlias("hostName","hname");
  chainWatch->SetAlias("stampName","sname");
}
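
// Once the aliases are set they can be used directly in chain queries
// (interactive sketch; any of the aliases above can be combined in a TCut), e.g.:
//
//   chainWatch->Draw("VM:EventNumber", "isTask && isEvent", "prof");
//   chainWatch->Draw("deltaT:stampName", "isTask", "profile");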

void GetSummaryTrain(){
  //
  // Get the summary info per train
  //
  TF1 ftime("ftime","[0]*x");
  TF1 fmem("fmem","pol1");
  ftime.SetLineColor(2);
  fmem.SetLineColor(2);
  TGraph *gr = 0;
  Int_t entries=0;
  //
  //
  //
  entries = chainWatch->Draw("T:EventNumber*0.001",cutStep1+"isTask&&isEvent","goff");
  gr=new TGraphErrors(entries, chainWatch->GetV2(),chainWatch->GetV1(),0,0);
  gr->SetName("TvEventNumber");
  gr->SetTitle("T:EventNumber");
  gr->GetXaxis()->SetTitle("Event Nr. (10^3)");
  gr->GetYaxis()->SetTitle("T (s)");
  gr->Fit(&ftime,"ROB=0.7");
  gr->Draw("ap");
  picArray->AddLast(gr->Clone());
  pcstream->GetFile()->cd();
  gr->Write();
  //
  entries = chainWatch->Draw("0.001*VM:0.001*EventNumber",cutStep1+"isTask&&isEvent");
  gr=new TGraphErrors(entries, chainWatch->GetV2(),chainWatch->GetV1(),0,0);
  gr->SetName("VMvEventNumber");
  gr->SetTitle("VM:EventNumber");
  gr->GetXaxis()->SetTitle("Event Nr. (10^3)");
  gr->GetYaxis()->SetTitle("Virtual memory (GBy)");
  gr->Fit(&fmem,"ROB=0.7");
  gr->Draw("ap");
  picArray->AddLast(gr->Clone());
  pcstream->GetFile()->cd();
  gr->Write();

  fmemSlope=fmem.GetParameter(1);
  fmemOffset=fmem.GetParameter(0);
  ftimeSlope=ftime.GetParameter(0);
  picArray->AddLast(gr->Clone());
  pcstream->GetFile()->cd();
  gr->Write();
  (*pcstream)<<"summaryInfo"<<
    "ftimeSlope="<<ftimeSlope<<       // time slope per event
    "fmemSlope="<<fmemSlope<<         // mem slope per event
    "fmemOffset="<<fmemOffset<<       // mem offset at the initialization
    "fmeandVM="<<fmeandVM<<           // mean delta VM per task
    "fmeandT="<<fmeandT<<             // mean delta T per task
    "\n";
}
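
// Reading back the train summary (sketch; the "summaryInfo" tree is written by the
// TTreeSRedirector when pcstream is deleted at the end of syswatchEvalTrain()):
//
//   TFile f("syswatchSummary.root");
//   TTree *summaryInfo = (TTree*)f.Get("summaryInfo");
//   summaryInfo->Scan("ftimeSlope:fmemSlope:fmemOffset");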


void GetSummaryTask(){
  //
  // Get the summary info per task
  //
  TH1 * hisVM =0;
  TH1 * hisT  =0;

  chainWatch->Draw("1000*deltaVM:sname",cutStep1+"isTask&&isEvent","profile");
  hisVM = (TH1*)chainWatch->GetHistogram()->Clone();
  hisVM->SetMarkerStyle(20);
  hisVM->SetName("deltaVMperTask");
  hisVM->SetTitle("delta VM per Task");
  hisVM->GetYaxis()->SetTitle("#DeltaVM/Event (kBy)");
  hisVM->Draw();
  picArray->AddLast(hisVM);
  pcstream->GetFile()->cd();
  hisVM->Write();
  //
  chainWatch->Draw("1000*deltaT:sname",cutStep1+"isTask&&isEvent","profile");
  hisT = (TH1*)chainWatch->GetHistogram()->Clone();
  hisT->SetMarkerStyle(20);
  hisT->SetName("deltaTperTask");
  hisT->SetTitle("deltaT per Task");
  hisT->GetYaxis()->SetTitle("#Delta_{t}/Event (ms)");
  hisT->Draw();
  picArray->AddLast(hisT);
  pcstream->GetFile()->cd();
  hisT->Write();
  //
  //
  Int_t nbins = hisT->GetXaxis()->GetNbins();
  for (Int_t iname=0; iname<nbins; iname++){
    TObjString *hname= new TObjString(hisVM->GetXaxis()->GetLabels()->At(iname)->GetName());
    printf("%s\t%f\t%f\n",hname->GetName(), hisVM->GetBinContent(iname+1),hisT->GetBinContent(iname+1));
    Double_t vmev    = hisVM->GetBinContent(iname+1);
    Double_t tev     = hisT->GetBinContent(iname+1);
    Double_t vmevErr = hisVM->GetBinError(iname+1);
    Double_t tevErr  = hisT->GetBinError(iname+1);
    char hstring[1000];
    sprintf(hstring,"%s",hname->GetName());
    (*pcstream)<<"taskInfo"<<
      "taskName.="<<hname<<             // task name
      "vmev="<<    vmev<<               // memory per event per task
      "tev="<<     tev<<                // time per event per task
      "vmevErr="<< vmevErr<<            // error of the memory per event per task
      "teverr="<<  tevErr<<             // error of the time per event per task
      // MEAN summary (ALL)
      "ftimeSlope="<<ftimeSlope<<       // time slope per event
      "fmemSlope="<<fmemSlope<<         // mem slope per event
      "fmemOffset="<<fmemOffset<<       // mem offset at the initialization
      "fmeandVM="<<fmeandVM<<           // mean delta VM per task
      "fmeandT="<<fmeandT<<             // mean delta T per task
      "\n";
  }
}
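
// Quick inspection of the per-task summary (sketch; the "taskInfo" tree is written
// to syswatchSummary.root together with the default pictures):
//
//   TFile f("syswatchSummary.root");
//   TTree *taskInfo = (TTree*)f.Get("taskInfo");
//   taskInfo->Scan("vmev:tev:vmevErr");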
240
241
242void GetSummaryHost(){
243 //
244 // Get summary information per hosts
245 // 2 histograms
246 //
247 Int_t nbins=0;
248 TH1 * hisIOT=0;
249 TH1 * hisT=0;
250 chainWatch->Draw("0.0000001*fileBytesRead/T:hostName","EventNumber>0&&isReading","prof");
251 hisIOT = (TH1*)chainWatch->GetHistogram()->Clone();
252 hisIOT->SetDirectory(0);
253 chainWatch->Draw("1000*T/EventNumber:hostName","EventNumber>0&&isReading","prof");
254 hisT = (TH1*)chainWatch->GetHistogram()->Clone();
255 hisT->SetDirectory(0);
256 //
257 hisIOT->SetTitle("input MBy/s per host");
258 hisIOT->SetName("input-MBysPerHost");
259 hisIOT->SetTitle("MBy/s");
260 hisIOT->GetYaxis()->SetTitle("MBy/s");
261 hisIOT->SetMarkerStyle(22);
262 hisIOT->Draw();
263 picArray->AddLast(hisIOT);
264 pcstream->GetFile()->cd();
265 hisIOT->Write();
266 //
267 hisT->SetTitle("Event/ms per host");
268 hisT->SetName("Event/ms per Host");
269 hisT->GetYaxis()->SetTitle("Events/ms");
270 hisT->SetMarkerStyle(22);
271 hisT->Draw();
272 picArray->AddLast(hisT);
273 pcstream->GetFile()->cd();
274 hisT->Write();
275 //
276 nbins = hisIOT->GetXaxis()->GetNbins();
277 for (Int_t iname=0; iname<nbins; iname++){
278 TObjString *hname= new TObjString(hisIOT->GetXaxis()->GetLabels()->At(iname)->GetName());
279 printf("%s\t%f\t%f\n",hname->GetName(), hisIOT->GetBinContent(iname+1),hisT->GetBinContent(iname+1));
280 Double_t iot= hisIOT->GetBinContent(iname+1);
281 Double_t tev= hisT->GetBinContent(iname+1);
282 char hstring[1000];
283 sprintf(hstring,"%s",hname->GetName());
284 (*pcstream)<<"hostInfo"<<
23abc897 285 "hostName.="<<hname<< // host name
3d75fc41 286 "iot="<< iot<< // reading per second
287 "tev="<< tev<< // events per milisecond
288 // mean summary (all)
289 "ftimeSlope="<<ftimeSlope<< // time slope per event
290 "fmemSlope="<<fmemSlope<< // mem slope per event
291 "fmemOffset="<<fmemOffset<< // mem offset at the initialization
292 "fmeandVM="<<fmeandVM<< // mean delta VM per task
293 "fmeandT="<<fmeandT<< // mean delta T per task
294 "\n";
295 }
296
297}