/**************************************************************************
 * Copyright(c) 1998-1999, ALICE Experiment at CERN, All rights reserved. *
 *                                                                        *
 * Author: The ALICE Off-line Project.                                    *
 * Contributors are mentioned in the code where appropriate.              *
 *                                                                        *
 * Permission to use, copy, modify and distribute this software and its   *
 * documentation strictly for non-commercial purposes is hereby granted   *
 * without fee, provided that the above copyright notice appears in all   *
 * copies and that both the copyright notice and this permission notice   *
 * appear in the supporting documentation. The authors make no claims     *
 * about the suitability of this software for any purpose. It is          *
 * provided "as is" without express or implied warranty.                  *
 **************************************************************************/


///////////////////////////////////////////////////////////////////////////
// Class TStatToolkit
//
// Subset of mathematical functions not included in TMath
//

///////////////////////////////////////////////////////////////////////////
#include "TMath.h"
#include "Riostream.h"
#include "TH1F.h"
#include "TH3.h"
#include "TF1.h"
#include "TTree.h"
#include "TChain.h"
#include "TObjString.h"
#include "TLinearFitter.h"
#include "TGraph2D.h"
#include "TGraph.h"

//
// includes necessary for the test functions
//
#include "TSystem.h"
#include "TRandom.h"
#include "TStopwatch.h"
#include "TTreeStream.h"

#include "TStatToolkit.h"

ClassImp(TStatToolkit) // Class implementation to enable ROOT I/O

TStatToolkit::TStatToolkit() : TObject()
{
  //
  // Default constructor
  //
}
///////////////////////////////////////////////////////////////////////////
TStatToolkit::~TStatToolkit()
{
  //
  // Destructor
  //
}

//_____________________________________________________________________________
void TStatToolkit::EvaluateUni(Int_t nvectors, Double_t *data, Double_t &mean
                               , Double_t &sigma, Int_t hh)
{
  //
  // Robust estimator in the 1D case, MI version (faster than the ROOT version)
  //
  // For the univariate case
  // estimates of location and scatter are returned in the mean and sigma parameters.
  // The algorithm works on the same principle as in the multivariate case -
  // it finds a subset of size hh with the smallest sigma, and then returns the mean and
  // sigma of this subset.
  //

  if (hh==0)
    hh=(nvectors+2)/2;
  Double_t faclts[]={2.6477,2.5092,2.3826,2.2662,2.1587,2.0589,1.9660,1.879,1.7973,1.7203,1.6473};
  Int_t *index=new Int_t[nvectors];
  TMath::Sort(nvectors, data, index, kFALSE);

  Int_t nquant = TMath::Min(Int_t(Double_t(((hh*1./nvectors)-0.5)*40))+1, 11);
  Double_t factor = faclts[TMath::Max(0,nquant-1)];

  Double_t sumx  = 0;
  Double_t sumx2 = 0;
  Int_t    bestindex = -1;
  Double_t bestmean  = 0;
  Double_t bestsigma = (data[index[nvectors-1]]-data[index[0]]+1.);   // maximal possible sigma
  bestsigma *= bestsigma;

  for (Int_t i=0; i<hh; i++){
    sumx  += data[index[i]];
    sumx2 += data[index[i]]*data[index[i]];
  }

  Double_t norm  = 1./Double_t(hh);
  Double_t norm2 = 1./Double_t(hh-1);
  for (Int_t i=hh; i<nvectors; i++){
    Double_t cmean  = sumx*norm;
    Double_t csigma = (sumx2 - hh*cmean*cmean)*norm2;
    if (csigma<bestsigma){
      bestmean  = cmean;
      bestsigma = csigma;
      bestindex = i-hh;
    }

    sumx  += data[index[i]]-data[index[i-hh]];
    sumx2 += data[index[i]]*data[index[i]]-data[index[i-hh]]*data[index[i-hh]];
  }

  Double_t bstd = factor*TMath::Sqrt(TMath::Abs(bestsigma));
  mean  = bestmean;
  sigma = bstd;
  delete [] index;

}


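// Usage sketch for EvaluateUni (illustrative only; the array name, its size and
// the generated content are hypothetical, not part of this class):
//
//   Double_t data[100];
//   for (Int_t i=0; i<100; i++) data[i] = gRandom->Gaus(0,1);
//   Double_t mean=0, sigma=0;
//   TStatToolkit::EvaluateUni(100, data, mean, sigma, 0);  // hh=0 -> subset of (n+2)/2 points
//
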
void TStatToolkit::EvaluateUniExternal(Int_t nvectors, Double_t *data, Double_t &mean, Double_t &sigma, Int_t hh,  Float_t externalfactor)
{
  // Modified version of the ROOT robust EvaluateUni
  // robust estimator in the 1D case, MI version
  // an external factor is added to include the precision of an external measurement
  //

  if (hh==0)
    hh=(nvectors+2)/2;
  Double_t faclts[]={2.6477,2.5092,2.3826,2.2662,2.1587,2.0589,1.9660,1.879,1.7973,1.7203,1.6473};
  Int_t *index=new Int_t[nvectors];
  TMath::Sort(nvectors, data, index, kFALSE);
  //
  Int_t nquant = TMath::Min(Int_t(Double_t(((hh*1./nvectors)-0.5)*40))+1, 11);
  Double_t factor = faclts[0];
  if (nquant>0){
    // fix proper normalization - Anja
    factor = faclts[nquant-1];
  }

  //
  //
  Double_t sumx  = 0;
  Double_t sumx2 = 0;
  Int_t    bestindex = -1;
  Double_t bestmean  = 0;
  Double_t bestsigma = -1;
  for (Int_t i=0; i<hh; i++){
    sumx  += data[index[i]];
    sumx2 += data[index[i]]*data[index[i]];
  }
  //
  Double_t kfactor = 2.*externalfactor - externalfactor*externalfactor;
  Double_t norm = 1./Double_t(hh);
  for (Int_t i=hh; i<nvectors; i++){
    Double_t cmean  = sumx*norm;
    Double_t csigma = (sumx2*norm - cmean*cmean*kfactor);
    if (csigma<bestsigma ||  bestsigma<0){
      bestmean  = cmean;
      bestsigma = csigma;
      bestindex = i-hh;
    }
    //
    //
    sumx  += data[index[i]]-data[index[i-hh]];
    sumx2 += data[index[i]]*data[index[i]]-data[index[i-hh]]*data[index[i-hh]];
  }

  Double_t bstd = factor*TMath::Sqrt(TMath::Abs(bestsigma));
  mean  = bestmean;
  sigma = bstd;
  delete [] index;
}


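// Usage sketch for EvaluateUniExternal (illustrative only; data and n are
// hypothetical). With externalfactor=1 the subset spread reduces to the plain
// variance <x^2>-<x>^2; smaller values de-weight the mean term according to the
// precision of an external measurement:
//
//   Double_t mean=0, sigma=0;
//   TStatToolkit::EvaluateUniExternal(n, data, mean, sigma, 0, 1.);
//
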
//_____________________________________________________________________________
Int_t TStatToolkit::Freq(Int_t n, const Int_t *inlist
                         , Int_t *outlist, Bool_t down)
{
  //
  // Sort elements according to their frequency of occurrence
  // The size of the output array has to be 2*n
  //

  Int_t * sindexS = new Int_t[n];     // temp array for sorting
  Int_t * sindexF = new Int_t[2*n];
  for (Int_t i=0;i<n;i++) sindexS[i]=0;
  for (Int_t i=0;i<2*n;i++) sindexF[i]=0;
  //
  TMath::Sort(n,inlist, sindexS, down);
  Int_t last     = inlist[sindexS[0]];
  Int_t val      = last;
  sindexF[0]     = 1;
  sindexF[0+n]   = last;
  Int_t countPos = 0;
  //
  // find frequency
  for(Int_t i=1;i<n; i++){
    val = inlist[sindexS[i]];
    if (last == val)   sindexF[countPos]++;
    else{
      countPos++;
      sindexF[countPos+n] = val;
      sindexF[countPos]++;
      last = val;
    }
  }
  if (last==val) countPos++;
  // sort according to frequency
  TMath::Sort(countPos, sindexF, sindexS, kTRUE);
  for (Int_t i=0;i<countPos;i++){
    outlist[2*i  ] = sindexF[sindexS[i]+n];
    outlist[2*i+1] = sindexF[sindexS[i]];
  }
  delete [] sindexS;
  delete [] sindexF;

  return countPos;

}

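// Usage sketch for Freq (illustrative only; the input values are hypothetical):
//
//   Int_t in[6] = {1,2,2,3,3,3};
//   Int_t out[12];                                  // must hold 2*n entries
//   Int_t nuniq = TStatToolkit::Freq(6, in, out, kTRUE);
//   // out[2*i] = value, out[2*i+1] = its frequency, ordered by decreasing frequency
//
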
//___TStatToolkit__________________________________________________________________________
void TStatToolkit::TruncatedMean(const TH1 * his, TVectorD *param, Float_t down, Float_t up, Bool_t verbose){
  //
  // Calculate the truncated mean and RMS of the histogram, using only the bins
  // whose cumulative fraction of entries lies between down and up
  //
  Int_t nbins      = his->GetNbinsX();
  Float_t nentries = his->GetEntries();
  Float_t sum      = 0;
  Float_t mean   = 0;
  Float_t sigma2 = 0;
  Float_t ncumul = 0;
  for (Int_t ibin=1;ibin<nbins; ibin++){
    ncumul += his->GetBinContent(ibin);
    Float_t fraction = Float_t(ncumul)/Float_t(nentries);
    if (fraction>down && fraction<up){
      sum    += his->GetBinContent(ibin);
      mean   += his->GetBinCenter(ibin)*his->GetBinContent(ibin);
      sigma2 += his->GetBinCenter(ibin)*his->GetBinCenter(ibin)*his->GetBinContent(ibin);
    }
  }
  mean /= sum;
  sigma2 = TMath::Sqrt(TMath::Abs(sigma2/sum-mean*mean));
  if (param){
    (*param)[0] = his->GetMaximum();
    (*param)[1] = mean;
    (*param)[2] = sigma2;
  }
  if (verbose) printf("Mean\t%f\t Sigma2\t%f\n", mean,sigma2);
}

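// Usage sketch for TruncatedMean (illustrative only; hisQ is a hypothetical histogram):
//
//   TVectorD param(3);
//   TStatToolkit::TruncatedMean(hisQ, &param, 0.05, 0.95);  // keep the central 90% of the entries
//   // param[0]=histogram maximum, param[1]=truncated mean, param[2]=truncated RMS
//
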
void TStatToolkit::LTM(TH1F * his, TVectorD *param , Float_t fraction,  Bool_t verbose){
  //
  // LTM : trimmed mean and sigma of the histogram - the subset containing the
  // given fraction of the entries with the smallest spread is used (see EvaluateUni)
  //
  Int_t nbins    = his->GetNbinsX();
  Int_t nentries = (Int_t)his->GetEntries();
  Double_t *data = new Double_t[nentries];
  Int_t npoints = 0;
  for (Int_t ibin=1;ibin<nbins; ibin++){
    Float_t entriesI = his->GetBinContent(ibin);
    Float_t xcenter  = his->GetBinCenter(ibin);
    for (Int_t ic=0; ic<entriesI; ic++){
      if (npoints<nentries){
        data[npoints] = xcenter;
        npoints++;
      }
    }
  }
  Double_t mean, sigma;
  Int_t npoints2 = TMath::Min(Int_t(fraction*Float_t(npoints)),npoints-1);
  npoints2 = TMath::Max(Int_t(0.5*Float_t(npoints)),npoints2);
  TStatToolkit::EvaluateUni(npoints, data, mean, sigma, npoints2);
  delete [] data;
  if (verbose) printf("Mean\t%f\t Sigma2\t%f\n", mean,sigma);
  if (param){
    (*param)[0] = his->GetMaximum();
    (*param)[1] = mean;
    (*param)[2] = sigma;
  }
}

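// Usage sketch for LTM (illustrative only; hisRes is a hypothetical histogram):
//
//   TVectorD vec(3);
//   TStatToolkit::LTM(hisRes, &vec, 0.7);   // use the "best" 70% of the entries
//   // vec[0]=histogram maximum, vec[1]=trimmed mean, vec[2]=trimmed sigma
//
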
Double_t  TStatToolkit::FitGaus(TH1* his, TVectorD *param, TMatrixD */*matrix*/, Float_t xmin, Float_t xmax, Bool_t verbose){
  //
  //  Fit the histogram with a gaussian function
  //
  //  Parameters:
  //       return value - chi2; negative if there are not enough points
  //       his          - input histogram
  //       param        - vector with parameters
  //       xmin, xmax   - range to fit; if xmin=xmax=0 the full histogram range is used
  //  Fitting:
  //  1. Step - take the logarithm of the bin contents
  //  2. Linear fit (parabola) - more robust, always converges
  //  3. In case of small statistics, bins are averaged
  //
  static TLinearFitter fitter(3,"pol2");
  TVectorD  par(3);
  TVectorD  sigma(3);
  TMatrixD mat(3,3);
  if (his->GetMaximum()<4) return -1;
  if (his->GetEntries()<12) return -1;
  if (his->GetRMS()<mat.GetTol()) return -1;
  Float_t maxEstimate = his->GetEntries()*his->GetBinWidth(1)/TMath::Sqrt((TMath::TwoPi()*his->GetRMS()));
  Int_t dsmooth = TMath::Nint(6./TMath::Sqrt(maxEstimate));

  if (maxEstimate<1) return -1;
  Int_t nbins   = his->GetNbinsX();
  Int_t npoints = 0;
  //


  if (xmin>=xmax){
    xmin = his->GetXaxis()->GetXmin();
    xmax = his->GetXaxis()->GetXmax();
  }
  for (Int_t iter=0; iter<2; iter++){
    fitter.ClearPoints();
    npoints=0;
    for (Int_t ibin=1;ibin<nbins+1; ibin++){
      Int_t countB=1;
      Float_t entriesI = his->GetBinContent(ibin);
      for (Int_t delta = -dsmooth; delta<=dsmooth; delta++){
        if (ibin+delta>1 && ibin+delta<nbins-1){
          entriesI += his->GetBinContent(ibin+delta);
          countB++;
        }
      }
      entriesI /= countB;
      Double_t xcenter = his->GetBinCenter(ibin);
      if (xcenter<xmin || xcenter>xmax) continue;
      Double_t error = 1./TMath::Sqrt(countB);
      Float_t  cont  = 2;
      if (iter>0){
        if (par[0]+par[1]*xcenter+par[2]*xcenter*xcenter>20) return 0;
        cont = TMath::Exp(par[0]+par[1]*xcenter+par[2]*xcenter*xcenter);
        if (cont>1.) error = 1./TMath::Sqrt(cont*Float_t(countB));
      }
      if (entriesI>1 && cont>1){
        fitter.AddPoint(&xcenter,TMath::Log(Float_t(entriesI)),error);
        npoints++;
      }
    }
    if (npoints>3){
      fitter.Eval();
      fitter.GetParameters(par);
    }else{
      break;
    }
  }
  if (npoints<=3){
    return -1;
  }
  fitter.GetParameters(par);
  fitter.GetCovarianceMatrix(mat);
  if (TMath::Abs(par[1])<mat.GetTol()) return -1;
  if (TMath::Abs(par[2])<mat.GetTol()) return -1;
  Double_t chi2 = fitter.GetChisquare()/Float_t(npoints);
  //fitter.GetParameters();
  if (!param)  param  = new TVectorD(3);
  // if (!matrix) matrix = new TMatrixD(3,3); // Covariance matrix to be implemented
  (*param)[1] = par[1]/(-2.*par[2]);
  (*param)[2] = 1./TMath::Sqrt(TMath::Abs(-2.*par[2]));
  (*param)[0] = TMath::Exp(par[0]+ par[1]* (*param)[1] +  par[2]*(*param)[1]*(*param)[1]);
  if (verbose){
    par.Print();
    mat.Print();
    param->Print();
    printf("Chi2=%f\n",chi2);
    TF1 * f1= new TF1("f1","[0]*exp(-(x-[1])^2/(2*[2]*[2]))",his->GetXaxis()->GetXmin(),his->GetXaxis()->GetXmax());
    f1->SetParameter(0, (*param)[0]);
    f1->SetParameter(1, (*param)[1]);
    f1->SetParameter(2, (*param)[2]);
    f1->Draw("same");
  }
  return chi2;
}

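// The conversion used above follows from taking the logarithm of a gaussian:
//   log(A*exp(-(x-m)^2/(2 s^2))) = p0 + p1*x + p2*x^2
//   with  m = -p1/(2*p2),  s = 1/sqrt(-2*p2),  A = exp(p0 + p1*m + p2*m^2)
//
// Usage sketch (illustrative only; hisSignal is a hypothetical histogram):
//
//   TVectorD param(3);
//   TMatrixD dummy(3,3);                       // covariance not implemented - pass a dummy
//   Double_t chi2 = TStatToolkit::FitGaus(hisSignal, &param, &dummy);
//   if (chi2>=0) printf("A=%f mean=%f sigma=%f\n", param[0], param[1], param[2]);
//
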
Double_t  TStatToolkit::FitGaus(Float_t *arr, Int_t nBins, Float_t xMin, Float_t xMax, TVectorD *param, TMatrixD */*matrix*/, Bool_t verbose){
  //
  //  Fit the histogram (given as an array of bin contents) with a gaussian function
  //
  //  Parameters:
  //     nBins:      size of the array and number of histogram bins
  //     xMin, xMax: histogram range
  //     param:      parameters of the fit (0-Constant, 1-Mean, 2-Sigma)
  //     matrix:     covariance matrix -- not implemented yet, pass a dummy matrix!!!
  //
  //  Return values:
  //    >0: the chi2 returned by TLinearFitter
  //    -3: only three points have been used for the calculation - no fitter was used
  //    -2: only two points have been used for the calculation - center of gravity was used for the calculation
  //    -1: only one point has been used for the calculation - center of gravity was used for the calculation
  //    -4: invalid result!!
  //
  //  Fitting:
  //  1. Step - take the logarithm of the bin contents
  //  2. Linear fit (parabola) - more robust, always converges
  //
  static TLinearFitter fitter(3,"pol2");
  static TMatrixD mat(3,3);
  static Double_t kTol = mat.GetTol();
  fitter.StoreData(kFALSE);
  fitter.ClearPoints();
  TVectorD  par(3);
  TVectorD  sigma(3);
  TMatrixD matA(3,3);
  TMatrixD b(3,1);
  Float_t rms = TMath::RMS(nBins,arr);
  Float_t max = TMath::MaxElement(nBins,arr);
  Float_t binWidth = (xMax-xMin)/(Float_t)nBins;

  Float_t meanCOG = 0;
  Float_t rms2COG = 0;
  Float_t sumCOG  = 0;

  Float_t entries = 0;
  Int_t nfilled = 0;

  for (Int_t i=0; i<nBins; i++){
    entries += arr[i];
    if (arr[i]>0) nfilled++;
  }

  if (max<4) return -4;
  if (entries<12) return -4;
  if (rms<kTol) return -4;

  Int_t npoints = 0;
  //

  //
  for (Int_t ibin=0;ibin<nBins; ibin++){
    Float_t entriesI = arr[ibin];
    if (entriesI>1){
      Double_t xcenter = xMin+(ibin+0.5)*binWidth;

      Float_t error = 1./TMath::Sqrt(entriesI);
      Float_t val   = TMath::Log(Float_t(entriesI));
      fitter.AddPoint(&xcenter,val,error);
      if (npoints<3){
        matA(npoints,0)=1;
        matA(npoints,1)=xcenter;
        matA(npoints,2)=xcenter*xcenter;
        b(npoints,0)=val;
        meanCOG += xcenter*entriesI;
        rms2COG += xcenter*entriesI*xcenter;
        sumCOG  += entriesI;
      }
      npoints++;
    }
  }


  Double_t chi2 = 0;
  if (npoints>=3){
    if ( npoints == 3 ){
      //analytic calculation of the parameters for three points
      matA.Invert();
      TMatrixD res(3,1);          // solve the 3x3 system: res = matA^-1 * b
      res.Mult(matA,b);
      par[0]=res(0,0);
      par[1]=res(1,0);
      par[2]=res(2,0);
      chi2 = -3.;
    } else {
      // use the fitter for more than three points
      fitter.Eval();
      fitter.GetParameters(par);
      fitter.GetCovarianceMatrix(mat);
      chi2 = fitter.GetChisquare()/Float_t(npoints);
    }
    if (TMath::Abs(par[1])<kTol) return -4;
    if (TMath::Abs(par[2])<kTol) return -4;

    if (!param)  param  = new TVectorD(3);
    //if (!matrix) matrix = new TMatrixD(3,3); // !!!! might be a memory leak. use a dummy matrix pointer to call this function! // Covariance matrix to be implemented

    (*param)[1] = par[1]/(-2.*par[2]);
    (*param)[2] = 1./TMath::Sqrt(TMath::Abs(-2.*par[2]));
    Double_t lnparam0 = par[0]+ par[1]* (*param)[1] +  par[2]*(*param)[1]*(*param)[1];
    if ( lnparam0>307 ) return -4;
    (*param)[0] = TMath::Exp(lnparam0);
    if (verbose){
      par.Print();
      mat.Print();
      param->Print();
      printf("Chi2=%f\n",chi2);
      TF1 * f1= new TF1("f1","[0]*exp(-(x-[1])^2/(2*[2]*[2]))",xMin,xMax);
      f1->SetParameter(0, (*param)[0]);
      f1->SetParameter(1, (*param)[1]);
      f1->SetParameter(2, (*param)[2]);
      f1->Draw("same");
    }
    return chi2;
  }

  if (npoints == 2){
    //use center of gravity for 2 points
    meanCOG /= sumCOG;
    rms2COG /= sumCOG;
    if (!param)  param  = new TVectorD(3);
    (*param)[0] = max;
    (*param)[1] = meanCOG;
    (*param)[2] = TMath::Sqrt(TMath::Abs(meanCOG*meanCOG-rms2COG));
    chi2 = -2.;
  }
  if ( npoints == 1 ){
    meanCOG /= sumCOG;
    if (!param)  param  = new TVectorD(3);
    (*param)[0] = max;
    (*param)[1] = meanCOG;
    (*param)[2] = binWidth/TMath::Sqrt(12);
    chi2 = -1.;
  }
  return chi2;

}


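// Usage sketch for the array version of FitGaus (illustrative only; the
// histogram h is hypothetical). GetArray()+1 skips the underflow bin, as done
// in TestGausFit below:
//
//   TVectorD param(3);
//   TMatrixD dummy(3,3);
//   Double_t chi2 = TStatToolkit::FitGaus(h->GetArray()+1, h->GetNbinsX(),
//                                         h->GetXaxis()->GetXmin(),
//                                         h->GetXaxis()->GetXmax(), &param, &dummy);
//   // chi2<0 encodes the fallback used (see the return-value list above)
//
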
Float_t TStatToolkit::GetCOG(const Short_t *arr, Int_t nBins, Float_t xMin, Float_t xMax, Float_t *rms, Float_t *sum)
{
  //
  //  calculate the center of gravity, rms and sum for the array 'arr' with nBins and an x range from xMin to xMax
  //  return the COG; in case of failure return xMin
  //
  Float_t meanCOG = 0;
  Float_t rms2COG = 0;
  Float_t sumCOG  = 0;
  Int_t npoints   = 0;

  Float_t binWidth = (xMax-xMin)/(Float_t)nBins;

  for (Int_t ibin=0; ibin<nBins; ibin++){
    Float_t entriesI = (Float_t)arr[ibin];
    Double_t xcenter = xMin+(ibin+0.5)*binWidth;
    if ( entriesI>0 ){
      meanCOG += xcenter*entriesI;
      rms2COG += xcenter*entriesI*xcenter;
      sumCOG  += entriesI;
      npoints++;
    }
  }
  if ( sumCOG == 0 ) return xMin;
  meanCOG /= sumCOG;

  if ( rms ){
    rms2COG /= sumCOG;
    (*rms) = TMath::Sqrt(TMath::Abs(meanCOG*meanCOG-rms2COG));
    if ( npoints == 1 ) (*rms) = binWidth/TMath::Sqrt(12);
  }

  if ( sum )
    (*sum) = sumCOG;

  return meanCOG;
}



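// Usage sketch for GetCOG (illustrative only; the signal array is hypothetical,
// e.g. ADC counts of a pulse):
//
//   Short_t signal[10] = {0,1,4,9,12,9,4,1,0,0};
//   Float_t rms=0, sum=0;
//   Float_t cog = TStatToolkit::GetCOG(signal, 10, 0., 10., &rms, &sum);
//
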
///////////////////////////////////////////////////////////////
////////////////       TEST functions        //////////////////
///////////////////////////////////////////////////////////////




void TStatToolkit::TestGausFit(Int_t nhistos){
  //
  // Test the performance of the parabolic - gaussian fit and compare it with
  // the ROOT gauss fit
  //  nhistos - number of histograms to be used for the test
  //
  TTreeSRedirector *pcstream = new TTreeSRedirector("fitdebug.root");

  Float_t  *xTrue = new Float_t[nhistos];
  Float_t  *sTrue = new Float_t[nhistos];
  TVectorD **par1 = new TVectorD*[nhistos];
  TVectorD **par2 = new TVectorD*[nhistos];
  TMatrixD dummy(3,3);


  TH1F **h1f = new TH1F*[nhistos];
  TF1  *myg = new TF1("myg","gaus");
  TF1  *fit = new TF1("fit","gaus");
  gRandom->SetSeed(0);

  //init
  for (Int_t i=0;i<nhistos; i++){
    par1[i] = new TVectorD(3);
    par2[i] = new TVectorD(3);
    h1f[i]  = new TH1F(Form("h1f%d",i),Form("h1f%d",i),20,-10,10);
    xTrue[i]= gRandom->Rndm();
    gSystem->Sleep(2);
    sTrue[i]= .75+gRandom->Rndm()*.5;
    myg->SetParameters(1,xTrue[i],sTrue[i]);
    h1f[i]->FillRandom("myg");
  }

  TStopwatch s;
  s.Start();
  //standard gaus fit
  for (Int_t i=0; i<nhistos; i++){
    h1f[i]->Fit(fit,"0q");
    (*par1[i])(0) = fit->GetParameter(0);
    (*par1[i])(1) = fit->GetParameter(1);
    (*par1[i])(2) = fit->GetParameter(2);
  }
  s.Stop();
  printf("Gaussian fit\t");
  s.Print();

  s.Start();
  //TStatToolkit gaus fit
  for (Int_t i=0; i<nhistos; i++){
    TStatToolkit::FitGaus(h1f[i]->GetArray()+1,h1f[i]->GetNbinsX(),h1f[i]->GetXaxis()->GetXmin(),h1f[i]->GetXaxis()->GetXmax(),par2[i],&dummy);
  }

  s.Stop();
  printf("Parabolic fit\t");
  s.Print();
  //write stream
  for (Int_t i=0;i<nhistos; i++){
    Float_t xt = xTrue[i];
    Float_t st = sTrue[i];
    (*pcstream)<<"data"
      <<"xTrue="<<xt
      <<"sTrue="<<st
      <<"pg.="<<(par1[i])
      <<"pa.="<<(par2[i])
      <<"\n";
  }
  //delete pointers
  for (Int_t i=0;i<nhistos; i++){
    delete par1[i];
    delete par2[i];
    delete h1f[i];
  }
  delete pcstream;
  delete []h1f;
  delete []xTrue;
  delete []sTrue;
  //
  delete []par1;
  delete []par2;

}



TGraph2D * TStatToolkit::MakeStat2D(TH3 * his, Int_t delta0, Int_t delta1, Int_t type){
  //
  // Make a 2D graph of a statistic of the z-projections of a 3D histogram
  //
  // delta0, delta1 - number of bins to integrate in x and y
  // type - 0 - mean value
  //        1 - RMS
  //        2 - LTM mean
  //        3 - maximum of the projection (LTM element 0)
  //        4 - mean of a gaus fit
  //        5 - sigma of a gaus fit

  TAxis * xaxis = his->GetXaxis();
  TAxis * yaxis = his->GetYaxis();
  //  TAxis * zaxis = his->GetZaxis();
  Int_t   nbinx = xaxis->GetNbins();
  Int_t   nbiny = yaxis->GetNbins();
  char name[1000];
  Int_t icount = 0;
  TGraph2D  *graph = new TGraph2D(nbinx*nbiny);
  TF1 f1("f1","gaus");
  for (Int_t ix=0; ix<nbinx;ix++)
    for (Int_t iy=0; iy<nbiny;iy++){
      Float_t xcenter = xaxis->GetBinCenter(ix);
      Float_t ycenter = yaxis->GetBinCenter(iy);
      snprintf(name,1000,"%s_%d_%d",his->GetName(), ix,iy);
      TH1 *projection = his->ProjectionZ(name,ix-delta0,ix+delta0,iy-delta1,iy+delta1);
      Float_t stat = 0;
      if (type==0) stat = projection->GetMean();
      if (type==1) stat = projection->GetRMS();
      if (type==2 || type==3){
        TVectorD vec(3);
        TStatToolkit::LTM((TH1F*)projection,&vec,0.7);
        if (type==2) stat = vec[1];
        if (type==3) stat = vec[0];
      }
      if (type==4 || type==5){
        projection->Fit(&f1);
        if (type==4) stat = f1.GetParameter(1);
        if (type==5) stat = f1.GetParameter(2);
      }
      //printf("%d\t%f\t%f\t%f\n", icount,xcenter, ycenter, stat);
      graph->SetPoint(icount,xcenter, ycenter, stat);
      icount++;
    }
  return graph;
}

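// Usage sketch for MakeStat2D (illustrative only; his3D is a hypothetical TH3
// with, e.g., x=sector, y=theta, z=residual):
//
//   TGraph2D *grMean  = TStatToolkit::MakeStat2D(his3D, 1, 1, 0);  // mean of each z-projection
//   TGraph2D *grSigma = TStatToolkit::MakeStat2D(his3D, 1, 1, 5);  // sigma of a gaus fit
//
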
TGraph * TStatToolkit::MakeStat1D(TH3 * his, Int_t delta1, Int_t type){
  //
  // Make a 1D graph of a statistic of the z-projections of a 3D histogram,
  // integrated over the full y range
  //
  // delta1 - number of x bins to integrate
  // type - 0 - mean value
  //        1 - RMS
  //        2 - LTM mean
  //        3 - maximum of the projection (LTM element 0)
  //        4 - mean of a gaus fit
  //        5 - sigma of a gaus fit

  TAxis * xaxis = his->GetXaxis();
  TAxis * yaxis = his->GetYaxis();
  //  TAxis * zaxis = his->GetZaxis();
  Int_t   nbinx = xaxis->GetNbins();
  Int_t   nbiny = yaxis->GetNbins();
  char name[1000];
  Int_t icount = 0;
  TGraph  *graph = new TGraph(nbinx);
  TF1 f1("f1","gaus");
  for (Int_t ix=0; ix<nbinx;ix++){
    Float_t xcenter = xaxis->GetBinCenter(ix);
    //    Float_t ycenter = yaxis->GetBinCenter(iy);
    snprintf(name,1000,"%s_%d",his->GetName(), ix);
    TH1 *projection = his->ProjectionZ(name,ix-delta1,ix+delta1,0,nbiny);
    Float_t stat = 0;
    if (type==0) stat = projection->GetMean();
    if (type==1) stat = projection->GetRMS();
    if (type==2 || type==3){
      TVectorD vec(3);
      TStatToolkit::LTM((TH1F*)projection,&vec,0.7);
      if (type==2) stat = vec[1];
      if (type==3) stat = vec[0];
    }
    if (type==4 || type==5){
      projection->Fit(&f1);
      if (type==4) stat = f1.GetParameter(1);
      if (type==5) stat = f1.GetParameter(2);
    }
    //printf("%d\t%f\t%f\t%f\n", icount,xcenter, ycenter, stat);
    graph->SetPoint(icount,xcenter, stat);
    icount++;
  }
  return graph;
}




TString* TStatToolkit::FitPlane(TTree *tree, const char* drawCommand, const char* formula, const char* cuts, Double_t & chi2, Int_t &npoints, TVectorD &fitParam, TMatrixD &covMatrix, Float_t frac, Int_t start, Int_t stop,Bool_t fix0){
  //
  // Fit an arbitrary linear function, specified by formula, to the data specified by drawCommand and cuts.
  // Returns chi2, fitParam and covMatrix.
  // Returns a TString with the fitted formula.
  //

  TString formulaStr(formula);
  TString drawStr(drawCommand);
  TString cutStr(cuts);
  TString ferr("1");

  TString strVal(drawCommand);
  if (strVal.Contains(":")){
    TObjArray* valTokens = strVal.Tokenize(":");
    drawStr = valTokens->At(0)->GetName();
    ferr    = valTokens->At(1)->GetName();
  }


  formulaStr.ReplaceAll("++", "~");
  TObjArray* formulaTokens = formulaStr.Tokenize("~");
  Int_t dim = formulaTokens->GetEntriesFast();

  fitParam.ResizeTo(dim);
  covMatrix.ResizeTo(dim,dim);

  TLinearFitter* fitter = new TLinearFitter(dim+1, Form("hyp%d",dim));
  fitter->StoreData(kTRUE);
  fitter->ClearPoints();

  Int_t entries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff",  stop-start, start);
  if (entries == -1) return new TString("An ERROR has occurred during fitting!");
  Double_t **values = new Double_t*[dim+1] ;
  //
  entries = tree->Draw(ferr.Data(), cutStr.Data(), "goff",  stop-start, start);
  if (entries == -1) {
    delete []values;
    return new TString("An ERROR has occurred during fitting!");
  }
  Double_t *errors = new Double_t[entries];
  memcpy(errors,  tree->GetV1(), entries*sizeof(Double_t));

  for (Int_t i = 0; i < dim + 1; i++){
    Int_t centries = 0;
    if (i < dim) centries = tree->Draw(((TObjString*)formulaTokens->At(i))->GetName(), cutStr.Data(), "goff", stop-start,start);
    else  centries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start,start);

    if (entries != centries) {
      delete []errors;
      delete []values;
      return new TString("An ERROR has occurred during fitting!");
    }
    values[i] = new Double_t[entries];
    memcpy(values[i],  tree->GetV1(), entries*sizeof(Double_t));
  }

  // add points to the fitter
  for (Int_t i = 0; i < entries; i++){
    Double_t x[1000];
    for (Int_t j=0; j<dim;j++) x[j]=values[j][i];
    fitter->AddPoint(x, values[dim][i], errors[i]);
  }

  fitter->Eval();
  if (frac>0.5 && frac<1){
    fitter->EvalRobust(frac);
  }else{
    if (fix0) {
      fitter->FixParameter(0,0);
      fitter->Eval();
    }
  }
  fitter->GetParameters(fitParam);
  fitter->GetCovarianceMatrix(covMatrix);
  chi2 = fitter->GetChisquare();
  npoints = entries;
  TString *preturnFormula = new TString(Form("( %f+",fitParam[0])), &returnFormula = *preturnFormula;

  for (Int_t iparam = 0; iparam < dim; iparam++) {
    returnFormula.Append(Form("%s*(%f)",((TObjString*)formulaTokens->At(iparam))->GetName(),fitParam[iparam+1]));
    if (iparam < dim-1) returnFormula.Append("+");
  }
  returnFormula.Append(" )");


  for (Int_t j=0; j<dim+1;j++) delete [] values[j];


  delete formulaTokens;
  delete fitter;
  delete[] values;
  delete[] errors;
  return preturnFormula;
}

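// Usage sketch for FitPlane (illustrative only; the tree, branch names and the
// cut are hypothetical). The target variable may carry its error after ':',
// and the model terms are separated by '++':
//
//   Double_t chi2=0; Int_t npoints=0;
//   TVectorD param; TMatrixD covar;
//   TString *fitStr = TStatToolkit::FitPlane(tree, "dz:dzErr", "lx++ly++lx*ly",
//                                            "side==0", chi2, npoints, param, covar,
//                                            0.9, 0, 1000000, kFALSE);
//   // frac=0.9 -> robust fit using 90% of the points; start/stop select the entry range
//   // *fitStr holds e.g. "( 0.1+lx*(0.2)+ly*(0.3)+lx*ly*(0.4) )" and can be drawn back:
//   // tree->Draw("dz-"+*fitStr, "side==0");
//
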
TString* TStatToolkit::FitPlaneConstrain(TTree *tree, const char* drawCommand, const char* formula, const char* cuts, Double_t & chi2, Int_t &npoints, TVectorD &fitParam, TMatrixD &covMatrix, Float_t frac, Int_t start, Int_t stop,Double_t constrain){
  //
  // Fit an arbitrary linear function, specified by formula, to the data specified by drawCommand and cuts.
  // Returns chi2, fitParam and covMatrix.
  // Returns a TString with the fitted formula.
  //

  TString formulaStr(formula);
  TString drawStr(drawCommand);
  TString cutStr(cuts);
  TString ferr("1");

  TString strVal(drawCommand);
  if (strVal.Contains(":")){
    TObjArray* valTokens = strVal.Tokenize(":");
    drawStr = valTokens->At(0)->GetName();
    ferr    = valTokens->At(1)->GetName();
  }


  formulaStr.ReplaceAll("++", "~");
  TObjArray* formulaTokens = formulaStr.Tokenize("~");
  Int_t dim = formulaTokens->GetEntriesFast();

  fitParam.ResizeTo(dim);
  covMatrix.ResizeTo(dim,dim);

  TLinearFitter* fitter = new TLinearFitter(dim+1, Form("hyp%d",dim));
  fitter->StoreData(kTRUE);
  fitter->ClearPoints();

  Int_t entries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff",  stop-start, start);
  if (entries == -1) return new TString("An ERROR has occurred during fitting!");
  Double_t **values = new Double_t*[dim+1] ;
  //
  entries = tree->Draw(ferr.Data(), cutStr.Data(), "goff",  stop-start, start);
  if (entries == -1) {
    delete [] values;
    return new TString("An ERROR has occurred during fitting!");
  }
  Double_t *errors = new Double_t[entries];
  memcpy(errors,  tree->GetV1(), entries*sizeof(Double_t));

  for (Int_t i = 0; i < dim + 1; i++){
    Int_t centries = 0;
    if (i < dim) centries = tree->Draw(((TObjString*)formulaTokens->At(i))->GetName(), cutStr.Data(), "goff", stop-start,start);
    else  centries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start,start);

    if (entries != centries) {
      delete []errors;
      delete []values;
      return new TString("An ERROR has occurred during fitting!");
    }
    values[i] = new Double_t[entries];
    memcpy(values[i],  tree->GetV1(), entries*sizeof(Double_t));
  }

  // add points to the fitter
  for (Int_t i = 0; i < entries; i++){
    Double_t x[1000];
    for (Int_t j=0; j<dim;j++) x[j]=values[j][i];
    fitter->AddPoint(x, values[dim][i], errors[i]);
  }
  if (constrain>0){
    // constrain the fit: for each term add a pseudo-point with value 0 and error 'constrain'
    for (Int_t i = 0; i < dim; i++){
      Double_t x[1000];
      for (Int_t j=0; j<dim;j++) if (i!=j) x[j]=0;
      x[i]=1.;
      fitter->AddPoint(x, 0, constrain);
    }
  }


  fitter->Eval();
  if (frac>0.5 && frac<1){
    fitter->EvalRobust(frac);
  }
  fitter->GetParameters(fitParam);
  fitter->GetCovarianceMatrix(covMatrix);
  chi2 = fitter->GetChisquare();
  npoints = entries;

  TString *preturnFormula = new TString(Form("( %f+",fitParam[0])), &returnFormula = *preturnFormula;

  for (Int_t iparam = 0; iparam < dim; iparam++) {
    returnFormula.Append(Form("%s*(%f)",((TObjString*)formulaTokens->At(iparam))->GetName(),fitParam[iparam+1]));
    if (iparam < dim-1) returnFormula.Append("+");
  }
  returnFormula.Append(" )");

  for (Int_t j=0; j<dim+1;j++) delete [] values[j];



  delete formulaTokens;
  delete fitter;
  delete[] values;
  delete[] errors;
  return preturnFormula;
}



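// Usage sketch for FitPlaneConstrain (illustrative only; the tree and variables
// are hypothetical). A positive 'constrain' adds, for every linear term, a
// pseudo-measurement of 0 with that error, which acts as a regularization of
// the fitted coefficients:
//
//   Double_t chi2=0; Int_t npoints=0;
//   TVectorD param; TMatrixD covar;
//   TString *fitStr = TStatToolkit::FitPlaneConstrain(tree, "dz:dzErr", "lx++ly",
//                                                     "side==0", chi2, npoints,
//                                                     param, covar, -1, 0, 1000000, 0.1);
//
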
TString* TStatToolkit::FitPlaneFixed(TTree *tree, const char* drawCommand, const char* formula, const char* cuts, Double_t & chi2, Int_t &npoints, TVectorD &fitParam, TMatrixD &covMatrix, Float_t frac, Int_t start, Int_t stop){
  //
  // Fit an arbitrary linear function, specified by formula, to the data specified by drawCommand and cuts.
  // In contrast to FitPlane, no constant offset term is included in the fit.
  // Returns chi2, fitParam and covMatrix.
  // Returns a TString with the fitted formula.
  //

  TString formulaStr(formula);
  TString drawStr(drawCommand);
  TString cutStr(cuts);
  TString ferr("1");

  TString strVal(drawCommand);
  if (strVal.Contains(":")){
    TObjArray* valTokens = strVal.Tokenize(":");
    drawStr = valTokens->At(0)->GetName();
    ferr    = valTokens->At(1)->GetName();
  }


  formulaStr.ReplaceAll("++", "~");
  TObjArray* formulaTokens = formulaStr.Tokenize("~");
  Int_t dim = formulaTokens->GetEntriesFast();

  fitParam.ResizeTo(dim);
  covMatrix.ResizeTo(dim,dim);
  TString fitString="x0";
  for (Int_t i=1; i<dim; i++) fitString+=Form("++x%d",i);
  TLinearFitter* fitter = new TLinearFitter(dim, fitString.Data());
  fitter->StoreData(kTRUE);
  fitter->ClearPoints();

  Int_t entries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff",  stop-start, start);
  if (entries == -1) return new TString("An ERROR has occurred during fitting!");
  Double_t **values = new Double_t*[dim+1] ;
  //
  entries = tree->Draw(ferr.Data(), cutStr.Data(), "goff",  stop-start, start);
  if (entries == -1) {
    delete []values;
    return new TString("An ERROR has occurred during fitting!");
  }
  Double_t *errors = new Double_t[entries];
  memcpy(errors,  tree->GetV1(), entries*sizeof(Double_t));

  for (Int_t i = 0; i < dim + 1; i++){
    Int_t centries = 0;
    if (i < dim) centries = tree->Draw(((TObjString*)formulaTokens->At(i))->GetName(), cutStr.Data(), "goff", stop-start,start);
    else  centries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start,start);

    if (entries != centries) {
      delete []errors;
      delete []values;
      return new TString("An ERROR has occurred during fitting!");
    }
    values[i] = new Double_t[entries];
    memcpy(values[i],  tree->GetV1(), entries*sizeof(Double_t));
  }

  // add points to the fitter
  for (Int_t i = 0; i < entries; i++){
    Double_t x[1000];
    for (Int_t j=0; j<dim;j++) x[j]=values[j][i];
    fitter->AddPoint(x, values[dim][i], errors[i]);
  }

  fitter->Eval();
  if (frac>0.5 && frac<1){
    fitter->EvalRobust(frac);
  }
  fitter->GetParameters(fitParam);
  fitter->GetCovarianceMatrix(covMatrix);
  chi2 = fitter->GetChisquare();
  npoints = entries;

  TString *preturnFormula = new TString("("), &returnFormula = *preturnFormula;

  for (Int_t iparam = 0; iparam < dim; iparam++) {
    returnFormula.Append(Form("%s*(%f)",((TObjString*)formulaTokens->At(iparam))->GetName(),fitParam[iparam]));
    if (iparam < dim-1) returnFormula.Append("+");
  }
  returnFormula.Append(" )");


  for (Int_t j=0; j<dim+1;j++) delete [] values[j];

  delete formulaTokens;
  delete fitter;
  delete[] values;
  delete[] errors;
  return preturnFormula;
}




Int_t TStatToolkit::GetFitIndex(const TString fString, const TString subString){
  //
  // fString   - "++"-separated list of fit terms
  // subString - "++"-separated list of the required substrings
  //
  // return the index of the last fit term that contains all required substrings
  //
  TObjArray *arrFit = fString.Tokenize("++");
  TObjArray *arrSub = subString.Tokenize("++");
  Int_t index = -1;
  for (Int_t i=0; i<arrFit->GetEntries(); i++){
    Bool_t isOK=kTRUE;
    TString str = arrFit->At(i)->GetName();
    for (Int_t isub=0; isub<arrSub->GetEntries(); isub++){
      if (str.Contains(arrSub->At(isub)->GetName())==0) isOK=kFALSE;
    }
    if (isOK) index=i;
  }
  return index;
}


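// Usage sketch for GetFitIndex (illustrative only; the fit string is hypothetical):
//
//   Int_t idx = TStatToolkit::GetFitIndex("lx++ly++lx*side", "lx++side");
//   // returns 2: the last term containing both "lx" and "side"
//
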
TString  TStatToolkit::FilterFit(const TString &input, const TString filter, TVectorD &param, TMatrixD & covar){
  //
  // Filter the fit expression and make a sub-fit formula:
  // keep only the terms of 'input' that contain all substrings of 'filter',
  // each multiplied by its fitted parameter
  //
  TObjArray *array0= input.Tokenize("++");
  TObjArray *array1= filter.Tokenize("++");
  //TString *presult=new TString("(0");
  TString result="(0.0";
  for (Int_t i=0; i<array0->GetEntries(); i++){
    Bool_t isOK=kTRUE;
    TString str(array0->At(i)->GetName());
    for (Int_t j=0; j<array1->GetEntries(); j++){
      if (str.Contains(array1->At(j)->GetName())==0) isOK=kFALSE;
    }
    if (isOK) {
      result+="+"+str;
      result+=Form("*(%f)",param[i+1]);
      printf("%f\t%f\t%s\n",param[i+1], TMath::Sqrt(covar(i+1,i+1)),str.Data());
    }
  }
  result+="-0.)";
  return result;
}

void TStatToolkit::Update1D(Double_t delta, Double_t sigma, Int_t s1, TMatrixD &vecXk, TMatrixD &covXk){
  //
  // Update parameters and covariance - with one measurement
  // Input:
  // vecXk - input vector - updated in the function
  // covXk - covariance matrix - updated in the function
  // delta, sigma, s1 - new measurement, rms of the new measurement and the index of the measurement
  const Int_t knMeas=1;
  Int_t knElem=vecXk.GetNrows();

  TMatrixD mat1(knElem,knElem);    // update covariance matrix
  TMatrixD matHk(1,knElem);        // vector to measurement
  TMatrixD vecYk(knMeas,1);        // Innovation or measurement residual
  TMatrixD matHkT(knElem,knMeas);  // helper matrix Hk transpose
  TMatrixD matSk(knMeas,knMeas);   // Innovation (or residual) covariance
  TMatrixD matKk(knElem,knMeas);   // Optimal Kalman gain
  TMatrixD covXk2(knElem,knElem);  // helper matrix
  TMatrixD covXk3(knElem,knElem);  // helper matrix
  TMatrixD vecZk(1,1);
  TMatrixD measR(1,1);
  vecZk(0,0) = delta;
  measR(0,0) = sigma*sigma;
  //
  // reset matHk
  for (Int_t iel=0;iel<knElem;iel++)
    for (Int_t ip=0;ip<knMeas;ip++) matHk(ip,iel)=0;
  //mat1
  for (Int_t iel=0;iel<knElem;iel++) {
    for (Int_t jel=0;jel<knElem;jel++) mat1(iel,jel)=0;
    mat1(iel,iel)=1;
  }
  //
  matHk(0, s1)=1;
  vecYk = vecZk-matHk*vecXk;               // Innovation or measurement residual
  matHkT=matHk.T(); matHk.T();
  matSk = (matHk*(covXk*matHkT))+measR;    // Innovation (or residual) covariance
  matSk.Invert();
  matKk = (covXk*matHkT)*matSk;            // Optimal Kalman gain
  vecXk += matKk*vecYk;                    // updated vector
  covXk2 = (mat1-(matKk*matHk));
  covXk3 = covXk2*covXk;
  covXk  = covXk3;
  Int_t nrows=covXk3.GetNrows();

  for (Int_t irow=0; irow<nrows; irow++)
    for (Int_t icol=0; icol<nrows; icol++){
      // rounding problems - make the matrix symmetric again
      covXk(irow,icol)=(covXk3(irow,icol)+covXk3(icol,irow))*0.5;
    }
}



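// Update1D implements the standard Kalman update step for a single measurement
// z = delta of element s1 with variance R = sigma^2 (H selects element s1):
//
//   y = z - H x            (innovation)
//   S = H P H^T + R        (innovation covariance)
//   K = P H^T S^-1         (Kalman gain)
//   x <- x + K y
//   P <- (1 - K H) P       (symmetrized afterwards against rounding)
//
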
void TStatToolkit::Constrain1D(const TString &input, const TString filter, TVectorD &param, TMatrixD & covar, Double_t mean, Double_t sigma){
  //
  // constrain a linear fit
  // input       - string description of the fit function
  // filter      - string filter to select sub fits
  // param,covar - parameters and covariance matrix of the fit
  // mean,sigma  - new measurement using which the fit is updated
  //

  TObjArray *array0= input.Tokenize("++");
  TObjArray *array1= filter.Tokenize("++");
  TMatrixD paramM(param.GetNrows(),1);
  for (Int_t i=0; i<=array0->GetEntries(); i++){paramM(i,0)=param(i);}

  if (filter.Length()==0){
    TStatToolkit::Update1D(mean, sigma, 0, paramM, covar);//
  }else{
    for (Int_t i=0; i<array0->GetEntries(); i++){
      Bool_t isOK=kTRUE;
      TString str(array0->At(i)->GetName());
      for (Int_t j=0; j<array1->GetEntries(); j++){
        if (str.Contains(array1->At(j)->GetName())==0) isOK=kFALSE;
      }
      if (isOK) {
        TStatToolkit::Update1D(mean, sigma, i+1, paramM, covar);//
      }
    }
  }
  for (Int_t i=0; i<=array0->GetEntries(); i++){
    param(i)=paramM(i,0);
  }
}

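// Usage sketch for Constrain1D (illustrative only; the formula and values are
// hypothetical). Every term matching the filter is updated with the external
// measurement mean +- sigma via the Kalman step above:
//
//   // param/covar as returned by FitPlane for the formula "lx++ly++lx*side"
//   TStatToolkit::Constrain1D("lx++ly++lx*side", "lx++side", param, covar, 0., 0.01);
//
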
TString  TStatToolkit::MakeFitString(const TString &input, const TVectorD &param, const TMatrixD & covar, Bool_t verbose){
  //
  // Compose the fit formula string from the input expression and the fitted parameters
  //
  TObjArray *array0= input.Tokenize("++");
  TString result=Form("(%f",param[0]);
  printf("%f\t%f\t\n", param[0], TMath::Sqrt(covar(0,0)));
  for (Int_t i=0; i<array0->GetEntries(); i++){
    TString str(array0->At(i)->GetName());
    result+="+"+str;
    result+=Form("*(%f)",param[i+1]);
    if (verbose) printf("%f\t%f\t%s\n", param[i+1], TMath::Sqrt(covar(i+1,i+1)),str.Data());
  }
  result+="-0.)";
  return result;
}


TGraph * TStatToolkit::MakeGraphSparse(TTree * tree, const char * expr, const char * cut){
  //
  // Make a sparse draw of the variables:
  // distinct x values (e.g. run numbers) are mapped onto consecutive,
  // equidistant bins and used as bin labels
  //
  const Int_t entries = tree->Draw(expr,cut,"goff");
  // TGraph * graph = (TGraph*)gPad->GetPrimitive("Graph"); // 2D
  TGraph * graph = new TGraph (entries, tree->GetV2(),tree->GetV1());
  //
  Int_t *index = new Int_t[entries];
  TMath::Sort(entries,graph->GetX(),index,kFALSE);

  Double_t *tempArray = new Double_t[entries];

  Double_t count = 0.5;
  Double_t *vrun = new Double_t[entries];
  Int_t icount = 0;
  //
  tempArray[index[0]] = count;
  vrun[0] = graph->GetX()[index[0]];
  for(Int_t i=1;i<entries;i++){
    if(graph->GetX()[index[i]]==graph->GetX()[index[i-1]])
      tempArray[index[i]] = count;
    else if(graph->GetX()[index[i]]!=graph->GetX()[index[i-1]]){
      count++;
      icount++;
      tempArray[index[i]] = count;
      vrun[icount]=graph->GetX()[index[i]];
    }
  }

  const Int_t newNbins = int(count+0.5);
  Double_t *newBins = new Double_t[newNbins+1];
  for(Int_t i=0; i<=count+1;i++){
    newBins[i] = i;
  }

  TGraph *graphNew = new TGraph(entries,tempArray,graph->GetY());
  graphNew->GetXaxis()->Set(newNbins,newBins);

  Char_t xName[50];
  for(Int_t i=0;i<count;i++){
    snprintf(xName,50,"%d",Int_t(vrun[i]));
    graphNew->GetXaxis()->SetBinLabel(i+1,xName);
  }
  graphNew->GetHistogram()->SetTitle("");

  delete [] tempArray;
  delete [] index;
  delete [] newBins;
  delete [] vrun;
  return graphNew;
}

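// Usage sketch for MakeGraphSparse (illustrative only; the trending tree and
// branch names are hypothetical). Distinct x values (e.g. run numbers) become
// equidistant, labelled bins:
//
//   TGraph *gr = TStatToolkit::MakeGraphSparse(trendTree, "meanTPCncl:run", "");
//   gr->SetMarkerStyle(20);
//   gr->Draw("ap");
//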