]> git.uio.no Git - u/mrichter/AliRoot.git/blame - STAT/TStatToolkit.cxx
cosmetics
[u/mrichter/AliRoot.git] / STAT / TStatToolkit.cxx
CommitLineData
21f3a443 1/**************************************************************************
2 * Copyright(c) 1998-1999, ALICE Experiment at CERN, All rights reserved. *
3 * *
4 * Author: The ALICE Off-line Project. *
5 * Contributors are mentioned in the code where appropriate. *
6 * *
7 * Permission to use, copy, modify and distribute this software and its *
8 * documentation strictly for non-commercial purposes is hereby granted *
9 * without fee, provided that the above copyright notice appears in all *
10 * copies and that both the copyright notice and this permission notice *
11 * appear in the supporting documentation. The authors make no claims *
12 * about the suitability of this software for any purpose. It is *
13 * provided "as is" without express or implied warranty. *
14 **************************************************************************/
15
16
17///////////////////////////////////////////////////////////////////////////
18// Class TStatToolkit
19//
20// Subset of matheamtical functions not included in the TMath
21//
22
23///////////////////////////////////////////////////////////////////////////
24#include "TMath.h"
25#include "Riostream.h"
26#include "TH1F.h"
27#include "TH3.h"
28#include "TF1.h"
29#include "TTree.h"
30#include "TChain.h"
31#include "TObjString.h"
32#include "TLinearFitter.h"
3d7cc0b4 33#include "TGraph2D.h"
34#include "TGraph.h"
21f3a443 35
36//
37// includes neccessary for test functions
38//
39#include "TSystem.h"
40#include "TRandom.h"
41#include "TStopwatch.h"
42#include "TTreeStream.h"
43
44#include "TStatToolkit.h"
45
46
47ClassImp(TStatToolkit) // Class implementation to enable ROOT I/O
48
49TStatToolkit::TStatToolkit() : TObject()
50{
51 //
52 // Default constructor
53 //
54}
55///////////////////////////////////////////////////////////////////////////
56TStatToolkit::~TStatToolkit()
57{
58 //
59 // Destructor
60 //
61}
62
63
64//_____________________________________________________________________________
65void TStatToolkit::EvaluateUni(Int_t nvectors, Double_t *data, Double_t &mean
66 , Double_t &sigma, Int_t hh)
67{
68 //
69 // Robust estimator in 1D case MI version - (faster than ROOT version)
70 //
71 // For the univariate case
72 // estimates of location and scatter are returned in mean and sigma parameters
73 // the algorithm works on the same principle as in multivariate case -
74 // it finds a subset of size hh with smallest sigma, and then returns mean and
75 // sigma of this subset
76 //
77
78 if (hh==0)
79 hh=(nvectors+2)/2;
80 Double_t faclts[]={2.6477,2.5092,2.3826,2.2662,2.1587,2.0589,1.9660,1.879,1.7973,1.7203,1.6473};
81 Int_t *index=new Int_t[nvectors];
82 TMath::Sort(nvectors, data, index, kFALSE);
83
84 Int_t nquant = TMath::Min(Int_t(Double_t(((hh*1./nvectors)-0.5)*40))+1, 11);
85 Double_t factor = faclts[TMath::Max(0,nquant-1)];
86
87 Double_t sumx =0;
88 Double_t sumx2 =0;
89 Int_t bestindex = -1;
90 Double_t bestmean = 0;
91 Double_t bestsigma = (data[index[nvectors-1]]-data[index[0]]+1.); // maximal possible sigma
92 bestsigma *=bestsigma;
93
94 for (Int_t i=0; i<hh; i++){
95 sumx += data[index[i]];
96 sumx2 += data[index[i]]*data[index[i]];
97 }
98
99 Double_t norm = 1./Double_t(hh);
bd7b4d18 100 Double_t norm2 = (hh-1)>0 ? 1./Double_t(hh-1):1;
21f3a443 101 for (Int_t i=hh; i<nvectors; i++){
102 Double_t cmean = sumx*norm;
103 Double_t csigma = (sumx2 - hh*cmean*cmean)*norm2;
104 if (csigma<bestsigma){
105 bestmean = cmean;
106 bestsigma = csigma;
107 bestindex = i-hh;
108 }
109
110 sumx += data[index[i]]-data[index[i-hh]];
111 sumx2 += data[index[i]]*data[index[i]]-data[index[i-hh]]*data[index[i-hh]];
112 }
113
114 Double_t bstd=factor*TMath::Sqrt(TMath::Abs(bestsigma));
115 mean = bestmean;
116 sigma = bstd;
117 delete [] index;
118
119}
120
121
122
123void TStatToolkit::EvaluateUniExternal(Int_t nvectors, Double_t *data, Double_t &mean, Double_t &sigma, Int_t hh, Float_t externalfactor)
124{
125 // Modified version of ROOT robust EvaluateUni
126 // robust estimator in 1D case MI version
127 // added external factor to include precision of external measurement
128 //
129
130 if (hh==0)
131 hh=(nvectors+2)/2;
132 Double_t faclts[]={2.6477,2.5092,2.3826,2.2662,2.1587,2.0589,1.9660,1.879,1.7973,1.7203,1.6473};
133 Int_t *index=new Int_t[nvectors];
134 TMath::Sort(nvectors, data, index, kFALSE);
135 //
136 Int_t nquant = TMath::Min(Int_t(Double_t(((hh*1./nvectors)-0.5)*40))+1, 11);
137 Double_t factor = faclts[0];
138 if (nquant>0){
139 // fix proper normalization - Anja
140 factor = faclts[nquant-1];
141 }
142
143 //
144 //
145 Double_t sumx =0;
146 Double_t sumx2 =0;
147 Int_t bestindex = -1;
148 Double_t bestmean = 0;
149 Double_t bestsigma = -1;
150 for (Int_t i=0; i<hh; i++){
151 sumx += data[index[i]];
152 sumx2 += data[index[i]]*data[index[i]];
153 }
154 //
155 Double_t kfactor = 2.*externalfactor - externalfactor*externalfactor;
156 Double_t norm = 1./Double_t(hh);
157 for (Int_t i=hh; i<nvectors; i++){
158 Double_t cmean = sumx*norm;
159 Double_t csigma = (sumx2*norm - cmean*cmean*kfactor);
160 if (csigma<bestsigma || bestsigma<0){
161 bestmean = cmean;
162 bestsigma = csigma;
163 bestindex = i-hh;
164 }
165 //
166 //
167 sumx += data[index[i]]-data[index[i-hh]];
168 sumx2 += data[index[i]]*data[index[i]]-data[index[i-hh]]*data[index[i-hh]];
169 }
170
171 Double_t bstd=factor*TMath::Sqrt(TMath::Abs(bestsigma));
172 mean = bestmean;
173 sigma = bstd;
174 delete [] index;
175}
176
177
//_____________________________________________________________________________
Int_t TStatToolkit::Freq(Int_t n, const Int_t *inlist
                        , Int_t *outlist, Bool_t down)
{
  //
  // Sort elements according to their occurrency (frequency of appearance)
  // The size of output array has to be 2*n
  //
  // n       - number of input values; NOTE(review): looks like n>0 is
  //           required - inlist[sindexS[0]] is read unconditionally; confirm
  // inlist  - input values
  // outlist - output pairs ordered by decreasing frequency:
  //           outlist[2*i] = value, outlist[2*i+1] = its count
  // down    - sort direction used for the initial value sort
  //
  // returns the number of distinct values found
  //

  Int_t * sindexS = new Int_t[n];     // temp array for sorting
  Int_t * sindexF = new Int_t[2*n];   // [0..n-1] counts, [n..2n-1] distinct values
  for (Int_t i=0;i<n;i++) sindexS[i]=0;
  for (Int_t i=0;i<2*n;i++) sindexF[i]=0;
  //
  TMath::Sort(n,inlist, sindexS, down);
  Int_t last = inlist[sindexS[0]];
  Int_t val = last;
  sindexF[0] = 1;
  sindexF[0+n] = last;
  Int_t countPos = 0;   // index of the group currently being counted
  //
  // find frequency
  for(Int_t i=1;i<n; i++){
    val = inlist[sindexS[i]];
    if (last == val) sindexF[countPos]++;   // same value - bump its counter
    else{                                   // new value - open the next group slot
      countPos++;
      sindexF[countPos+n] = val;
      sindexF[countPos]++;
      last =val;
    }
  }
  if (last==val) countPos++;   // close the last group so countPos == #distinct values
  // sort according frequency
  TMath::Sort(countPos, sindexF, sindexS, kTRUE);
  for (Int_t i=0;i<countPos;i++){
    outlist[2*i  ] = sindexF[sindexS[i]+n];   // the value
    outlist[2*i+1] = sindexF[sindexS[i]];     // its frequency
  }
  delete [] sindexS;
  delete [] sindexF;

  return countPos;

}
223
224//___TStatToolkit__________________________________________________________________________
3d7cc0b4 225void TStatToolkit::TruncatedMean(const TH1 * his, TVectorD *param, Float_t down, Float_t up, Bool_t verbose){
21f3a443 226 //
227 //
228 //
229 Int_t nbins = his->GetNbinsX();
230 Float_t nentries = his->GetEntries();
231 Float_t sum =0;
232 Float_t mean = 0;
233 Float_t sigma2 = 0;
234 Float_t ncumul=0;
235 for (Int_t ibin=1;ibin<nbins; ibin++){
236 ncumul+= his->GetBinContent(ibin);
237 Float_t fraction = Float_t(ncumul)/Float_t(nentries);
238 if (fraction>down && fraction<up){
239 sum+=his->GetBinContent(ibin);
240 mean+=his->GetBinCenter(ibin)*his->GetBinContent(ibin);
241 sigma2+=his->GetBinCenter(ibin)*his->GetBinCenter(ibin)*his->GetBinContent(ibin);
242 }
243 }
244 mean/=sum;
245 sigma2= TMath::Sqrt(TMath::Abs(sigma2/sum-mean*mean));
246 if (param){
247 (*param)[0] = his->GetMaximum();
248 (*param)[1] = mean;
249 (*param)[2] = sigma2;
250
251 }
252 if (verbose) printf("Mean\t%f\t Sigma2\t%f\n", mean,sigma2);
253}
254
255void TStatToolkit::LTM(TH1F * his, TVectorD *param , Float_t fraction, Bool_t verbose){
256 //
257 // LTM
258 //
259 Int_t nbins = his->GetNbinsX();
260 Int_t nentries = (Int_t)his->GetEntries();
261 Double_t *data = new Double_t[nentries];
262 Int_t npoints=0;
263 for (Int_t ibin=1;ibin<nbins; ibin++){
264 Float_t entriesI = his->GetBinContent(ibin);
265 Float_t xcenter= his->GetBinCenter(ibin);
266 for (Int_t ic=0; ic<entriesI; ic++){
267 if (npoints<nentries){
268 data[npoints]= xcenter;
269 npoints++;
270 }
271 }
272 }
273 Double_t mean, sigma;
274 Int_t npoints2=TMath::Min(Int_t(fraction*Float_t(npoints)),npoints-1);
275 npoints2=TMath::Max(Int_t(0.5*Float_t(npoints)),npoints2);
276 TStatToolkit::EvaluateUni(npoints, data, mean,sigma,npoints2);
277 delete [] data;
278 if (verbose) printf("Mean\t%f\t Sigma2\t%f\n", mean,sigma);if (param){
279 (*param)[0] = his->GetMaximum();
280 (*param)[1] = mean;
281 (*param)[2] = sigma;
282 }
283}
284
94a43b22 285Double_t TStatToolkit::FitGaus(TH1* his, TVectorD *param, TMatrixD */*matrix*/, Float_t xmin, Float_t xmax, Bool_t verbose){
21f3a443 286 //
287 // Fit histogram with gaussian function
288 //
289 // Prameters:
290 // return value- chi2 - if negative ( not enough points)
291 // his - input histogram
292 // param - vector with parameters
293 // xmin, xmax - range to fit - if xmin=xmax=0 - the full histogram range used
294 // Fitting:
295 // 1. Step - make logarithm
296 // 2. Linear fit (parabola) - more robust - always converge
297 // 3. In case of small statistic bins are averaged
298 //
299 static TLinearFitter fitter(3,"pol2");
300 TVectorD par(3);
301 TVectorD sigma(3);
302 TMatrixD mat(3,3);
303 if (his->GetMaximum()<4) return -1;
304 if (his->GetEntries()<12) return -1;
305 if (his->GetRMS()<mat.GetTol()) return -1;
306 Float_t maxEstimate = his->GetEntries()*his->GetBinWidth(1)/TMath::Sqrt((TMath::TwoPi()*his->GetRMS()));
307 Int_t dsmooth = TMath::Nint(6./TMath::Sqrt(maxEstimate));
308
309 if (maxEstimate<1) return -1;
310 Int_t nbins = his->GetNbinsX();
311 Int_t npoints=0;
312 //
313
314
315 if (xmin>=xmax){
316 xmin = his->GetXaxis()->GetXmin();
317 xmax = his->GetXaxis()->GetXmax();
318 }
319 for (Int_t iter=0; iter<2; iter++){
320 fitter.ClearPoints();
321 npoints=0;
322 for (Int_t ibin=1;ibin<nbins+1; ibin++){
323 Int_t countB=1;
324 Float_t entriesI = his->GetBinContent(ibin);
325 for (Int_t delta = -dsmooth; delta<=dsmooth; delta++){
326 if (ibin+delta>1 &&ibin+delta<nbins-1){
327 entriesI += his->GetBinContent(ibin+delta);
328 countB++;
329 }
330 }
331 entriesI/=countB;
332 Double_t xcenter= his->GetBinCenter(ibin);
333 if (xcenter<xmin || xcenter>xmax) continue;
334 Double_t error=1./TMath::Sqrt(countB);
335 Float_t cont=2;
336 if (iter>0){
337 if (par[0]+par[1]*xcenter+par[2]*xcenter*xcenter>20) return 0;
338 cont = TMath::Exp(par[0]+par[1]*xcenter+par[2]*xcenter*xcenter);
339 if (cont>1.) error = 1./TMath::Sqrt(cont*Float_t(countB));
340 }
341 if (entriesI>1&&cont>1){
342 fitter.AddPoint(&xcenter,TMath::Log(Float_t(entriesI)),error);
343 npoints++;
344 }
345 }
346 if (npoints>3){
347 fitter.Eval();
348 fitter.GetParameters(par);
349 }else{
350 break;
351 }
352 }
353 if (npoints<=3){
354 return -1;
355 }
356 fitter.GetParameters(par);
357 fitter.GetCovarianceMatrix(mat);
358 if (TMath::Abs(par[1])<mat.GetTol()) return -1;
359 if (TMath::Abs(par[2])<mat.GetTol()) return -1;
360 Double_t chi2 = fitter.GetChisquare()/Float_t(npoints);
361 //fitter.GetParameters();
362 if (!param) param = new TVectorD(3);
cb1d20de 363 // if (!matrix) matrix = new TMatrixD(3,3); // Covariance matrix to be implemented
21f3a443 364 (*param)[1] = par[1]/(-2.*par[2]);
365 (*param)[2] = 1./TMath::Sqrt(TMath::Abs(-2.*par[2]));
366 (*param)[0] = TMath::Exp(par[0]+ par[1]* (*param)[1] + par[2]*(*param)[1]*(*param)[1]);
367 if (verbose){
368 par.Print();
369 mat.Print();
370 param->Print();
371 printf("Chi2=%f\n",chi2);
372 TF1 * f1= new TF1("f1","[0]*exp(-(x-[1])^2/(2*[2]*[2]))",his->GetXaxis()->GetXmin(),his->GetXaxis()->GetXmax());
373 f1->SetParameter(0, (*param)[0]);
374 f1->SetParameter(1, (*param)[1]);
375 f1->SetParameter(2, (*param)[2]);
376 f1->Draw("same");
377 }
378 return chi2;
379}
380
cb1d20de 381Double_t TStatToolkit::FitGaus(Float_t *arr, Int_t nBins, Float_t xMin, Float_t xMax, TVectorD *param, TMatrixD */*matrix*/, Bool_t verbose){
21f3a443 382 //
383 // Fit histogram with gaussian function
384 //
385 // Prameters:
386 // nbins: size of the array and number of histogram bins
387 // xMin, xMax: histogram range
388 // param: paramters of the fit (0-Constant, 1-Mean, 2-Sigma)
389 // matrix: covariance matrix -- not implemented yet, pass dummy matrix!!!
390 //
391 // Return values:
392 // >0: the chi2 returned by TLinearFitter
393 // -3: only three points have been used for the calculation - no fitter was used
394 // -2: only two points have been used for the calculation - center of gravity was uesed for calculation
395 // -1: only one point has been used for the calculation - center of gravity was uesed for calculation
396 // -4: invalid result!!
397 //
398 // Fitting:
399 // 1. Step - make logarithm
400 // 2. Linear fit (parabola) - more robust - always converge
401 //
402 static TLinearFitter fitter(3,"pol2");
403 static TMatrixD mat(3,3);
404 static Double_t kTol = mat.GetTol();
405 fitter.StoreData(kFALSE);
406 fitter.ClearPoints();
407 TVectorD par(3);
408 TVectorD sigma(3);
3d7cc0b4 409 TMatrixD matA(3,3);
21f3a443 410 TMatrixD b(3,1);
411 Float_t rms = TMath::RMS(nBins,arr);
412 Float_t max = TMath::MaxElement(nBins,arr);
413 Float_t binWidth = (xMax-xMin)/(Float_t)nBins;
414
415 Float_t meanCOG = 0;
416 Float_t rms2COG = 0;
417 Float_t sumCOG = 0;
418
419 Float_t entries = 0;
420 Int_t nfilled=0;
421
422 for (Int_t i=0; i<nBins; i++){
423 entries+=arr[i];
424 if (arr[i]>0) nfilled++;
425 }
426
427 if (max<4) return -4;
428 if (entries<12) return -4;
429 if (rms<kTol) return -4;
430
431 Int_t npoints=0;
432 //
433
434 //
435 for (Int_t ibin=0;ibin<nBins; ibin++){
436 Float_t entriesI = arr[ibin];
437 if (entriesI>1){
438 Double_t xcenter = xMin+(ibin+0.5)*binWidth;
439
440 Float_t error = 1./TMath::Sqrt(entriesI);
441 Float_t val = TMath::Log(Float_t(entriesI));
442 fitter.AddPoint(&xcenter,val,error);
443 if (npoints<3){
3d7cc0b4 444 matA(npoints,0)=1;
445 matA(npoints,1)=xcenter;
446 matA(npoints,2)=xcenter*xcenter;
21f3a443 447 b(npoints,0)=val;
448 meanCOG+=xcenter*entriesI;
449 rms2COG +=xcenter*entriesI*xcenter;
450 sumCOG +=entriesI;
451 }
452 npoints++;
453 }
454 }
455
456
457 Double_t chi2 = 0;
458 if (npoints>=3){
459 if ( npoints == 3 ){
460 //analytic calculation of the parameters for three points
3d7cc0b4 461 matA.Invert();
21f3a443 462 TMatrixD res(1,3);
3d7cc0b4 463 res.Mult(matA,b);
21f3a443 464 par[0]=res(0,0);
465 par[1]=res(0,1);
466 par[2]=res(0,2);
467 chi2 = -3.;
468 } else {
469 // use fitter for more than three points
470 fitter.Eval();
471 fitter.GetParameters(par);
472 fitter.GetCovarianceMatrix(mat);
473 chi2 = fitter.GetChisquare()/Float_t(npoints);
474 }
475 if (TMath::Abs(par[1])<kTol) return -4;
476 if (TMath::Abs(par[2])<kTol) return -4;
477
478 if (!param) param = new TVectorD(3);
cb1d20de 479 //if (!matrix) matrix = new TMatrixD(3,3); // !!!!might be a memory leek. use dummy matrix pointer to call this function! // Covariance matrix to be implemented
21f3a443 480
481 (*param)[1] = par[1]/(-2.*par[2]);
482 (*param)[2] = 1./TMath::Sqrt(TMath::Abs(-2.*par[2]));
483 Double_t lnparam0 = par[0]+ par[1]* (*param)[1] + par[2]*(*param)[1]*(*param)[1];
484 if ( lnparam0>307 ) return -4;
485 (*param)[0] = TMath::Exp(lnparam0);
486 if (verbose){
487 par.Print();
488 mat.Print();
489 param->Print();
490 printf("Chi2=%f\n",chi2);
491 TF1 * f1= new TF1("f1","[0]*exp(-(x-[1])^2/(2*[2]*[2]))",xMin,xMax);
492 f1->SetParameter(0, (*param)[0]);
493 f1->SetParameter(1, (*param)[1]);
494 f1->SetParameter(2, (*param)[2]);
495 f1->Draw("same");
496 }
497 return chi2;
498 }
499
500 if (npoints == 2){
501 //use center of gravity for 2 points
502 meanCOG/=sumCOG;
503 rms2COG /=sumCOG;
504 (*param)[0] = max;
505 (*param)[1] = meanCOG;
506 (*param)[2] = TMath::Sqrt(TMath::Abs(meanCOG*meanCOG-rms2COG));
507 chi2=-2.;
508 }
509 if ( npoints == 1 ){
510 meanCOG/=sumCOG;
511 (*param)[0] = max;
512 (*param)[1] = meanCOG;
513 (*param)[2] = binWidth/TMath::Sqrt(12);
514 chi2=-1.;
515 }
516 return chi2;
517
518}
519
520
3d7cc0b4 521Float_t TStatToolkit::GetCOG(const Short_t *arr, Int_t nBins, Float_t xMin, Float_t xMax, Float_t *rms, Float_t *sum)
21f3a443 522{
523 //
524 // calculate center of gravity rms and sum for array 'arr' with nBins an a x range xMin to xMax
525 // return COG; in case of failure return xMin
526 //
527 Float_t meanCOG = 0;
528 Float_t rms2COG = 0;
529 Float_t sumCOG = 0;
530 Int_t npoints = 0;
531
532 Float_t binWidth = (xMax-xMin)/(Float_t)nBins;
533
534 for (Int_t ibin=0; ibin<nBins; ibin++){
535 Float_t entriesI = (Float_t)arr[ibin];
536 Double_t xcenter = xMin+(ibin+0.5)*binWidth;
537 if ( entriesI>0 ){
538 meanCOG += xcenter*entriesI;
539 rms2COG += xcenter*entriesI*xcenter;
540 sumCOG += entriesI;
541 npoints++;
542 }
543 }
544 if ( sumCOG == 0 ) return xMin;
545 meanCOG/=sumCOG;
546
547 if ( rms ){
548 rms2COG /=sumCOG;
549 (*rms) = TMath::Sqrt(TMath::Abs(meanCOG*meanCOG-rms2COG));
550 if ( npoints == 1 ) (*rms) = binWidth/TMath::Sqrt(12);
551 }
552
553 if ( sum )
554 (*sum) = sumCOG;
555
556 return meanCOG;
557}
558
559
560
561///////////////////////////////////////////////////////////////
562////////////// TEST functions /////////////////////////
563///////////////////////////////////////////////////////////////
564
565
566
567
568
569void TStatToolkit::TestGausFit(Int_t nhistos){
570 //
571 // Test performance of the parabolic - gaussian fit - compare it with
572 // ROOT gauss fit
573 // nhistos - number of histograms to be used for test
574 //
575 TTreeSRedirector *pcstream = new TTreeSRedirector("fitdebug.root");
576
577 Float_t *xTrue = new Float_t[nhistos];
578 Float_t *sTrue = new Float_t[nhistos];
579 TVectorD **par1 = new TVectorD*[nhistos];
580 TVectorD **par2 = new TVectorD*[nhistos];
581 TMatrixD dummy(3,3);
582
583
584 TH1F **h1f = new TH1F*[nhistos];
585 TF1 *myg = new TF1("myg","gaus");
586 TF1 *fit = new TF1("fit","gaus");
587 gRandom->SetSeed(0);
588
589 //init
590 for (Int_t i=0;i<nhistos; i++){
591 par1[i] = new TVectorD(3);
592 par2[i] = new TVectorD(3);
593 h1f[i] = new TH1F(Form("h1f%d",i),Form("h1f%d",i),20,-10,10);
594 xTrue[i]= gRandom->Rndm();
595 gSystem->Sleep(2);
596 sTrue[i]= .75+gRandom->Rndm()*.5;
597 myg->SetParameters(1,xTrue[i],sTrue[i]);
598 h1f[i]->FillRandom("myg");
599 }
600
601 TStopwatch s;
602 s.Start();
603 //standard gaus fit
604 for (Int_t i=0; i<nhistos; i++){
605 h1f[i]->Fit(fit,"0q");
606 (*par1[i])(0) = fit->GetParameter(0);
607 (*par1[i])(1) = fit->GetParameter(1);
608 (*par1[i])(2) = fit->GetParameter(2);
609 }
610 s.Stop();
611 printf("Gaussian fit\t");
612 s.Print();
613
614 s.Start();
615 //TStatToolkit gaus fit
616 for (Int_t i=0; i<nhistos; i++){
617 TStatToolkit::FitGaus(h1f[i]->GetArray()+1,h1f[i]->GetNbinsX(),h1f[i]->GetXaxis()->GetXmin(),h1f[i]->GetXaxis()->GetXmax(),par2[i],&dummy);
618 }
619
620 s.Stop();
621 printf("Parabolic fit\t");
622 s.Print();
623 //write stream
624 for (Int_t i=0;i<nhistos; i++){
625 Float_t xt = xTrue[i];
626 Float_t st = sTrue[i];
627 (*pcstream)<<"data"
628 <<"xTrue="<<xt
629 <<"sTrue="<<st
630 <<"pg.="<<(par1[i])
631 <<"pa.="<<(par2[i])
632 <<"\n";
633 }
634 //delete pointers
635 for (Int_t i=0;i<nhistos; i++){
636 delete par1[i];
637 delete par2[i];
638 delete h1f[i];
639 }
640 delete pcstream;
641 delete []h1f;
642 delete []xTrue;
643 delete []sTrue;
644 //
645 delete []par1;
646 delete []par2;
647
648}
649
650
651
652TGraph2D * TStatToolkit::MakeStat2D(TH3 * his, Int_t delta0, Int_t delta1, Int_t type){
653 //
654 //
655 //
656 // delta - number of bins to integrate
657 // type - 0 - mean value
658
659 TAxis * xaxis = his->GetXaxis();
660 TAxis * yaxis = his->GetYaxis();
661 // TAxis * zaxis = his->GetZaxis();
662 Int_t nbinx = xaxis->GetNbins();
663 Int_t nbiny = yaxis->GetNbins();
664 char name[1000];
665 Int_t icount=0;
666 TGraph2D *graph = new TGraph2D(nbinx*nbiny);
667 TF1 f1("f1","gaus");
668 for (Int_t ix=0; ix<nbinx;ix++)
669 for (Int_t iy=0; iy<nbiny;iy++){
670 Float_t xcenter = xaxis->GetBinCenter(ix);
671 Float_t ycenter = yaxis->GetBinCenter(iy);
cb1d20de 672 snprintf(name,1000,"%s_%d_%d",his->GetName(), ix,iy);
21f3a443 673 TH1 *projection = his->ProjectionZ(name,ix-delta0,ix+delta0,iy-delta1,iy+delta1);
674 Float_t stat= 0;
675 if (type==0) stat = projection->GetMean();
676 if (type==1) stat = projection->GetRMS();
677 if (type==2 || type==3){
678 TVectorD vec(3);
679 TStatToolkit::LTM((TH1F*)projection,&vec,0.7);
680 if (type==2) stat= vec[1];
681 if (type==3) stat= vec[0];
682 }
683 if (type==4|| type==5){
684 projection->Fit(&f1);
685 if (type==4) stat= f1.GetParameter(1);
686 if (type==5) stat= f1.GetParameter(2);
687 }
688 //printf("%d\t%f\t%f\t%f\n", icount,xcenter, ycenter, stat);
689 graph->SetPoint(icount,xcenter, ycenter, stat);
690 icount++;
691 }
692 return graph;
693}
694
695TGraph * TStatToolkit::MakeStat1D(TH3 * his, Int_t delta1, Int_t type){
696 //
697 //
698 //
699 // delta - number of bins to integrate
700 // type - 0 - mean value
701
702 TAxis * xaxis = his->GetXaxis();
703 TAxis * yaxis = his->GetYaxis();
704 // TAxis * zaxis = his->GetZaxis();
705 Int_t nbinx = xaxis->GetNbins();
706 Int_t nbiny = yaxis->GetNbins();
707 char name[1000];
708 Int_t icount=0;
709 TGraph *graph = new TGraph(nbinx);
710 TF1 f1("f1","gaus");
711 for (Int_t ix=0; ix<nbinx;ix++){
712 Float_t xcenter = xaxis->GetBinCenter(ix);
713 // Float_t ycenter = yaxis->GetBinCenter(iy);
cb1d20de 714 snprintf(name,1000,"%s_%d",his->GetName(), ix);
21f3a443 715 TH1 *projection = his->ProjectionZ(name,ix-delta1,ix+delta1,0,nbiny);
716 Float_t stat= 0;
717 if (type==0) stat = projection->GetMean();
718 if (type==1) stat = projection->GetRMS();
719 if (type==2 || type==3){
720 TVectorD vec(3);
721 TStatToolkit::LTM((TH1F*)projection,&vec,0.7);
722 if (type==2) stat= vec[1];
723 if (type==3) stat= vec[0];
724 }
725 if (type==4|| type==5){
726 projection->Fit(&f1);
727 if (type==4) stat= f1.GetParameter(1);
728 if (type==5) stat= f1.GetParameter(2);
729 }
730 //printf("%d\t%f\t%f\t%f\n", icount,xcenter, ycenter, stat);
731 graph->SetPoint(icount,xcenter, stat);
732 icount++;
733 }
734 return graph;
735}
736
737
738
739
740
88b1c775 741TString* TStatToolkit::FitPlane(TTree *tree, const char* drawCommand, const char* formula, const char* cuts, Double_t & chi2, Int_t &npoints, TVectorD &fitParam, TMatrixD &covMatrix, Float_t frac, Int_t start, Int_t stop,Bool_t fix0){
21f3a443 742 //
743 // fit an arbitrary function, specified by formula into the data, specified by drawCommand and cuts
744 // returns chi2, fitParam and covMatrix
745 // returns TString with fitted formula
746 //
dd46129c 747
21f3a443 748 TString formulaStr(formula);
749 TString drawStr(drawCommand);
750 TString cutStr(cuts);
dd46129c 751 TString ferr("1");
752
753 TString strVal(drawCommand);
754 if (strVal.Contains(":")){
755 TObjArray* valTokens = strVal.Tokenize(":");
756 drawStr = valTokens->At(0)->GetName();
757 ferr = valTokens->At(1)->GetName();
758 }
759
21f3a443 760
761 formulaStr.ReplaceAll("++", "~");
762 TObjArray* formulaTokens = formulaStr.Tokenize("~");
763 Int_t dim = formulaTokens->GetEntriesFast();
764
765 fitParam.ResizeTo(dim);
766 covMatrix.ResizeTo(dim,dim);
767
768 TLinearFitter* fitter = new TLinearFitter(dim+1, Form("hyp%d",dim));
769 fitter->StoreData(kTRUE);
770 fitter->ClearPoints();
771
772 Int_t entries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start, start);
773 if (entries == -1) return new TString("An ERROR has occured during fitting!");
bd7b4d18 774 Double_t **values = new Double_t*[dim+1] ;
775 for (Int_t i=0; i<dim+1; i++) values[i]=NULL;
dd46129c 776 //
777 entries = tree->Draw(ferr.Data(), cutStr.Data(), "goff", stop-start, start);
b8072cce 778 if (entries == -1) {
779 delete []values;
780 return new TString("An ERROR has occured during fitting!");
781 }
dd46129c 782 Double_t *errors = new Double_t[entries];
783 memcpy(errors, tree->GetV1(), entries*sizeof(Double_t));
21f3a443 784
785 for (Int_t i = 0; i < dim + 1; i++){
786 Int_t centries = 0;
787 if (i < dim) centries = tree->Draw(((TObjString*)formulaTokens->At(i))->GetName(), cutStr.Data(), "goff", stop-start,start);
788 else centries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start,start);
789
b8072cce 790 if (entries != centries) {
791 delete []errors;
792 delete []values;
793 return new TString("An ERROR has occured during fitting!");
794 }
21f3a443 795 values[i] = new Double_t[entries];
796 memcpy(values[i], tree->GetV1(), entries*sizeof(Double_t));
797 }
798
799 // add points to the fitter
800 for (Int_t i = 0; i < entries; i++){
801 Double_t x[1000];
802 for (Int_t j=0; j<dim;j++) x[j]=values[j][i];
dd46129c 803 fitter->AddPoint(x, values[dim][i], errors[i]);
21f3a443 804 }
805
806 fitter->Eval();
2c629c56 807 if (frac>0.5 && frac<1){
808 fitter->EvalRobust(frac);
88b1c775 809 }else{
810 if (fix0) {
811 fitter->FixParameter(0,0);
812 fitter->Eval();
813 }
2c629c56 814 }
21f3a443 815 fitter->GetParameters(fitParam);
816 fitter->GetCovarianceMatrix(covMatrix);
817 chi2 = fitter->GetChisquare();
b8072cce 818 npoints = entries;
21f3a443 819 TString *preturnFormula = new TString(Form("( %f+",fitParam[0])), &returnFormula = *preturnFormula;
820
821 for (Int_t iparam = 0; iparam < dim; iparam++) {
822 returnFormula.Append(Form("%s*(%f)",((TObjString*)formulaTokens->At(iparam))->GetName(),fitParam[iparam+1]));
823 if (iparam < dim-1) returnFormula.Append("+");
824 }
825 returnFormula.Append(" )");
4d61c301 826
827
b8072cce 828 for (Int_t j=0; j<dim+1;j++) delete [] values[j];
4d61c301 829
830
cb1d20de 831 delete formulaTokens;
832 delete fitter;
833 delete[] values;
b8072cce 834 delete[] errors;
cb1d20de 835 return preturnFormula;
836}
837
838TString* TStatToolkit::FitPlaneConstrain(TTree *tree, const char* drawCommand, const char* formula, const char* cuts, Double_t & chi2, Int_t &npoints, TVectorD &fitParam, TMatrixD &covMatrix, Float_t frac, Int_t start, Int_t stop,Double_t constrain){
839 //
840 // fit an arbitrary function, specified by formula into the data, specified by drawCommand and cuts
841 // returns chi2, fitParam and covMatrix
842 // returns TString with fitted formula
843 //
844
845 TString formulaStr(formula);
846 TString drawStr(drawCommand);
847 TString cutStr(cuts);
848 TString ferr("1");
849
850 TString strVal(drawCommand);
851 if (strVal.Contains(":")){
852 TObjArray* valTokens = strVal.Tokenize(":");
853 drawStr = valTokens->At(0)->GetName();
854 ferr = valTokens->At(1)->GetName();
855 }
856
857
858 formulaStr.ReplaceAll("++", "~");
859 TObjArray* formulaTokens = formulaStr.Tokenize("~");
860 Int_t dim = formulaTokens->GetEntriesFast();
861
862 fitParam.ResizeTo(dim);
863 covMatrix.ResizeTo(dim,dim);
864
865 TLinearFitter* fitter = new TLinearFitter(dim+1, Form("hyp%d",dim));
866 fitter->StoreData(kTRUE);
867 fitter->ClearPoints();
868
869 Int_t entries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start, start);
870 if (entries == -1) return new TString("An ERROR has occured during fitting!");
871 Double_t **values = new Double_t*[dim+1] ;
bd7b4d18 872 for (Int_t i=0; i<dim+1; i++) values[i]=NULL;
cb1d20de 873 //
874 entries = tree->Draw(ferr.Data(), cutStr.Data(), "goff", stop-start, start);
b8072cce 875 if (entries == -1) {
876 delete [] values;
877 return new TString("An ERROR has occured during fitting!");
878 }
cb1d20de 879 Double_t *errors = new Double_t[entries];
880 memcpy(errors, tree->GetV1(), entries*sizeof(Double_t));
881
882 for (Int_t i = 0; i < dim + 1; i++){
883 Int_t centries = 0;
884 if (i < dim) centries = tree->Draw(((TObjString*)formulaTokens->At(i))->GetName(), cutStr.Data(), "goff", stop-start,start);
885 else centries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start,start);
886
b8072cce 887 if (entries != centries) {
888 delete []errors;
889 delete []values;
890 return new TString("An ERROR has occured during fitting!");
891 }
cb1d20de 892 values[i] = new Double_t[entries];
893 memcpy(values[i], tree->GetV1(), entries*sizeof(Double_t));
894 }
895
896 // add points to the fitter
897 for (Int_t i = 0; i < entries; i++){
898 Double_t x[1000];
899 for (Int_t j=0; j<dim;j++) x[j]=values[j][i];
900 fitter->AddPoint(x, values[dim][i], errors[i]);
901 }
902 if (constrain>0){
903 for (Int_t i = 0; i < dim; i++){
904 Double_t x[1000];
905 for (Int_t j=0; j<dim;j++) if (i!=j) x[j]=0;
906 x[i]=1.;
907 fitter->AddPoint(x, 0, constrain);
908 }
909 }
910
911
912 fitter->Eval();
913 if (frac>0.5 && frac<1){
914 fitter->EvalRobust(frac);
915 }
916 fitter->GetParameters(fitParam);
917 fitter->GetCovarianceMatrix(covMatrix);
918 chi2 = fitter->GetChisquare();
919 npoints = entries;
cb1d20de 920
921 TString *preturnFormula = new TString(Form("( %f+",fitParam[0])), &returnFormula = *preturnFormula;
922
923 for (Int_t iparam = 0; iparam < dim; iparam++) {
924 returnFormula.Append(Form("%s*(%f)",((TObjString*)formulaTokens->At(iparam))->GetName(),fitParam[iparam+1]));
925 if (iparam < dim-1) returnFormula.Append("+");
926 }
927 returnFormula.Append(" )");
928
b8072cce 929 for (Int_t j=0; j<dim+1;j++) delete [] values[j];
cb1d20de 930
931
932
933 delete formulaTokens;
934 delete fitter;
935 delete[] values;
b8072cce 936 delete[] errors;
cb1d20de 937 return preturnFormula;
938}
939
940
941
942TString* TStatToolkit::FitPlaneFixed(TTree *tree, const char* drawCommand, const char* formula, const char* cuts, Double_t & chi2, Int_t &npoints, TVectorD &fitParam, TMatrixD &covMatrix, Float_t frac, Int_t start, Int_t stop){
943 //
944 // fit an arbitrary function, specified by formula into the data, specified by drawCommand and cuts
945 // returns chi2, fitParam and covMatrix
946 // returns TString with fitted formula
947 //
948
949 TString formulaStr(formula);
950 TString drawStr(drawCommand);
951 TString cutStr(cuts);
952 TString ferr("1");
953
954 TString strVal(drawCommand);
955 if (strVal.Contains(":")){
956 TObjArray* valTokens = strVal.Tokenize(":");
957 drawStr = valTokens->At(0)->GetName();
958 ferr = valTokens->At(1)->GetName();
959 }
960
961
962 formulaStr.ReplaceAll("++", "~");
963 TObjArray* formulaTokens = formulaStr.Tokenize("~");
964 Int_t dim = formulaTokens->GetEntriesFast();
965
966 fitParam.ResizeTo(dim);
967 covMatrix.ResizeTo(dim,dim);
968 TString fitString="x0";
969 for (Int_t i=1; i<dim; i++) fitString+=Form("++x%d",i);
970 TLinearFitter* fitter = new TLinearFitter(dim, fitString.Data());
971 fitter->StoreData(kTRUE);
972 fitter->ClearPoints();
973
974 Int_t entries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start, start);
975 if (entries == -1) return new TString("An ERROR has occured during fitting!");
976 Double_t **values = new Double_t*[dim+1] ;
bd7b4d18 977 for (Int_t i=0; i<dim+1; i++) values[i]=NULL;
cb1d20de 978 //
979 entries = tree->Draw(ferr.Data(), cutStr.Data(), "goff", stop-start, start);
b8072cce 980 if (entries == -1) {
981 delete []values;
982 return new TString("An ERROR has occured during fitting!");
983 }
cb1d20de 984 Double_t *errors = new Double_t[entries];
985 memcpy(errors, tree->GetV1(), entries*sizeof(Double_t));
986
987 for (Int_t i = 0; i < dim + 1; i++){
988 Int_t centries = 0;
989 if (i < dim) centries = tree->Draw(((TObjString*)formulaTokens->At(i))->GetName(), cutStr.Data(), "goff", stop-start,start);
990 else centries = tree->Draw(drawStr.Data(), cutStr.Data(), "goff", stop-start,start);
991
b8072cce 992 if (entries != centries) {
993 delete []errors;
994 delete []values;
995 return new TString("An ERROR has occured during fitting!");
996 }
cb1d20de 997 values[i] = new Double_t[entries];
998 memcpy(values[i], tree->GetV1(), entries*sizeof(Double_t));
999 }
1000
1001 // add points to the fitter
1002 for (Int_t i = 0; i < entries; i++){
1003 Double_t x[1000];
1004 for (Int_t j=0; j<dim;j++) x[j]=values[j][i];
1005 fitter->AddPoint(x, values[dim][i], errors[i]);
1006 }
1007
1008 fitter->Eval();
1009 if (frac>0.5 && frac<1){
1010 fitter->EvalRobust(frac);
1011 }
1012 fitter->GetParameters(fitParam);
1013 fitter->GetCovarianceMatrix(covMatrix);
1014 chi2 = fitter->GetChisquare();
1015 npoints = entries;
cb1d20de 1016
1017 TString *preturnFormula = new TString("("), &returnFormula = *preturnFormula;
1018
1019 for (Int_t iparam = 0; iparam < dim; iparam++) {
1020 returnFormula.Append(Form("%s*(%f)",((TObjString*)formulaTokens->At(iparam))->GetName(),fitParam[iparam]));
1021 if (iparam < dim-1) returnFormula.Append("+");
1022 }
1023 returnFormula.Append(" )");
1024
1025
b8072cce 1026 for (Int_t j=0; j<dim+1;j++) delete [] values[j];
cb1d20de 1027
21f3a443 1028 delete formulaTokens;
1029 delete fitter;
1030 delete[] values;
b8072cce 1031 delete[] errors;
21f3a443 1032 return preturnFormula;
1033}
7c9cf6e4 1034
1035
1036
1037
1038
3d7cc0b4 1039Int_t TStatToolkit::GetFitIndex(const TString fString, const TString subString){
7c9cf6e4 1040 //
1041 // fitString - ++ separated list of fits
1042 // substring - ++ separated list of the requiered substrings
1043 //
1044 // return the last occurance of substring in fit string
1045 //
1046 TObjArray *arrFit = fString.Tokenize("++");
1047 TObjArray *arrSub = subString.Tokenize("++");
1048 Int_t index=-1;
1049 for (Int_t i=0; i<arrFit->GetEntries(); i++){
1050 Bool_t isOK=kTRUE;
1051 TString str =arrFit->At(i)->GetName();
1052 for (Int_t isub=0; isub<arrSub->GetEntries(); isub++){
1053 if (str.Contains(arrSub->At(isub)->GetName())==0) isOK=kFALSE;
1054 }
1055 if (isOK) index=i;
1056 }
1057 return index;
1058}
1059
1060
3d7cc0b4 1061TString TStatToolkit::FilterFit(const TString &input, const TString filter, TVectorD &param, TMatrixD & covar){
7c9cf6e4 1062 //
1063 // Filter fit expression make sub-fit
1064 //
1065 TObjArray *array0= input.Tokenize("++");
1066 TObjArray *array1= filter.Tokenize("++");
1067 //TString *presult=new TString("(0");
1068 TString result="(0.0";
1069 for (Int_t i=0; i<array0->GetEntries(); i++){
1070 Bool_t isOK=kTRUE;
1071 TString str(array0->At(i)->GetName());
1072 for (Int_t j=0; j<array1->GetEntries(); j++){
1073 if (str.Contains(array1->At(j)->GetName())==0) isOK=kFALSE;
1074 }
1075 if (isOK) {
1076 result+="+"+str;
1077 result+=Form("*(%f)",param[i+1]);
1078 printf("%f\t%f\t%s\n",param[i+1], TMath::Sqrt(covar(i+1,i+1)),str.Data());
1079 }
1080 }
1081 result+="-0.)";
1082 return result;
1083}
1084
void TStatToolkit::Update1D(Double_t delta, Double_t sigma, Int_t s1, TMatrixD &vecXk, TMatrixD &covXk){
  //
  // Kalman-filter update of the parameter vector and its covariance with one
  // scalar measurement of a single parameter.
  // Input:
  // vecXk - parameter vector (knElem x 1)       - updated in place
  // covXk - covariance matrix (knElem x knElem) - updated in place
  // delta, sigma, s1 - new measurement value, its rms, and the index of the
  //                    measured parameter in vecXk
  const Int_t knMeas=1;                    // one scalar measurement per update
  Int_t knElem=vecXk.GetNrows();

  TMatrixD mat1(knElem,knElem);            // identity - used in the covariance update
  TMatrixD matHk(1,knElem);                // measurement matrix H - selects element s1
  TMatrixD vecYk(knMeas,1);                // innovation or measurement residual
  TMatrixD matHkT(knElem,knMeas);          // helper matrix - Hk transpose
  TMatrixD matSk(knMeas,knMeas);           // innovation (or residual) covariance
  TMatrixD matKk(knElem,knMeas);           // optimal Kalman gain
  TMatrixD covXk2(knElem,knElem);          // helper matrix (I - K*H)
  TMatrixD covXk3(knElem,knElem);          // helper matrix - updated covariance
  TMatrixD vecZk(1,1);                     // measurement vector z
  TMatrixD measR(1,1);                     // measurement variance R
  vecZk(0,0)=delta;
  measR(0,0)=sigma*sigma;
  //
  // reset matHk
  for (Int_t iel=0;iel<knElem;iel++)
    for (Int_t ip=0;ip<knMeas;ip++) matHk(ip,iel)=0;
  // mat1 - build the identity matrix
  for (Int_t iel=0;iel<knElem;iel++) {
    for (Int_t jel=0;jel<knElem;jel++) mat1(iel,jel)=0;
    mat1(iel,iel)=1;
  }
  //
  matHk(0, s1)=1;                          // only parameter s1 is measured
  vecYk = vecZk-matHk*vecXk;               // innovation: y = z - H*x
  matHkT=matHk.T(); matHk.T();             // T() transposes in place - the second call restores matHk
  matSk = (matHk*(covXk*matHkT))+measR;    // innovation covariance: S = H*P*H' + R
  matSk.Invert();
  matKk = (covXk*matHkT)*matSk;            // optimal Kalman gain: K = P*H'*S^-1
  vecXk += matKk*vecYk;                    // updated vector: x = x + K*y
  covXk2= (mat1-(matKk*matHk));
  covXk3 = covXk2*covXk;                   // updated covariance: P = (I - K*H)*P
  covXk = covXk3;
  Int_t nrows=covXk3.GetNrows();

  for (Int_t irow=0; irow<nrows; irow++)
    for (Int_t icol=0; icol<nrows; icol++){
      // rounding problems - make the matrix symmetric again
      covXk(irow,icol)=(covXk3(irow,icol)+covXk3(icol,irow))*0.5;
    }
}
1135
1136
1137
3d7cc0b4 1138void TStatToolkit::Constrain1D(const TString &input, const TString filter, TVectorD &param, TMatrixD & covar, Double_t mean, Double_t sigma){
7c9cf6e4 1139 //
1140 // constrain linear fit
1141 // input - string description of fit function
1142 // filter - string filter to select sub fits
1143 // param,covar - parameters and covariance matrix of the fit
1144 // mean,sigma - new measurement uning which the fit is updated
1145 //
ae45c94d 1146
7c9cf6e4 1147 TObjArray *array0= input.Tokenize("++");
1148 TObjArray *array1= filter.Tokenize("++");
1149 TMatrixD paramM(param.GetNrows(),1);
1150 for (Int_t i=0; i<=array0->GetEntries(); i++){paramM(i,0)=param(i);}
1151
ae45c94d 1152 if (filter.Length()==0){
1153 TStatToolkit::Update1D(mean, sigma, 0, paramM, covar);//
1154 }else{
1155 for (Int_t i=0; i<array0->GetEntries(); i++){
1156 Bool_t isOK=kTRUE;
1157 TString str(array0->At(i)->GetName());
1158 for (Int_t j=0; j<array1->GetEntries(); j++){
1159 if (str.Contains(array1->At(j)->GetName())==0) isOK=kFALSE;
1160 }
1161 if (isOK) {
1162 TStatToolkit::Update1D(mean, sigma, i+1, paramM, covar);//
1163 }
7c9cf6e4 1164 }
1165 }
1166 for (Int_t i=0; i<=array0->GetEntries(); i++){
1167 param(i)=paramM(i,0);
1168 }
1169}
1170
ae45c94d 1171TString TStatToolkit::MakeFitString(const TString &input, const TVectorD &param, const TMatrixD & covar, Bool_t verbose){
7c9cf6e4 1172 //
1173 //
1174 //
1175 TObjArray *array0= input.Tokenize("++");
ae45c94d 1176 TString result=Form("(%f",param[0]);
1177 printf("%f\t%f\t\n", param[0], TMath::Sqrt(covar(0,0)));
7c9cf6e4 1178 for (Int_t i=0; i<array0->GetEntries(); i++){
1179 TString str(array0->At(i)->GetName());
1180 result+="+"+str;
1181 result+=Form("*(%f)",param[i+1]);
ae45c94d 1182 if (verbose) printf("%f\t%f\t%s\n", param[i+1], TMath::Sqrt(covar(i+1,i+1)),str.Data());
7c9cf6e4 1183 }
1184 result+="-0.)";
1185 return result;
1186}
df0a2a0a 1187
1188
1189TGraph * TStatToolkit::MakeGraphSparse(TTree * tree, const char * expr, const char * cut){
1190 //
1191 // Make a sparse draw of the variables
1192 //
1193 const Int_t entries = tree->Draw(expr,cut,"goff");
1194 // TGraph * graph = (TGraph*)gPad->GetPrimitive("Graph"); // 2D
1195 TGraph * graph = new TGraph (entries, tree->GetV2(),tree->GetV1());
1196 //
1197 Int_t *index = new Int_t[entries];
1198 TMath::Sort(entries,graph->GetX(),index,kFALSE);
1199
3d7cc0b4 1200 Double_t *tempArray = new Double_t[entries];
df0a2a0a 1201
1202 Double_t count = 0.5;
baa0041d 1203 Double_t *vrun = new Double_t[entries];
1204 Int_t icount=0;
1205 //
3d7cc0b4 1206 tempArray[index[0]] = count;
baa0041d 1207 vrun[0] = graph->GetX()[index[0]];
df0a2a0a 1208 for(Int_t i=1;i<entries;i++){
1209 if(graph->GetX()[index[i]]==graph->GetX()[index[i-1]])
3d7cc0b4 1210 tempArray[index[i]] = count;
df0a2a0a 1211 else if(graph->GetX()[index[i]]!=graph->GetX()[index[i-1]]){
1212 count++;
baa0041d 1213 icount++;
3d7cc0b4 1214 tempArray[index[i]] = count;
baa0041d 1215 vrun[icount]=graph->GetX()[index[i]];
df0a2a0a 1216 }
1217 }
1218
1219 const Int_t newNbins = int(count+0.5);
1220 Double_t *newBins = new Double_t[newNbins+1];
1221 for(Int_t i=0; i<=count+1;i++){
1222 newBins[i] = i;
1223 }
1224
3d7cc0b4 1225 TGraph *graphNew = new TGraph(entries,tempArray,graph->GetY());
df0a2a0a 1226 graphNew->GetXaxis()->Set(newNbins,newBins);
1227
1228 Char_t xName[50];
df0a2a0a 1229 for(Int_t i=0;i<count;i++){
baa0041d 1230 snprintf(xName,50,"%d",Int_t(vrun[i]));
df0a2a0a 1231 graphNew->GetXaxis()->SetBinLabel(i+1,xName);
1232 }
1233 graphNew->GetHistogram()->SetTitle("");
1234
3d7cc0b4 1235 delete [] tempArray;
df0a2a0a 1236 delete [] index;
1237 delete [] newBins;
baa0041d 1238 delete [] vrun;
df0a2a0a 1239 return graphNew;
1240}
1241