/**************************************************************************
 * Copyright(c) 1998-1999, ALICE Experiment at CERN, All rights reserved. *
 *                                                                        *
 * Author: The ALICE Off-line Project.                                    *
 * Contributors are mentioned in the code where appropriate.              *
 *                                                                        *
 * Permission to use, copy, modify and distribute this software and its   *
 * documentation strictly for non-commercial purposes is hereby granted   *
 * without fee, provided that the above copyright notice appears in all   *
 * copies and that both the copyright notice and this permission notice   *
 * appear in the supporting documentation. The authors make no claims     *
 * about the suitability of this software for any purpose. It is          *
 * provided "as is" without express or implied warranty.                  *
 **************************************************************************/

///////////////////////////////////////////////////////////////////////////
// Class AliMathBase
//
// Subset of mathematical functions not included in TMath
//
///////////////////////////////////////////////////////////////////////////

#include "TMath.h"
#include "AliMathBase.h"
#include "Riostream.h"
#include "TH1F.h"
#include "TF1.h"
#include "TLinearFitter.h"

ClassImp(AliMathBase) // Class implementation to enable ROOT I/O

AliMathBase::AliMathBase() : TObject()
{
  // Default constructor
}
///////////////////////////////////////////////////////////////////////////
AliMathBase::~AliMathBase()
{
  // Destructor
}

//_____________________________________________________________________________
void AliMathBase::EvaluateUni(Int_t nvectors, Double_t *data, Double_t &mean
                            , Double_t &sigma, Int_t hh)
{
  //
  // Robust estimator in 1D case MI version - (faster than ROOT version)
  //
  // For the univariate case
  // estimates of location and scatter are returned in mean and sigma parameters
  // the algorithm works on the same principle as in multivariate case -
  // it finds a subset of size hh with smallest sigma, and then returns mean and
  // sigma of this subset
  //

  if (hh==0)
    hh=(nvectors+2)/2;
  Double_t faclts[]={2.6477,2.5092,2.3826,2.2662,2.1587,2.0589,1.9660,1.879,1.7973,1.7203,1.6473};
  Int_t *index=new Int_t[nvectors];
  TMath::Sort(nvectors, data, index, kFALSE);   // sort in increasing order

  Int_t    nquant = TMath::Min(Int_t(Double_t(((hh*1./nvectors)-0.5)*40))+1, 11);
  Double_t factor = faclts[TMath::Max(0,nquant-1)];

  Double_t sumx  = 0;
  Double_t sumx2 = 0;
  Int_t    bestindex = -1;
  Double_t bestmean  = 0;
  Double_t bestsigma = data[index[nvectors-1]]-data[index[0]];   // maximal possible sigma
  //
  // sums over the first window of hh ordered points
  //
  for (Int_t i=0; i<hh; i++){
    sumx  += data[index[i]];
    sumx2 += data[index[i]]*data[index[i]];
  }
  //
  // slide the window over the ordered data and keep the subset with the
  // smallest spread
  //
  Double_t norm  = 1./Double_t(hh);
  Double_t norm2 = 1./Double_t(hh-1);
  for (Int_t i=hh; i<nvectors; i++){
    Double_t cmean  = sumx*norm;
    Double_t csigma = TMath::Sqrt(TMath::Abs((sumx2-Double_t(hh)*cmean*cmean)*norm2));
    if (csigma<bestsigma){
      bestmean  = cmean;
      bestsigma = csigma;
      bestindex = i-hh;
    }
    sumx  += data[index[i]]-data[index[i-hh]];
    sumx2 += data[index[i]]*data[index[i]]-data[index[i-hh]]*data[index[i-hh]];
  }

  mean  = bestmean;
  sigma = factor*bestsigma;   // normalize to the sigma of a Gaussian
  delete [] index;
}

//_____________________________________________________________________________
void AliMathBase::EvaluateUniExternal(Int_t nvectors, Double_t *data, Double_t &mean
                                    , Double_t &sigma, Int_t hh, Float_t externalfactor)
{
  //
  // Modified version of the robust estimator EvaluateUni -
  // an external factor is added to take into account the precision of an
  // external measurement
  //
  if (hh==0)
    hh=(nvectors+2)/2;
  Double_t faclts[]={2.6477,2.5092,2.3826,2.2662,2.1587,2.0589,1.9660,1.879,1.7973,1.7203,1.6473};
  Int_t *index=new Int_t[nvectors];
  TMath::Sort(nvectors, data, index, kFALSE);

  Int_t    nquant = TMath::Min(Int_t(Double_t(((hh*1./nvectors)-0.5)*40))+1, 11);
  Double_t factor = faclts[0];
  if (nquant>0){
    // fix proper normalization - Anja
    factor = faclts[nquant-1];
  }
  //
  //
  Double_t sumx  = 0;
  Double_t sumx2 = 0;
  Int_t    bestindex = -1;
  Double_t bestmean  = 0;
  Double_t bestsigma = -1;
  for (Int_t i=0; i<hh; i++){
    sumx  += data[index[i]];
    sumx2 += data[index[i]]*data[index[i]];
  }
  //
  Double_t kfactor = 2.*externalfactor - externalfactor*externalfactor;
  Double_t norm = 1./Double_t(hh);
  for (Int_t i=hh; i<nvectors; i++){
    Double_t cmean  = sumx*norm;
    Double_t csigma = sumx2*norm - cmean*cmean*kfactor;
    if (csigma<bestsigma || bestsigma<0){
      bestmean  = cmean;
      bestsigma = csigma;
      bestindex = i-hh;
    }
    sumx  += data[index[i]]-data[index[i-hh]];
    sumx2 += data[index[i]]*data[index[i]]-data[index[i-hh]]*data[index[i-hh]];
  }

  mean  = bestmean;
  sigma = factor*TMath::Sqrt(TMath::Abs(bestsigma));
  delete [] index;
}

//_____________________________________________________________________________
void AliMathBase::TruncatedMean(TH1F * his, TVectorD *param, Float_t down, Float_t up, Bool_t verbose)
{
  //
  // Calculate the truncated mean and sigma of the histogram, using only the
  // bins whose cumulative fraction of entries lies in the interval (down, up)
  //
  Int_t nbins      = his->GetNbinsX();
  Float_t nentries = his->GetEntries();
  Float_t sum      = 0;
  Float_t mean     = 0;
  Float_t sigma2   = 0;
  Float_t ncumul   = 0;
  for (Int_t ibin=1; ibin<nbins+1; ibin++){
    ncumul += his->GetBinContent(ibin);
    Float_t fraction = Float_t(ncumul)/Float_t(nentries);
    if (fraction>down && fraction<up){
      sum    += his->GetBinContent(ibin);
      mean   += his->GetBinCenter(ibin)*his->GetBinContent(ibin);
      sigma2 += his->GetBinCenter(ibin)*his->GetBinCenter(ibin)*his->GetBinContent(ibin);
    }
  }
  mean  /= sum;
  sigma2 = TMath::Sqrt(TMath::Abs(sigma2/sum-mean*mean));
  if (param){
    (*param)[0] = his->GetMaximum();
    (*param)[1] = mean;
    (*param)[2] = sigma2;
  }
  if (verbose) printf("Mean\t%f\t Sigma2\t%f\n", mean,sigma2);
}
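
// Usage sketch (illustrative): how the robust estimator and the truncated
// mean above might be called from a ROOT macro.  The data, the histogram and
// the chosen window sizes are invented for the example, and the methods are
// assumed to be declared static in AliMathBase.h.
//
//   Double_t data[100];
//   for (Int_t i=0; i<100; i++) data[i] = gRandom->Gaus(0.,1.);
//   Double_t mean=0, sigma=0;
//   // robust mean/sigma from the most compact subset of 70 points
//   AliMathBase::EvaluateUni(100, data, mean, sigma, 70);
//
//   TH1F h("h","h",100,-5.,5.);
//   h.FillRandom("gaus",10000);
//   TVectorD param(3);
//   // mean/sigma from the central 90% of the entries; param = (max, mean, sigma)
//   AliMathBase::TruncatedMean(&h, &param, 0.05, 0.95, kTRUE);
//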
//_____________________________________________________________________________
void AliMathBase::LTM(TH1F * his, TVectorD *param , Float_t fraction, Bool_t verbose)
{
  //
  // LTM - least trimmed mean of the histogram:
  // robust mean and sigma estimated from the given fraction of entries with
  // the smallest spread (see EvaluateUni);
  // the result is returned in param: [0]=maximum, [1]=mean, [2]=sigma
  //
  Int_t nbins    = his->GetNbinsX();
  Int_t nentries = (Int_t)his->GetEntries();
  if (nentries<=0) return;
  Double_t *data = new Double_t[nentries];
  Int_t npoints=0;
  for (Int_t ibin=1; ibin<nbins+1; ibin++){
    Float_t entriesI = his->GetBinContent(ibin);
    Float_t xcenter  = his->GetBinCenter(ibin);
    for (Int_t ic=0; ic<entriesI; ic++){
      data[npoints] = xcenter;
      npoints++;
    }
  }
  Double_t mean=0, sigma=0;
  Int_t npoints2 = TMath::Min(Int_t(fraction*Float_t(npoints)),npoints-1);
  npoints2       = TMath::Max(Int_t(0.5*Float_t(npoints)),npoints2);
  if (npoints>1) AliMathBase::EvaluateUni(npoints, data, mean, sigma, npoints2);
  delete [] data;
  if (verbose) printf("Mean\t%f\t Sigma2\t%f\n", mean,sigma);
  if (param){
    (*param)[0] = his->GetMaximum();
    (*param)[1] = mean;
    (*param)[2] = sigma;
  }
}

//_____________________________________________________________________________
Double_t AliMathBase::FitGaus(TH1F* his, TVectorD *param, TMatrixD *matrix, Float_t xmin, Float_t xmax, Bool_t verbose)
{
  //
  //  Fit histogram with gaussian function
  //
  //  Parameters:
  //       return value - chi2 - if negative (not enough points)
  //       his          - input histogram
  //       param        - vector with parameters
  //       xmin, xmax   - range to fit - if xmin=xmax=0 the full histogram range is used
  //  Fitting:
  //  1. Step - make logarithm
  //  2. Linear fit (parabola) - more robust - always converges
  //  3. In case of small statistics bins are averaged
  //
  static TLinearFitter fitter(3,"pol2");
  TVectorD par(3);
  TVectorD sigma(3);
  TMatrixD mat(3,3);
  if (his->GetMaximum()<4)   return -1;
  if (his->GetEntries()<12)  return -1;
  if (his->GetRMS()<0.00001) return -1;   // protect against vanishing RMS
  Float_t maxEstimate = his->GetEntries()*his->GetBinWidth(1)/TMath::Sqrt((2.*TMath::Pi()*his->GetRMS()));
  Int_t dsmooth = TMath::Nint(6./TMath::Sqrt(maxEstimate));

  if (maxEstimate<1) return -1;
  Int_t nbins   = his->GetNbinsX();
  Int_t npoints = 0;
  //
  if (xmin>=xmax){
    xmin = his->GetXaxis()->GetXmin();
    xmax = his->GetXaxis()->GetXmax();
  }
  for (Int_t iter=0; iter<2; iter++){
    fitter.ClearPoints();
    npoints=0;
    for (Int_t ibin=1; ibin<nbins+1; ibin++){
      Int_t countB=1;
      Float_t entriesI = his->GetBinContent(ibin);
      for (Int_t delta = -dsmooth; delta<=dsmooth; delta++){
        if (ibin+delta>1 && ibin+delta<nbins-1){
          entriesI += his->GetBinContent(ibin+delta);
          countB++;
        }
      }
      entriesI/=countB;
      Double_t xcenter= his->GetBinCenter(ibin);
      if (xcenter<xmin || xcenter>xmax) continue;
      Double_t error=1./TMath::Sqrt(countB);
      Float_t cont=2;
      if (iter>0){
        if (par[0]+par[1]*xcenter+par[2]*xcenter*xcenter>20) return 0;
        cont = TMath::Exp(par[0]+par[1]*xcenter+par[2]*xcenter*xcenter);
        if (cont>1.) error = 1./TMath::Sqrt(cont*Float_t(countB));
      }
      if (entriesI>1&&cont>1){
        fitter.AddPoint(&xcenter,TMath::Log(Float_t(entriesI)),error);
        npoints++;
      }
    }
    if (npoints>3){
      fitter.Eval();
      fitter.GetParameters(par);
    }else{
      break;
    }
  }
  if (npoints<=3){
    return -1;
  }
  fitter.GetParameters(par);
  fitter.GetCovarianceMatrix(mat);
  if (TMath::Abs(par[1])<1.e-20) return -1;
  if (TMath::Abs(par[2])<1.e-20) return -1;   // avoid division by zero below
  Double_t chi2 = fitter.GetChisquare()/Float_t(npoints);
  if (!param)  param  = new TVectorD(3);
  // matrix (covariance of the gaussian parameters) is currently not filled
  //
  // convert the parabola fitted to the logarithm back to gaussian parameters:
  // log G = par[0]+par[1]*x+par[2]*x^2  =>  mean=-par[1]/(2*par[2]), sigma=1/sqrt(-2*par[2])
  (*param)[1] = par[1]/(-2.*par[2]);
  (*param)[2] = 1./TMath::Sqrt(TMath::Abs(-2.*par[2]));
  (*param)[0] = TMath::Exp(par[0]+ par[1]*(*param)[1] + par[2]*(*param)[1]*(*param)[1]);
  if (verbose){
    par.Print();
    mat.Print();
    param->Print();
    printf("Chi2=%f\n",chi2);
    TF1 * f1= new TF1("f1","[0]*exp(-(x-[1])^2/(2*[2]*[2]))",his->GetXaxis()->GetXmin(),his->GetXaxis()->GetXmax());
    f1->SetParameter(0, (*param)[0]);
    f1->SetParameter(1, (*param)[1]);
    f1->SetParameter(2, (*param)[2]);
    f1->Draw("same");
  }
  return chi2;
}
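
// Usage sketch (illustrative): how FitGaus and LTM above might be called from
// a ROOT macro.  The histogram and its filling are invented for the example,
// and the methods are assumed to be declared static in AliMathBase.h.
//
//   TH1F hg("hg","hg",100,-5.,5.);
//   hg.FillRandom("gaus",20000);
//   TVectorD par(3);
//   TMatrixD cov(3,3);
//   Double_t chi2 = AliMathBase::FitGaus(&hg, &par, &cov, 0, 0, kTRUE); // full range, verbose
//   if (chi2<0) printf("fit failed - not enough entries\n");
//   else        printf("A=%f mean=%f sigma=%f chi2/point=%f\n", par[0], par[1], par[2], chi2);
//
//   TVectorD parLTM(3);
//   // robust mean/sigma from the best 80% of the entries
//   AliMathBase::LTM(&hg, &parLTM, 0.8, kFALSE);
//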