* @ingroup pwg2_forward_analysis_scripts
*/
AliAnalysisTask*
-AddTaskFMD(Int_t nCutBins=1, Float_t correctionCut=0.1)
+AddTaskFMD()
{
AliAnalysisManager *mgr = AliAnalysisManager::GetAnalysisManager();
if (!mgr) {
// by the rest of the algorithms - but only for the energy fitter
// algorithm.
task->GetEnergyFitter().SetEtaAxis(200, -4, 6);
+ // Set maximum energy loss to consider
+ task->GetEnergyFitter().SetMaxE(10);
+ // Set number of energy loss bins
+ task->GetEnergyFitter().SetNEbins(300);
+ // Set whether to use increasing bin sizes
+ task->GetEnergyFitter().SetUseIncreasingBins(true);
+ // Set whether to fit the energy distributions
+ task->GetEnergyFitter().SetDoFits(kTRUE);
// Set the low cut used for energy
task->GetEnergyFitter().SetLowCut(0.4);
// Set the number of bins to subtract from maximum of distributions
// to get the lower bound of the fit range
- task->GetEnergyFitter().SetBinsToSubtract(4);
+ task->GetEnergyFitter().SetFitRangeBinWidth(4);
// Set the maximum number of landaus to try to fit (max 5)
- task->GetEnergyFitter().SetNLandau(5);
+ task->GetEnergyFitter().SetNParticles(5);
// Set the minimum number of entries in the distribution before
// trying to fit to the data
task->GetEnergyFitter().SetMinEntries(1000);
- // Set maximum energy loss to consider
- task->GetEnergyFitter().SetMaxE(10);
- // Set number of energy loss bins
- task->GetEnergyFitter().SetNEbins(300);
- // Set whether to use increasing bin sizes
- task->GetEnergyFitter().SetUseIncreasingBins(true);
// Set the low cut used for sharing
task->GetSharingFilter().SetLowCut(0.3);
// Set the number of extra bins (beyond the secondary map border)
- task->GetHistCollector().SetNCutBins(nCutBins);
+ task->GetHistCollector().SetNCutBins(1);
// Set the correction cut, that is, when bins in the secondary map
// is smaller than this, they are considered empty
- task->GetHistCollector().SetCorrectionCut(correctionCut);
+ task->GetHistCollector().SetCorrectionCut(0.1);
// Set the overall debug level (1: some output, 3: a lot of output)
task->SetDebug(0);
// Set the debug level of a single algorithm
/**
* Scale the histograms to the total number of events
*
+ * @param dir Where the output is stored
* @param nEvents Number of events
*/
void ScaleHistograms(TList* dir, Int_t nEvents);
* Destructor
*/
~RingHistos();
+ /**
+ * Make output
+ *
+ * @param dir Where to put it
+ */
void Output(TList* dir);
/**
* Scale the histograms to the total number of events
*
+ * @param dir Where the output is stored
* @param nEvents Number of events
*/
void ScaleHistograms(TList* dir, Int_t nEvents);
/**
* Scale the histograms to the total number of events
*
+ * @param dir Where to put the output
* @param nEvents Number of events
*/
void ScaleHistograms(TList* dir, Int_t nEvents);
* Destructor
*/
~RingHistos();
+ /**
+ * Make output
+ *
+ * @param dir Where to put it
+ */
void Output(TList* dir);
/**
* Scale the histograms to the total number of events
*
+ * @param dir Where the output is
* @param nEvents Number of events
*/
void ScaleHistograms(TList* dir, Int_t nEvents);
; // This is for Emacs
#endif
-#define N_A(N) (4+N-2)
-#define N2_A(N) (4+(N-2)*3)
-#define N2_D(N) (4+(N-2)*3+1)
-#define N2_X(N) (4+(N-2)*3+2)
-
-//____________________________________________________________________
-namespace {
- Double_t
- NLandau(Double_t* xp, Double_t* pp)
- {
- Double_t e = xp[0];
- Double_t constant = pp[0];
- Double_t mpv = pp[1];
- Double_t fwhm = pp[2];
- Int_t n = Int_t(pp[3]);
- Double_t result = 0;
- for (Int_t i = 1; i <= n; i++) {
- Double_t mpvi = i*(mpv+fwhm*TMath::Log(i));
- Double_t fwhmi = i*fwhm;
- Double_t ai = (i == 1 ? 1 : pp[N_A(i)]);
- result += ai * TMath::Landau(e,mpvi,fwhmi,kTRUE);
- }
- result *= constant;
- return result;
- }
-
- Double_t
- NLandau2(Double_t* xp, Double_t* pp)
- {
- Double_t e = xp[0];
- Double_t constant = pp[0];
- Double_t mpv = pp[1];
- Double_t fwhm = pp[2];
- Int_t n = Int_t(pp[3]);
- Double_t result = TMath::Landau(e,mpv,fwhm,kTRUE);
- for (Int_t i = 2; i <= n; i++) {
- Double_t ai = pp[N2_A(i)];
- Double_t mpvi = pp[N2_D(i)];
- Double_t fwhmi = pp[N2_X(i)];
- result += ai * TMath::Landau(e,mpvi,fwhmi,kTRUE);
- }
- result *= constant;
- return result;
- }
-
- Double_t
- TripleLandau(Double_t *x, Double_t *par)
- {
- Double_t energy = x[0];
- Double_t constant = par[0];
- Double_t mpv = par[1];
- Double_t fwhm = par[2];
- Double_t alpha = par[3];
- Double_t beta = par[4];
- Double_t mpv2 = 2*mpv+2*fwhm*TMath::Log(2);
- Double_t mpv3 = 3*mpv+3*fwhm*TMath::Log(3);
-
- Double_t f = constant * (TMath::Landau(energy,mpv,fwhm,kTRUE)+
- alpha * TMath::Landau(energy,mpv2,2*fwhm,kTRUE)+
- beta * TMath::Landau(energy,mpv3,3*fwhm,kTRUE));
-
- return f;
- }
- Double_t
- DoubleLandau(Double_t *x, Double_t *par)
- {
- Double_t energy = x[0];
- Double_t constant = par[0];
- Double_t mpv = par[1];
- Double_t fwhm = par[2];
- Double_t alpha = par[3];
- Double_t mpv2 = 2*mpv+2*fwhm*TMath::Log(2);
-
- Double_t f = constant * (TMath::Landau(energy,mpv,fwhm,kTRUE)+
- alpha * TMath::Landau(energy,mpv2,2*fwhm,kTRUE));
-
- return f;
- }
- Double_t
- SingleLandau(Double_t *x, Double_t *par)
- {
- Double_t energy = x[0];
- Double_t constant = par[0];
- Double_t mpv = par[1];
- Double_t fwhm = par[2];
-
- Double_t f = constant * TMath::Landau(energy,mpv,fwhm,kTRUE);
-
- return f;
- }
-}
//____________________________________________________________________
AliFMDEnergyFitter::AliFMDEnergyFitter()
: TNamed(),
fRingHistos(),
fLowCut(0.3),
- fNLandau(3),
+ fNParticles(3),
fMinEntries(100),
- fBinsToSubtract(4),
+ fFitRangeBinWidth(4),
fDoFits(false),
fEtaAxis(),
fMaxE(10),
fNEbins(300),
fUseIncreasingBins(true),
+ fMaxRelParError(.25),
+ fMaxChi2PerNDF(20),
fDebug(0)
{}
: TNamed("fmdEnergyFitter", title),
fRingHistos(),
fLowCut(0.3),
- fNLandau(3),
+ fNParticles(3),
fMinEntries(100),
- fBinsToSubtract(4),
+ fFitRangeBinWidth(4),
fDoFits(false),
fEtaAxis(0,0,0),
fMaxE(10),
fNEbins(300),
fUseIncreasingBins(true),
+ fMaxRelParError(.25),
+ fMaxChi2PerNDF(20),
fDebug(3)
{
fEtaAxis.SetName("etaAxis");
: TNamed(o),
fRingHistos(),
fLowCut(o.fLowCut),
- fNLandau(o.fNLandau),
+ fNParticles(o.fNParticles),
fMinEntries(o.fMinEntries),
- fBinsToSubtract(o.fBinsToSubtract),
+ fFitRangeBinWidth(o.fFitRangeBinWidth),
fDoFits(o.fDoFits),
fEtaAxis(o.fEtaAxis),
fMaxE(o.fMaxE),
fNEbins(o.fNEbins),
fUseIncreasingBins(o.fUseIncreasingBins),
+ fMaxRelParError(o.fMaxRelParError),
+ fMaxChi2PerNDF(o.fMaxChi2PerNDF),
fDebug(o.fDebug)
{
TIter next(&o.fRingHistos);
TNamed::operator=(o);
fLowCut = o.fLowCut;
- fNLandau = o.fNLandau;
+ fNParticles = o.fNParticles;
fMinEntries = o.fMinEntries;
- fBinsToSubtract= o.fBinsToSubtract;
+ fFitRangeBinWidth= o.fFitRangeBinWidth;
fDoFits = o.fDoFits;
fEtaAxis.Set(o.fEtaAxis.GetNbins(),o.fEtaAxis.GetXmin(),o.fEtaAxis.GetXmax());
fDebug = o.fDebug;
- // +1 for chi^2
- // +3 for 1 landau
- // +1 for N
- // +fNLandau-1 for weights
- Int_t nStack = 1+3+1+fNLandau-1;
+ // +1              for chi^2
+ // +kN             for the single-particle parameters C, #Delta, #xi, #sigma, #sigma_{n}
+ // +1              for N
+ // +fNParticles-1  for the weights a_{2},...,a_{N}
+ Int_t nStack = kN+fNParticles+1;
THStack* stack[nStack];
- stack[0] = new THStack("chi2", "#chi^{2}/#nu");
- stack[1] = new THStack("c", "constant");
- stack[2] = new THStack("mpv", "#Delta_{p}");
- stack[3] = new THStack("w", "w");
- stack[4] = new THStack("n", "# of Landaus");
- for (Int_t i = 2; i <= fNLandau; i++)
- stack[i-1+4] = new THStack(Form("a%d", i),
- Form("Weight of %d signal", i));
+ stack[0] = new THStack("chi2", "#chi^{2}/#nu");
+ stack[kC +1] = new THStack("c", "Constant");
+ stack[kDelta +1] = new THStack("delta", "#Delta_{p}");
+ stack[kXi +1] = new THStack("xi", "#xi");
+ stack[kSigma +1] = new THStack("sigma", "#sigma");
+ stack[kSigmaN+1] = new THStack("sigman", "#sigma_{n}");
+ stack[kN +1] = new THStack("n", "# Particles");
+ for (Int_t i = 2; i <= fNParticles; i++)
+ stack[kN+i] = new THStack(Form("a%d", i), Form("a_{%d}", i));
for (Int_t i = 0; i < nStack; i++)
d->Add(stack[i]);
TIter next(&fRingHistos);
RingHistos* o = 0;
while ((o = static_cast<RingHistos*>(next()))) {
- TObjArray* l = o->Fit(d, fEtaAxis, fLowCut, fNLandau,
- fMinEntries, fBinsToSubtract);
+ TObjArray* l = o->Fit(d, fEtaAxis, fLowCut, fNParticles,
+ fMinEntries, fFitRangeBinWidth,
+ fMaxRelParError, fMaxChi2PerNDF);
if (!l) continue;
for (Int_t i = 0; i < l->GetEntriesFast(); i++) {
stack[i % nStack]->Add(static_cast<TH1*>(l->At(i)));
//____________________________________________________________________
void
-AliFMDEnergyFitter::RingHistos::Make(Int_t ieta, Double_t emin, Double_t emax,
- Double_t deMax, Int_t nDeBins,
- Bool_t incr)
+AliFMDEnergyFitter::RingHistos::Make(Int_t ieta,
+ Double_t emin,
+ Double_t emax,
+ Double_t deMax,
+ Int_t nDeBins,
+ Bool_t incr)
{
TH1D* h = 0;
TString name = Form("%s_etabin%03d", fName.Data(), ieta);
fEtaEDists.AddAt(h, ieta-1);
}
//____________________________________________________________________
-TH1D*
-AliFMDEnergyFitter::RingHistos::MakePar(const char* name,
- const char* title,
- const TAxis& eta) const
+TH1D* AliFMDEnergyFitter::RingHistos::MakePar(const char* name,
+ const char* title,
+ const TAxis& eta) const
{
TH1D* h = new TH1D(Form("%s_%s", fName.Data(), name),
Form("%s for %s", title, fName.Data()),
//____________________________________________________________________
TObjArray*
AliFMDEnergyFitter::RingHistos::Fit(TList* dir, const TAxis& eta,
- Double_t lowCut, UShort_t nLandau,
+ Double_t lowCut,
+ UShort_t nParticles,
UShort_t minEntries,
- UShort_t minusBins) const
+ UShort_t minusBins,
+ Double_t relErrorCut,
+ Double_t chi2nuCut) const
{
+ // Get our ring list from the output container
TList* l = GetOutputList(dir);
if (!l) return 0;
+ // Get the energy distributions from the output container
TList* dists = static_cast<TList*>(l->FindObject("EDists"));
if (!dists) {
AliWarning(Form("Didn't find %s_EtaEDists in %s",
return 0;
}
- TObjArray* pars = new TObjArray(3+nLandau+1);
+ // Container of the fit results histograms
+ TObjArray* pars = new TObjArray(3+nParticles+1);
pars->SetName("FitResults");
l->Add(pars);
- TH1* hChi2 = 0;
- TH1* hC = 0;
- TH1* hMpv = 0;
- TH1* hW = 0;
- TH1* hS = 0;
- TH1* hN = 0;
- TH1* hA[nLandau-1];
- pars->Add(hChi2 = MakePar("chi2", "#chi^{2}/#nu", eta));
- pars->Add(hC = MakePar("c", "Constant", eta));
- pars->Add(hMpv = MakePar("mpv", "#Delta_{p}", eta));
- pars->Add(hW = MakePar("w", "#xi", eta));
- pars->Add(hS = MakePar("s", "#sigma", eta));
- pars->Add(hN = MakePar("n", "N", eta));
- for (UShort_t i = 1; i < nLandau; i++)
+ // Result objects stored in separate list on the output
+ TH1* hChi2 = 0;
+ TH1* hC = 0;
+ TH1* hDelta = 0;
+ TH1* hXi = 0;
+ TH1* hSigma = 0;
+ TH1* hSigmaN = 0;
+ TH1* hN = 0;
+ TH1* hA[nParticles-1];
+ pars->Add(hChi2 = MakePar("chi2", "#chi^{2}/#nu", eta));
+ pars->Add(hC = MakePar("c", "Constant", eta));
+ pars->Add(hDelta = MakePar("delta", "#Delta_{p}", eta));
+ pars->Add(hXi = MakePar("xi", "#xi", eta));
+ pars->Add(hSigma = MakePar("sigma", "#sigma", eta));
+ pars->Add(hSigmaN = MakePar("sigman", "#sigma_{n}", eta));
+ pars->Add(hN = MakePar("n", "N", eta));
+ for (UShort_t i = 1; i < nParticles; i++)
pars->Add(hA[i-1] = MakePar(Form("a%d",i+1), Form("a_{%d}",i+1), eta));
Int_t high = 0;
for (Int_t i = 0; i < nDists; i++) {
TH1D* dist = static_cast<TH1D*>(dists->At(i));
- if (!dist || dist->GetEntries() <= minEntries) continue;
+ // Ignore empty histograms altogether
+ if (!dist || dist->GetEntries() <= 0) continue;
+ // Scale to the bin-width
+ dist->Scale(1., "width");
+
+ // Normalize peak to 1
+ Double_t max = dist->GetMaximum();
+ dist->Scale(1/max);
+
+ // Check that we have enough entries
+ if (dist->GetEntries() <= minEntries) continue;
- TF1* res = FitHist(dist,lowCut,nLandau,minusBins);
+ // Now fit
+ TF1* res = FitHist(dist,lowCut,nParticles,minusBins,
+ relErrorCut,chi2nuCut);
if (!res) continue;
+ // dist->GetListOfFunctions()->Add(res);
+ // Store eta limits
low = TMath::Min(low,i+1);
high = TMath::Max(high,i+1);
+ // Get the reduced chi square
Double_t chi2 = res->GetChisquare();
Int_t ndf = res->GetNDF();
- hChi2->SetBinContent(i+1, ndf > 0 ? chi2 / ndf : 0);
- hC ->SetBinContent(i+1, res->GetParameter(0));
- hMpv->SetBinContent(i+1, res->GetParameter(1));
- hW ->SetBinContent(i+1, res->GetParameter(2));
- hN ->SetBinContent(i+1, res->GetParameter(3));
-
- hC ->SetBinError(i+1, res->GetParError(1));
- hMpv->SetBinError(i+1, res->GetParError(2));
- hW ->SetBinError(i+1, res->GetParError(2));
- // hN ->SetBinError(i, res->GetParError(3));
-
- for (UShort_t j = 0; j < nLandau-1; j++) {
- hA[j]->SetBinContent(i+1, res->GetParameter(4+j));
- hA[j]->SetBinError(i+1, res->GetParError(4+j));
+
+ // Store results of best fit in output histograms
+ res->SetLineWidth(4);
+ hChi2 ->SetBinContent(i+1, ndf > 0 ? chi2 / ndf : 0);
+ hC ->SetBinContent(i+1, res->GetParameter(kC));
+ hDelta ->SetBinContent(i+1, res->GetParameter(kDelta));
+ hXi ->SetBinContent(i+1, res->GetParameter(kXi));
+ hSigma ->SetBinContent(i+1, res->GetParameter(kSigma));
+ hSigmaN ->SetBinContent(i+1, res->GetParameter(kSigmaN));
+ hN ->SetBinContent(i+1, res->GetParameter(kN));
+
+ hC ->SetBinError(i+1, res->GetParError(kC));
+ hDelta ->SetBinError(i+1, res->GetParError(kDelta));
+ hXi ->SetBinError(i+1, res->GetParError(kXi));
+ hSigma ->SetBinError(i+1, res->GetParError(kSigma));
+ hSigmaN->SetBinError(i+1, res->GetParError(kSigmaN));
+ hN ->SetBinError(i+1, res->GetParError(kN));
+
+ for (UShort_t j = 0; j < nParticles-1; j++) {
+ hA[j]->SetBinContent(i+1, res->GetParameter(kA+j));
+ hA[j]->SetBinError(i+1, res->GetParError(kA+j));
}
}
+ // Fit the full-ring histogram
TH1* total = GetOutputHist(l, Form("%s_edist", fName.Data()));
if (total && total->GetEntries() >= minEntries) {
- TF1* res = FitHist(total,lowCut,nLandau,minusBins);
+ TF1* res = FitHist(total,lowCut,nParticles,minusBins,
+ relErrorCut,chi2nuCut);
if (res) {
+ // Make histograms for the result of this fit
Double_t chi2 = res->GetChisquare();
Int_t ndf = res->GetNDF();
- pars->Add(MakeTotal("t_chi2", "#chi^{2}/#nu", eta, low, high,
+ pars->Add(MakeTotal("t_chi2", "#chi^{2}/#nu", eta, low, high,
ndf > 0 ? chi2/ndf : 0, 0));
- pars->Add(MakeTotal("t_c", "Constant", eta, low, high,
- res->GetParameter(0),res->GetParError(0)));
- pars->Add(MakeTotal("t_mpv", "#Delta_{p}", eta, low, high,
- res->GetParameter(1),res->GetParError(1)));
- pars->Add(MakeTotal("t_w", "#xi", eta, low, high,
- res->GetParameter(2),res->GetParError(2)));
- pars->Add(MakeTotal("t_n", "N", eta, low, high,
- res->GetParameter(3),0));
- for (UShort_t j = 1; j < nLandau; j++)
- pars->Add(MakeTotal(Form("a%d_t",j+1),
- Form("a_{%d}",j+1), eta, low, high,
- res->GetParameter(3+j), res->GetParError(3+j)));
+ pars->Add(MakeTotal("t_c", "Constant", eta, low, high,
+ res->GetParameter(kC),
+ res->GetParError(kC)));
+ pars->Add(MakeTotal("t_delta", "#Delta_{p}", eta, low, high,
+ res->GetParameter(kDelta),
+ res->GetParError(kDelta)));
+ pars->Add(MakeTotal("t_xi", "#xi", eta, low, high,
+ res->GetParameter(kXi),
+ res->GetParError(kXi)));
+ pars->Add(MakeTotal("t_sigma", "#sigma", eta, low, high,
+ res->GetParameter(kSigma),
+ res->GetParError(kSigma)));
+ pars->Add(MakeTotal("t_sigman", "#sigma_{n}", eta, low, high,
+ res->GetParameter(kSigmaN),
+ res->GetParError(kSigmaN)));
+ pars->Add(MakeTotal("t_n", "N", eta, low, high,
+ res->GetParameter(kN),0));
+ for (UShort_t j = 0; j < nParticles-1; j++)
+ pars->Add(MakeTotal(Form("t_a%d",j+2),
+ Form("a_{%d}",j+2), eta, low, high,
+ res->GetParameter(kA+j),
+ res->GetParError(kA+j)));
}
}
+ // Clean up the list of histograms.  Histograms with no entries or
+ // no functions are deleted.  We have to do this using the TObjLink
+ // objects stored in the list, since ROOT cannot guarantee the
+ // validity of iterators when removing from a list - tsk.  Should
+ // just implement TIter::Remove().
TObjLink* lnk = dists->FirstLink();
while (lnk) {
TH1* h = static_cast<TH1*>(lnk->GetObject());
TF1*
AliFMDEnergyFitter::RingHistos::FitHist(TH1* dist,
Double_t lowCut,
- UShort_t nLandau,
- UShort_t minusBins) const
+ UShort_t nParticles,
+ UShort_t minusBins,
+ Double_t relErrorCut,
+ Double_t chi2nuCut) const
{
Double_t maxRange = 10;
+ // Create a fitter object
AliForwardUtil::ELossFitter f(lowCut, maxRange, minusBins);
f.Clear();
+
// If we are only asked to fit a single particle, return this fit,
// no matter what.
- if (nLandau == 1) {
+ if (nParticles == 1) {
TF1* r = f.Fit1Particle(dist, 0);
if (!r) return 0;
return new TF1(*r);
}
// Fit from 2 up to n particles
- for (Int_t i = 2; i <= nLandau; i++) f.FitNParticle(dist, i, 0);
+ for (Int_t i = 2; i <= nParticles; i++) f.FitNParticle(dist, i, 0);
// Now, we need to select the best fit
Int_t nFits = f.fFitResults.GetEntriesFast();
TF1* good[nFits];
for (Int_t i = nFits-1; i >= 0; i--) {
- if (CheckResult(static_cast<TFitResult*>(f.fFitResults.At(i))))
- good[i] = static_cast<TF1*>(f.fFunctions.At(i));
+ good[i] = 0;
+ TF1* ff = static_cast<TF1*>(f.fFunctions.At(i));
+ // ff->SetLineColor(Color());
+ ff->SetRange(0, maxRange);
+ dist->GetListOfFunctions()->Add(new TF1(*ff));
+ if (CheckResult(static_cast<TFitResult*>(f.fFitResults.At(i)),
+ relErrorCut, chi2nuCut)) {
+ good[i] = ff;
+ ff->SetLineWidth(2);
+ // f.fFitResults.At(i)->Print("V");
+ }
}
// If all else fails, use the 1 particle fit
TF1* ret = static_cast<TF1*>(f.fFunctions.At(0));
+
+ // Find the fit with the most valid particles
for (Int_t i = nFits-1; i > 0; i--) {
if (!good[i]) continue;
+ if (fDebug > 1) {
+ AliInfo(Form("Choosing fit with n=%d", i+1));
+ f.fFitResults.At(i)->Print();
+ }
ret = good[i];
break;
}
- return new TF1(*ret);
+ // Give a warning if we're using fall-back
+ if (ret == f.fFunctions.At(0))
+ AliWarning("Choosing fall-back 1 particle fit");
+
+ // Copy our result and return (the functions are owned by the fitter)
+ TF1* fret = new TF1(*ret);
+ return fret;
}
//____________________________________________________________________
Bool_t
-AliFMDEnergyFitter::RingHistos::CheckResult(TFitResult* r) const
+AliFMDEnergyFitter::RingHistos::CheckResult(TFitResult* r,
+ Double_t relErrorCut,
+ Double_t chi2nuCut) const
{
+ if (fDebug > 10) r->Print();
+ TString n = r->GetName();
+ n.ReplaceAll("TFitResult-", "");
Double_t chi2 = r->Chi2();
Int_t ndf = r->Ndf();
// Double_t prob = r.Prob();
- if (ndf <= 0 || chi2 / ndf > 5) {
+ Bool_t ret = kTRUE;
+
+ // Check that the reduced chi square isn't larger than the cut
+ if (ndf <= 0 || chi2 / ndf > chi2nuCut) {
if (fDebug > 2)
- AliWarning(Form("Fit %s has chi^2/ndf=%f/%d=%f",
- r->GetName(), chi2, ndf, (ndf<0 ? 0 : chi2/ndf)));
- return kFALSE;
+ AliWarning(Form("%s: chi^2/ndf=%12.5f/%3d=%12.5f>%12.5f",
+ n.Data(), chi2, ndf, (ndf<0 ? 0 : chi2/ndf),
+ chi2nuCut));
+ ret = kFALSE;
}
+ // Check each parameter
UShort_t nPar = r->NPar();
for (UShort_t i = 0; i < nPar; i++) {
- if (i == 3) continue;
+ if (i == kN) continue; // Skip the number parameter
- Double_t v = r->Parameter(i);
- Double_t e = r->ParError(i);
+ // Get value and error and check value
+ Double_t v = r->Parameter(i);
+ Double_t e = r->ParError(i);
if (v == 0) continue;
- if (v == 0 || e / v > 0.2) {
+
+ // Calculate the relative error and check it
+ Double_t re = e / v;
+ Double_t cut = relErrorCut * (i < kN ? 1 : 2);
+ if (re > cut) {
if (fDebug > 2)
- AliWarning(Form("Fit %s has Delta %s/%s=%f/%f=%f%%",
- r->GetName(), r->ParName(i).c_str(),
- r->ParName(i).c_str(), e, v, 100*(v == 0 ? 0 : e/v)));
- return kFALSE;
+ AliWarning(Form("%s: Delta %s/%s=%9.5f/%9.5f=%5.1f%%>%5.1f%%",
+ n.Data(), r->ParName(i).c_str(),
+ r->ParName(i).c_str(), e, v,
+ 100*(v == 0 ? 0 : e/v),
+ 100*(cut)));
+ ret = kFALSE;
}
}
- if (nPar > 5) {
+
+ // Check if we have scale parameters
+ if (nPar > kN) {
+
+ // Check that the last particle has a significant contribution
Double_t lastScale = r->Parameter(nPar-1);
if (lastScale <= 1e-7) {
if (fDebug)
- AliWarning(Form("Last scale %s is too small %f<1e-7",
- r->ParName(nPar-1).c_str(), lastScale));
- return kFALSE;
+ AliWarning(Form("%s: %s=%9.6f<1e-7",
+ n.Data(), r->ParName(nPar-1).c_str(), lastScale));
+ ret = kFALSE;
}
}
- return kTRUE;
+ return ret;
}
class AliFMDEnergyFitter : public TNamed
{
public:
+ enum {
+ kC = AliForwardUtil::ELossFitter::kC,
+ kDelta = AliForwardUtil::ELossFitter::kDelta,
+ kXi = AliForwardUtil::ELossFitter::kXi,
+ kSigma = AliForwardUtil::ELossFitter::kSigma,
+ kSigmaN = AliForwardUtil::ELossFitter::kSigmaN,
+ kN = AliForwardUtil::ELossFitter::kN,
+ kA = AliForwardUtil::ELossFitter::kA
+ };
+
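+ // The enumerators above mirror the parameter indices of the fitted
+ // function, so results can be read back by name instead of by magic
+ // number.  Illustrative sketch only (res is a fit returned by e.g.
+ // FitHist):
+ //
+ //   Double_t delta = res->GetParameter(kDelta);
+ //   Double_t xi    = res->GetParameter(kXi);
+ //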
/**
* Destructor
*/
*
* @param n Number of bins to subtract from the maximum of the
*          distribution to get the lower bound of the fit range
*/
- void SetBinsToSubtract(UShort_t n=4) { fBinsToSubtract = n; }
+ void SetFitRangeBinWidth(UShort_t n=4) { fFitRangeBinWidth = n; }
/**
* Whether or not to enable fitting of the final merged result.
* Note, fitting takes quite a while and one should be careful not to do
* this needlessly
*
* @param doFit Whether to do the fits or not
*/
- void DoFits(Bool_t doFit=kTRUE) { fDoFits = doFit; }
+ void SetDoFits(Bool_t doFit=kTRUE) { fDoFits = doFit; }
/**
- * Whether or not to enable fitting of the final merged result.
- * Note, fitting takes quite a while and one should be careful not to do
- * this needlessly
+ * Set how many particles we will try to fit at most to the data
*
- * @param doFit Whether to do the fits or not
+ * @param n Max number of particle to try to fit
*/
- void SetNLandau(UShort_t n) { fNLandau = (n < 1 ? 1 : (n > 5 ? 5 : n)); }
+ void SetNParticles(UShort_t n) { fNParticles = (n < 1 ? 1 : (n > 5 ? 5 : n)); }
/**
- * Whether or not to enable fitting of the final merged result.
- * Note, fitting takes quite a while and one should be careful not to do
- * this needlessly
+ * Set the minimum number of entries each histogram must have
+ * before we try to fit our response function to it
*
- * @param doFit Whether to do the fits or not
+ * @param n Minimum number of entries
*/
void SetMinEntries(UShort_t n) { fMinEntries = (n < 1 ? 1 : n); }
/**
* @param x Number of energy loss bins
*/
void SetNEbins(Int_t x) { fNEbins = x; }
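+ /**
+  * Set the maximum relative error allowed on a fit parameter before
+  * the fit is rejected (see CheckResult)
+  *
+  * @param e Maximum relative parameter error
+  */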
+ void SetMaxRelativeParameterError(Double_t e) { fMaxRelParError = e; }
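+ /**
+  * Set the maximum reduced @f$\chi^2@f$ (@f$\chi^2/\nu@f$) allowed
+  * before a fit is rejected (see CheckResult)
+  *
+  * @param c Maximum @f$\chi^2/\nu@f$
+  */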
+ void SetMaxChi2PerNDF(Double_t c) { fMaxChi2PerNDF = c; }
/**
* Set whether to use increasing bin sizes
*
/**
* Fit the energy loss distributions and build summary stacks of the fit parameters
*
- * @param nEvents Number of events
+ * @param dir Where the histograms are
*/
void Fit(TList* dir);
/**
* Initialise object
*
- * @param eAxis
+ * @param eAxis Eta axis
+ * @param maxDE Max energy loss to consider
+ * @param nDEbins Number of bins
+ * @param useIncrBin Whether to use an increasing bin size
*/
void Init(const TAxis& eAxis,
Double_t maxDE=10,
*/
void Fill(Bool_t empty, Int_t ieta, Double_t mult);
/**
- * Scale the histograms to the total number of events
+ * Fit each histogram to up to @a nParticles particle responses.
*
- * @param dir Output list
- * @param eta Eta axis
- * @param lowCut Lower cut
- * @param nLandau Max number of convolved landaus to fit
+ * @param dir Output list
+ * @param eta Eta axis
+ * @param lowCut Lower cut
+ * @param nParticles Max number of convolved landaus to fit
+ * @param minEntries Minimum number of entries
+ * @param minusBins Number of bins from peak to subtract to
+ * get the fit range
+ * @param relErrorCut Cut applied to relative error of parameter.
+ * Note, for multi-particle weights, the cut
+ * is loosened by a factor of 2
+ * @param chi2nuCut Cut on @f$ \chi^2/\nu@f$ -
+ * the reduced @f$\chi^2@f$
*/
TObjArray* Fit(TList* dir,
const TAxis& eta,
Double_t lowCut,
- UShort_t nLandau,
+ UShort_t nParticles,
UShort_t minEntries,
- UShort_t minusBins) const;
+ UShort_t minusBins,
+ Double_t relErrorCut,
+ Double_t chi2nuCut) const;
/**
- * Fit a signal histogram
+ * Fit a signal histogram. First, the bin @f$ b_{min}@f$ with
+ * maximum bin content in the range @f$ [E_{min},\infty]@f$ is
+ * found. Then the fit range is set to the bin range
+ * @f$ [b_{min}-\Delta b,b_{min}+2\Delta b]@f$, and a 1
+ * particle signal is fitted to that. The parameters of that fit
+ * is then used as seeds for a fit of the @f$ N@f$ particle response
+ * to the data in the range
+ * @f$ [b_{min}-\Delta b,N(\Delta_1+\xi_1\log(N))+2N\xi]@f$
*
- * @param dist Historgam to fit
- * @param lowCut Lower cut on signal
- * @param nLandau Max number of convolved landaus to fit
+ * @param dist Histogram to fit
+ * @param lowCut Lower cut @f$ E_{min}@f$ on signal
+ * @param nParticles Max number @f$ N@f$ of convolved landaus to fit
+ * @param minusBins Number of bins @f$ \Delta b@f$ from peak to
+ * subtract to get the fit range
+ * @param relErrorCut Cut applied to relative error of parameter.
+ * Note, for multi-particle weights, the cut
+ * is loosened by a factor of 2
+ * @param chi2nuCut Cut on @f$ \chi^2/\nu@f$ -
+ * the reduced @f$\chi^2@f$
*
* @return The best fit function
*/
TF1* FitHist(TH1* dist,
Double_t lowCut,
- UShort_t nLandau,
- UShort_t minusBins) const;
+ UShort_t nParticles,
+ UShort_t minusBins,
+ Double_t relErrorCut,
+ Double_t chi2nuCut) const;
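+ // Worked example of the range logic described above (illustrative
+ // numbers only): with minusBins=4 and the peak found in bin 120,
+ // the single-particle seed fit runs over bins [116,128]; its
+ // Delta_1 and xi_1 then seed the N-particle fits.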
/**
* Check the result of the fit. Returns true if
- * - the reduced @f$ \chi^2/\nu@f$ is less than 5
- * - and that the relative error @f$ \Delta p_i/p_i@f$ on each
- * parameter is less than 20 percent.
- * - If this is a fit to N particles if the N particle weight is
- * larger than @$f 10^{-7}@f$
+ * - @f$ \chi^2/\nu < \max{\chi^2/\nu}@f$
+ * - @f$ \Delta p_i/p_i < \delta_e@f$ for all parameters. Note,
+ * for multi-particle fits, this requirement is relaxed by a
+ * factor of 2
+ * - @f$ a_{n} > 10^{-7}@f$ when fitting to an @f$ n@f$
+ * particle response
*
- * @param r Result to check
+ * @param r Result to check
+ * @param relErrorCut Cut @f$ \delta_e@f$ applied to relative error
+ * of parameter.
+ * @param chi2nuCut Cut @f$ \max{\chi^2/\nu}@f$
*
* @return true if fit is good.
*/
- Bool_t CheckResult(TFitResult* r) const;
+ Bool_t CheckResult(TFitResult* r,
+ Double_t relErrorCut,
+ Double_t chi2nuCut) const;
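+ // Numerical example (illustrative): with relErrorCut=0.25 a
+ // parameter of value 0.4 passes only if its error is below 0.1;
+ // for the multi-particle weights a_i the cut is doubled, so an
+ // error of up to 0.2 is accepted there.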
/**
* Make an axis with increasing bins
*
TList fRingHistos; // List of histogram containers
Double_t fLowCut; // Low cut on energy
- UShort_t fNLandau; // Number of landaus to try to fit
+ UShort_t fNParticles; // Number of landaus to try to fit
UShort_t fMinEntries; // Minimum number of entries
- UShort_t fBinsToSubtract;// Number of bins to subtract from found max
+ UShort_t fFitRangeBinWidth;// Number of bins to subtract from found max
Bool_t fDoFits; // Whether to actually do the fits
TAxis fEtaAxis; // Eta axis
Double_t fMaxE; // Maximum energy loss to consider
Int_t fNEbins; // Number of energy loss bins
Bool_t fUseIncreasingBins; // Whether to use increasing bin sizes
+ Double_t fMaxRelParError; // Relative error cut
+ Double_t fMaxChi2PerNDF; // chi^2/nu cut
Int_t fDebug; // Debug level
ClassDef(AliFMDEnergyFitter,1); //
* @param lowFlux On return, true if the event is considered a low-flux
* event (according to the setting of fLowFluxCut)
* @param ivz On return, the found vertex bin (zero-based)
+ * @param vz On return, the z position of the interaction
*
* @return 0 (or kOk) on success, otherwise a bit mask of error codes
*/
* @param input Input
* @param lowFlux If this is a low-flux event
* @param output Output AliESDFMD object
- * @param vz Current vertex position
*
* @return True on success, false otherwise
*/
/**
* Scale the histograms to the total number of events
*
+ * @param dir Where the output is
* @param nEvents Number of events
*/
void ScaleHistograms(TList* dir, Int_t nEvents);
*/
~RingHistos();
/**
- * Initialise this object
+ * Clear this object
*/
void Clear(const Option_t* ="") { fNHits = 0; }
+ /**
+ * Increase number of hits
+ *
+ */
void Incr() { fNHits++; }
+ /**
+ * Finish off
+ *
+ */
void Finish();
+ /**
+ * Make output
+ *
+ * @param dir where to store
+ */
void Output(TList* dir);
/**
* Scale the histograms to the total number of events
*
+ * @param dir     Where the output is
* @param nEvents Number of events
*/
void ScaleHistograms(TList* dir, Int_t nEvents);
TH1D* fBefore; // Distribution of signals before filter
Double_t landauGaus1(Double_t* xp, Double_t* pp)
{
Double_t x = xp[0];
- Double_t constant = pp[0];
- Double_t delta = pp[1];
- Double_t xi = pp[2];
- Double_t sigma = pp[3];
- Double_t sigma_n = pp[4];
+ Double_t constant = pp[AliForwardUtil::ELossFitter::kC];
+ Double_t delta = pp[AliForwardUtil::ELossFitter::kDelta];
+ Double_t xi = pp[AliForwardUtil::ELossFitter::kXi];
+ Double_t sigma = pp[AliForwardUtil::ELossFitter::kSigma];
+ Double_t sigma_n = pp[AliForwardUtil::ELossFitter::kSigmaN];
return constant * AliForwardUtil::LandauGaus(x, delta, xi, sigma, sigma_n);
}
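+// Note (usage sketch, not new functionality): landauGaus1 is the
+// single-particle response handed to ROOT as a fit function, e.g. in
+// Fit1Particle:
+//
+//   TF1* landau1 = new TF1("landau1", landauGaus1, minE, maxEE, kSigmaN+1);
+//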
Double_t landauGausN(Double_t* xp, Double_t* pp)
{
Double_t x = xp[0];
- Double_t constant = pp[0];
- Double_t delta = pp[1];
- Double_t xi = pp[2];
- Double_t sigma = pp[3];
- Double_t sigma_n = pp[4];
- Int_t n = Int_t(pp[5]);
- Double_t* a = &(pp[6]);
+ Double_t constant = pp[AliForwardUtil::ELossFitter::kC];
+ Double_t delta = pp[AliForwardUtil::ELossFitter::kDelta];
+ Double_t xi = pp[AliForwardUtil::ELossFitter::kXi];
+ Double_t sigma = pp[AliForwardUtil::ELossFitter::kSigma];
+ Double_t sigma_n = pp[AliForwardUtil::ELossFitter::kSigmaN];
+ Int_t n = Int_t(pp[AliForwardUtil::ELossFitter::kN]);
+ Double_t* a = &(pp[AliForwardUtil::ELossFitter::kA]);
return constant * AliForwardUtil::NLandauGaus(x, delta, xi, sigma, sigma_n,
n, a);
{
Double_t deltap = delta - xi * mpshift;
Double_t sigma2 = sigma_n*sigma_n + sigma*sigma;
- Double_t sigma1 = TMath::Sqrt(sigma2);
+ Double_t sigma1 = sigma_n == 0 ? sigma : TMath::Sqrt(sigma2);
Double_t xlow = x - fgConvolutionNSigma * sigma1;
- Double_t xhigh = x - fgConvolutionNSigma * sigma1;
+ Double_t xhigh = x + fgConvolutionNSigma * sigma1;
Double_t step = (xhigh - xlow) / fgConvolutionSteps;
Double_t sum = 0;
for (Int_t i = 0; i <= fgConvolutionSteps/2; i++) {
- Double_t x1 = xlow + (i - .5) * step;
- Double_t x2 = xlow - (i - .5) * step;
+ Double_t x1 = xlow + (i - .5) * step;
+ Double_t x2 = xhigh - (i - .5) * step;
sum += TMath::Landau(x1, deltap, xi, kTRUE) * TMath::Gaus(x, x1, sigma1);
sum += TMath::Landau(x2, deltap, xi, kTRUE) * TMath::Gaus(x, x2, sigma1);
for (Int_t i = 2; i <= n; i++) {
Double_t delta_i = i * (delta + xi * TMath::Log(i));
Double_t xi_i = i * xi;
- Double_t sigma_i = TMath::Sqrt(Double_t(n)*sigma);
- Double_t a_i = a[i];
+ Double_t sigma_i = TMath::Sqrt(Double_t(n))*sigma;
+ Double_t a_i = a[i-2];
result += a_i * AliForwardUtil::LandauGaus(x, delta_i, xi_i,
sigma_i, sigma_n);
}
// Find the fit range
dist->GetXaxis()->SetRangeUser(fLowCut, fMaxRange);
- // Normalize peak to 1
- Double_t max = dist->GetMaximum();
- dist->Scale(1/max);
-
// Get the bin with maximum
Int_t maxBin = dist->GetMaximumBin();
Double_t maxE = dist->GetBinLowEdge(maxBin);
dist->GetXaxis()->SetRangeUser(0, fMaxRange);
// Define the function to fit
- TF1* landau1 = new TF1("landau1", landauGaus1, minE, maxEE, 5);
+ TF1* landau1 = new TF1("landau1", landauGaus1, minE,maxEE,kSigmaN+1);
// Set initial guesses, parameter names, and limits
- landau1->SetParameters(5,.5,.07,1,sigman);
+ landau1->SetParameters(1,0.5,0.07,0.1,sigman);
landau1->SetParNames("C","#Delta_{p}","#xi", "#sigma", "#sigma_{n}");
- landau1->SetParLimits(1, minE, fMaxRange);
- landau1->SetParLimits(2, 0.00, fMaxRange);
- landau1->SetParLimits(3, 0.01, fMaxRange);
- if (sigman <= 0) landau1->FixParameter(4, 0);
- else landau1->SetParLimits(4, 0, fMaxRange);
+ landau1->SetNpx(500);
+ landau1->SetParLimits(kDelta, minE, fMaxRange);
+ landau1->SetParLimits(kXi, 0.00, fMaxRange);
+ landau1->SetParLimits(kSigma, 0.01, 0.1);
+ if (sigman <= 0) landau1->FixParameter(kSigmaN, 0);
+ else landau1->SetParLimits(kSigmaN, 0, fMaxRange);
// Do the fit, getting the result object
TFitResultPtr r = dist->Fit(landau1, "RNQS", "", minE, maxEE);
-
+ landau1->SetRange(minE, fMaxRange);
fFitResults.AddAtAndExpand(new TFitResult(*r), 0);
fFunctions.AddAtAndExpand(landau1, 0);
}
// Get some parameters from seed fit
- Double_t delta1 = r->Parameter(1);
- Double_t xi1 = r->Parameter(2);
+ Double_t delta1 = r->Parameter(kDelta);
+ Double_t xi1 = r->Parameter(kXi);
Double_t maxEi = n * (delta1 + xi1 * TMath::Log(n)) + 2 * n * xi1;
Double_t minE = f->GetXmin();
// Make the fit function
- TF1* landaun = new TF1(Form("landau%d", n), &landauGausN,minE,maxEi,5+n);
- landaun->SetLineStyle((n % 10)+1);
- landaun->SetLineWidth(2);
+ TF1* landaun = new TF1(Form("landau%d", n), &landauGausN,minE,maxEi,kN+n);
+ landaun->SetLineStyle(((n-2) % 10)+2); // start at dashed
+ landaun->SetLineColor(((n-2) % 10)+2); // start at red
+ landaun->SetLineWidth(1);
+ landaun->SetNpx(500);
landaun->SetParNames("C","#Delta_{p}","#xi", "#sigma", "#sigma_{n}", "N");
// Set the initial parameters from the seed fit
- landaun->SetParameter(0, r->Parameter(0)); // Constant
- landaun->SetParameter(1, r->Parameter(1)); // Delta
- landaun->SetParameter(2, r->Parameter(2)); // xi
- landaun->SetParameter(3, r->Parameter(3)); // sigma
- landaun->SetParameter(4, r->Parameter(4)); // sigma_n
- landaun->SetParLimits(1, minE, fMaxRange); // Delta
- landaun->SetParLimits(2, 0.00, fMaxRange); // xi
- landaun->SetParLimits(3, 0.01, fMaxRange); // sigma
- landaun->SetParLimits(4, 0.00, fMaxRange); // sigma_n
+ landaun->SetParameter(kC, r->Parameter(kC)); // Constant
+ landaun->SetParameter(kDelta, r->Parameter(kDelta)); // Delta
+ landaun->SetParameter(kXi, r->Parameter(kXi)); // xi
+ landaun->SetParameter(kSigma, r->Parameter(kSigma)); // sigma
+ landaun->SetParameter(kSigmaN, r->Parameter(kSigmaN)); // sigma_n
+ landaun->SetParLimits(kDelta, minE, fMaxRange); // Delta
+ landaun->SetParLimits(kXi, 0.00, fMaxRange); // xi
+ landaun->SetParLimits(kSigma, 0.01, 1); // sigma
+ // Check if we're using the noise sigma
+ if (sigman <= 0) landaun->FixParameter(kSigmaN, 0);
+ else landaun->SetParLimits(kSigmaN, 0, fMaxRange);
// Fix the number parameter
- landaun->FixParameter(5, n); // N
+ landaun->FixParameter(kN, n); // N
// Set the range and name of the scale parameters
for (UShort_t i = 2; i <= n; i++) {// Take parameters from last fit
- landaun->SetParameter(5+i-2, n == 2 ? 0.05 : 0);
- landaun->SetParLimits(5+i-2, 0,1);
- landaun->SetParName(5+i-2, Form("a_{%d}", i));
+ landaun->SetParameter(kA+i-2, n == 2 ? 0.05 : 0.000001);
+ landaun->SetParLimits(kA+i-2, 0,1);
+ landaun->SetParName(kA+i-2, Form("a_{%d}", i));
}
// Do the fit
TFitResultPtr tr = dist->Fit(landaun, "RSQN", "", minE, maxEi);
+ landaun->SetRange(minE, fMaxRange);
fFitResults.AddAtAndExpand(new TFitResult(*tr), n-1);
fFunctions.AddAtAndExpand(landaun, n-1);
* @param delta Most probable value
* @param xi The 'width' of the distribution
*
- * @return @f$ f'_{L}(x;\Delta,\xi)
+ * @return @f$ f'_{L}(x;\Delta,\xi) @f$
*/
static Double_t Landau(Double_t x, Double_t delta, Double_t xi);
* Calculate the value of a Landau convolved with a Gaussian
*
* @f[
- * f(x;\Delta,\xi,\sigma') = \frac{1}{\sigma' \sqrt{2 \pi}}
+ * f(x;\Delta,\xi,\sigma') = \frac{1}{\sigma' \sqrt{2 \pi}}
* \int_{-\infty}^{+\infty} d\Delta' f'_{L}(x;\Delta',\xi)
- * \exp{-\frac{(\Delta-\Delta')^2}{2\sigma'^2}}
+ * \exp{-\frac{(\Delta-\Delta')^2}{2\sigma'^2}}
* @f]
*
- * where @f$ f'_{L}@ is the Landau distribution, @f$ \Delta@f$ the
- * energy loss, @f$ \xi@f the width of the Landau, and @f$
- * \sigma'^2=\sigma^2-\sigma_n^2 @f$. Here, @f$\sigma@f$ is the
+ * where @f$ f'_{L}@f$ is the Landau distribution, @f$ \Delta@f$ the
+ * energy loss, @f$ \xi@f$ the width of the Landau, and
+ * @f$ \sigma'^2=\sigma^2-\sigma_n^2 @f$. Here, @f$\sigma@f$ is the
* variance of the Gaussian, and @f$\sigma_n@f$ is a parameter modelling
* noise in the detector.
*
* @param x where to evaluate @f$ f@f$
* @param delta @f$ \Delta@f$ of @f$ f(x;\Delta,\xi,\sigma')@f$
* @param xi @f$ \xi@f$ of @f$ f(x;\Delta,\xi,\sigma')@f$
- * @param sigma @f$ \sigma@f$ of @f$\sigma'^2=\sigma^2-\sigma_n^2 @f
- * @param sigma_n @f$ \sigma_n@f$ of @f$\sigma'^2=\sigma^2-\sigma_n^2 @f
+ * @param sigma @f$ \sigma@f$ of @f$\sigma'^2=\sigma^2-\sigma_n^2 @f$
+ * @param sigma_n @f$ \sigma_n@f$ of @f$\sigma'^2=\sigma^2-\sigma_n^2 @f$
*
* @return @f$ f@f$ evaluated at @f$ x@f$.
*/
//------------------------------------------------------------------
/**
* Evaluate
- * @f$
- * f_N(x;\Delta,\xi,\sigma') = \sum_{i=1}^N a_i f(x;\Delta_i,\xi_i,\sigma'_i)
- * @f$
+ * @f[
+ f_N(x;\Delta,\xi,\sigma') = \sum_{i=1}^N a_i f(x;\Delta_i,\xi_i,\sigma'_i)
+ @f]
*
* where @f$ f(x;\Delta,\xi,\sigma')@f$ is the convolution of a
* Landau with a Gaussian (see LandauGaus). Note that
- * @f$ a_1 = 1@f$, @\Delta_i = i(\Delta_1 + \xi\log(i))@f$,
- * @f$\xi_i=i\xi_1@f, and @f$\sigma_i'^2 = \sigma_n^2 + i\sigma_1^2@f$.
+ * @f$ a_1 = 1@f$, @f$\Delta_i = i(\Delta_1 + \xi\log(i))@f$,
+ * @f$\xi_i=i\xi_1@f$, and @f$\sigma_i'^2 = \sigma_n^2 + i\sigma_1^2@f$.
*
* References:
* - <a href="http://dx.doi.org/10.1016/0168-583X(84)90472-5">Nucl.Instrum.Meth.B1:16</a>
*/
struct ELossFitter
{
+ enum {
+ kC = 0,
+ kDelta,
+ kXi,
+ kSigma,
+ kSigmaN,
+ kN,
+ kA
+ };
/**
* Constructor
*
* If there's no 1-particle fit present, it does that first
*
* @param dist Data to fit the function to
- * @param N Number of particle signals to fit
+ * @param n Number of particle signals to fit
* @param sigman If larger than zero, the initial guess of the
* detector induced noise. If zero or less, then this
* parameter is ignored in the fit (fixed at 0)
+++ /dev/null
-void
-DrawFits(const char* fname="AnalysisResults.root")
-{
- TFile* file = TFile::Open(fname, "READ");
- if (!file) {
- Error("DrawFits", "Couldn't open %s", fname);
- return;
- }
-
- TList* forward =
- static_cast<TList*>(file->Get("PWG2forwardDnDeta/Forward"));
- if (!forward) {
- Error("DrawFits", "Couldn't get forward list from %s", fname);
- return;
- }
-
- TList* fitter =
- static_cast<TList*>(forward->FindObject("fmdEnergyFitter"));
- if (!fitter) {
- Error("DrawFits", "Couldn't get fitter folder");
- return;
- }
-
- TList stacks;
- stacks.Add(fitter->FindObject("chi2"));
- stacks.Add(fitter->FindObject("c"));
- stacks.Add(fitter->FindObject("mpv"));
- stacks.Add(fitter->FindObject("w"));
- stacks.Add(fitter->FindObject("n"));
- Int_t i=2;
- while (true) {
- TObject* o = static_cast<THStack*>(fitter->FindObject(Form("a%d",i++)));
- if (!o) break;
- Info("DrawFits", "Adding %s", o->GetName());
- stacks.Add(o);
- }
- // stacks.ls();
- Int_t nMax = stacks.GetEntries();
- for (Int_t i = nMax-1; i > 4; i--) {
- THStack* stack = static_cast<THStack*>(stacks.At(i));
- TIter nextH(stack->GetHists());
- TH1* hist = 0;
- Bool_t hasData = kFALSE;
- while ((hist = static_cast<TH1*>(nextH())))
- if (hist->Integral() > 0) hasData = kTRUE;
- if (!hasData) nMax--;
- }
-
- gStyle->SetOptTitle(0);
-#if 1
- TCanvas* c = new TCanvas("c", "C",800,800);
- c->SetFillColor(0);
- c->SetBorderMode(0);
- c->SetBorderSize(0);
- c->Divide(1, nMax,0,0);
-
- TIter next(&stacks);
- THStack* stack = 0;
- i = 1;
- while ((stack = static_cast<THStack*>(next()))) {
- if (i > nMax) break;
- TVirtualPad* p = c->cd(i);
- p->SetFillColor(0);
- p->SetFillStyle(0);
- p->SetGridx();
- stack->Draw("nostack");
- stack->GetHistogram()->SetYTitle(stack->GetTitle());
- stack->GetHistogram()->SetXTitle("#eta");
- TAxis* yaxis = stack->GetHistogram()->GetYaxis();
- yaxis->SetTitleSize(0.15);
- yaxis->SetLabelSize(0.08);
- yaxis->SetTitleOffset(0.35);
- yaxis->SetNdivisions(10);
- TAxis* xaxis = stack->GetHistogram()->GetXaxis();
- xaxis->SetTitleSize(0.15);
- xaxis->SetLabelSize(0.08);
- xaxis->SetTitleOffset(0.35);
- xaxis->SetNdivisions(320);
- i++;
- p->cd();
- }
-#endif
-
- gStyle->SetOptFit(111111);
- gStyle->SetStatW(0.3);
- gStyle->SetStatH(0.3);
- gStyle->SetStatColor(0);
- gStyle->SetStatBorderSize(1);
- TCanvas* c1 = new TCanvas("c1", "c1", 800,800);
- c1->SetFillColor(0);
- c1->SetBorderMode(0);
- c1->SetBorderSize(0);
- c1->Divide(1, 5,0,0);
-
- const char* dets[] = { "FMD1I", "FMD2I", "FMD2O", "FMD3I", "FMD3O", 0 };
- for (Int_t i = 0; i < 5; i++) {
- TVirtualPad* p = c1->cd(i+1);
- p->SetGridx();
- p->SetFillColor(0);
- p->SetFillStyle(0);
- TList* d = static_cast<TList*>(fitter->FindObject(dets[i]));
- if (!d) {
- Warning("DrawFits", "List %s not found", dets[i]);
- continue;
- }
- TH1* edist = static_cast<TH1*>(d->FindObject(Form("%s_edist", dets[i])));
- if (!edist) {
- Warning("DrawFits", "Histogram %s_edist not found", dets[i]);
- continue;
- }
- edist->Draw();
- TF1* f = 0;
- TIter nextF(edist->GetListOfFunctions());
- while ((f = static_cast<TF1*>(nextF()))) {
- Double_t chi2 = f->GetChisquare();
- Int_t ndf = f->GetNDF();
- Printf("%s %s:\n Range: %f-%f\n"
- "chi^2/ndf= %f / %d = %f",
- edist->GetName(), f->GetName(),
- f->GetXmin(), f->GetXmax(), chi2, ndf,
- (ndf > 0) ? chi2/ndf : 0);
- for (Int_t j = 0; j < f->GetNpar(); j++) {
- Printf(" %-20s : %9.4f +/- %9.4f",
- f->GetParName(j), f->GetParameter(j), f->GetParError(j));
- }
- }
- p->cd();
- }
- c1->cd();
-}
* @param mask Trigger mask
* @param energy Collision energy @f$ \sqrt{s}@f$ or @f$\sqrt{s_{NN}}@f$
* @param title Title to put on the plot
+ * @param doHHD Whether to do HHD comparison
+ * @param doComp Whether to do comparisons
*
* @return True on success, false otherwise
*/
* @param rebin How many times to rebin
* @param energy Collision energy
* @param mask Trigger mask
+ * @param doHHD Whether to do HHD comparison
+ * @param doComp Whether to do comparisons
*
* @return true on success, false otherwise
*/
/**
* Get the result from previous analysis code
*
- * @param fn File to open
+ * @param fn File to open
+ * @param nsd Whether this is NSD
*
* @return null or result of previous analysis code
*/
* @param s Scaling factor
* @param ytitle Y axis title
+ * @param ynDiv  Divisions on Y axis
* @param force Whether to draw the stack first or not
*/
void FixAxis(THStack* stack, Double_t s, const char* ytitle,
Int_t ynDiv=10, Bool_t force=true)
/**
* Run first pass of the analysis - that is read in ESD and produce AOD
*
- * @param file ESD input file
- * @param nEvents Number of events to process
- * @param nCutBins Number of additional bins to cut off
- * @param correctionCut Threshold for when to use secondary map
+ * @param esddir ESD input directory. Any file matching the pattern
+ * *AliESDs*.root are added to the chain
+ * @param nEvents Number of events to process. If 0 or less, then
+ * all events are analysed
+ * @param flags Job flags. A bit mask of
+ * - 0x01 (MC) Monte-Carlo truth handler installed
+ * - 0x02 (PROOF) Proof mode
+ * - 0x04 (FULL) Run full analysis - including terminate
+ * - 0x08 (ANALYSE) Run only analysis - not terminate
+ * - 0x10 (TERMINATE) Run no analysis - just terminate.
+ *
+ * Of these, FULL, ANALYSE, and TERMINATE are mutually exclusive, while
+ * PROOF may additionally be combined with FULL (see below).
+ *
+ * If PROOF mode is selected, then Terminate will be run on the master node
+ * in any case.
+ *
+ * If FULL is selected, then the full analysis is done. Note that
+ * this can be combined with PROOF but with no effect.
+ *
+ * ANALYSE cannot be combined with FULL, PROOF, or TERMINATE. In a
+ * local job, the output AnalysisResults.root will still be made, but
+ * terminate is not called.
+ *
+ * In TERMINATE, the file AnalysisResults.root is opened and all
+ * containers found are connected to the tasks. The terminate member
+ * function is then called
+ *
*
* @ingroup pwg2_forward_analysis_scripts
*/
void
Pass1(const char* esddir=".",
- Int_t nEvents=1000,
- Int_t nCutBins=1,
- Int_t correctionCut=0.1,
- Int_t proof)
+ Int_t nEvents=1000,
+ UShort_t flags=0x4)
{
+ Printf("Flags: 0x%04x", flags);
+ Printf(" MC: %s", flags & 0x01 ? "yes" : "no");
+ Printf(" Proof mode %s", flags & 0x02 ? "yes" : "no");
+ Printf(" Full analysis %s", flags & 0x04 ? "yes" : "no");
+ Printf(" Analyse only %s", flags & 0x08 ? "yes" : "no");
+ Printf(" Terminate only %s", flags & 0x10 ? "yes" : "no");
+
gROOT->LoadMacro("$ALICE_ROOT/PWG2/FORWARD/analysis2/RunManager.C");
- RunManager(esddir, kFALSE, nEvents, nCutBins, correctionCut, proof);
+ RunManager(esddir, nEvents, flags);
}
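+//
+// Example (illustrative): a full local pass over 1000 events with the
+// Monte-Carlo truth handler enabled could be started as
+//
+//   aliroot 'Pass1.C(".", 1000, 0x1|0x4)'
+//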
//
// EOF
* @param vzMax Maximum interaction point z coordinate
* @param rebin How many bins to group
* @param title Title to put on the plot
+ * @param hhd Whether to do HHD comparison
+ * @param comp Whether to do comparisons
*
* @ingroup pwg2_forward_analysis_scripts
*/
rebin=1
vzmin=-10
vzmax=10
-ncutbins=1
-correctioncut=0.1
batch=0
gdb=0
proof=0
cms=900
hhd=1
comp=1
+anal=0
+term=0
+tit=
usage()
{
Options:
-h,--help This help
-n,--events N Number of events ($nev)
- -N,--no-analysis Do not analyse, just draw ($noanal)
- -D,--no-draw Do not draw, just analysis ($nodraw)
+ -1,--pass1 Run only pass 1, no draw ($nodraw)
+ -2,--pass2 Run only pass 2, just draw ($noanal)
-r,--rebin N Rebin by N ($rebin)
- -c,--n-cut-bins N Number of cut bins ($ncutbins)
- -C,--correction-cut V Cut on secondary corr, ($correctioncut)
-v,--vz-min CM Minimum value of vz ($vzmin)
-V,--vz-max CM Maximum value of vz ($vzmax)
-t,--trigger TYPE Select trigger TYPE ($type)
-e,--energy CMS Center of mass energy ($cms)
-b,--batch Do batch processing ($batch)
- -p,--proof Run in PROOF(Lite) mode ($proof)
+ -P,--proof Run in PROOF(Lite) mode ($proof)
+ -A,--analyse-only Run only analysis ($anal)
+ -T,--terminate-only Run only terminate ($term)
+ -S,--title STRING Set the title string ($tit)
-g,--gdb Run in GDB mode ($gdb)
-H,--hhd Do comparison to HHD ($hhd)
-O,--other Do comparison to other ($comp)
case $1 in
-h|--help) usage ; exit 0;;
-n|--events) nev=$2 ; shift ;;
- -N|--no-analysis) noanal=`toggle $noanal` ;;
- -D|--no-draw) nodraw=`toggle $nodraw` ;;
+ -2|--pass2) noanal=`toggle $noanal` ;;
+ -1|--pass1) nodraw=`toggle $nodraw` ;;
-b|--batch) batch=`toggle $batch` ;;
- -p|--proof) proof=`toggle $proof` ;;
+ -P|--proof) proof=`toggle $proof` ;;
+ -A|--analyse-only) anal=`toggle $anal` ;;
+ -T|--terminate-only) term=`toggle $term` ;;
-g|--gdb) gdb=`toggle $gdb` ;;
-H|--hhd) hhd=`toggle $hhd` ;;
-O|--other) other=`toggle $other` ;;
-v|--vz-min) vzmin=$2 ; shift ;;
-V|--vz-max) vzmax=$2 ; shift ;;
-e|--energy) cms=$2 ; shift ;;
- -c|--n-cut-bins) ncutbins=$2 ; shift ;;
- -C|--correction-cut) correctioncut=$2 ; shift ;;
+ -S|--title) tit="$2" ; shift ;;
-t|--type)
if test "x$type" = "x" ; then type=$2 ; else type="$type|$2"; fi
shift ;;
if test $noanal -lt 1 ; then
rm -f AnalysisResult.root AliAODs.root
rm -f fmdana.png
+
+ # Setup analysis flags
+ af=0
+ if test $proof -gt 0 ; then
+ af=2
+ else
+ if test $anal -gt 0 ; then
+ af=8
+ elif test $term -gt 0 ; then
+ af=16
+ else
+ af=4
+ fi
+ fi
if test $gdb -gt 0 ; then
export PROOF_WRAPPERCMD="gdb -batch -x $ALICE_ROOT/PWG2/FORWARD/analysis2/gdb_cmds --args"
fi
- aliroot $opts $ALICE_ROOT/PWG2/FORWARD/analysis2/Pass1.C\(\".\",$nev,$ncutbins,$correctioncut,$proof\) $redir
+ aliroot $opts $ALICE_ROOT/PWG2/FORWARD/analysis2/Pass1.C\(\".\",$nev,$af\) \
+ $redir
rm -f event_stat.root EventStat_temp.root outputs_valid
if test ! -f AnalysisResults.root || test ! -f AliAODs.root ; then
echo "Analysis failed"
if test $nodraw -lt 1 ; then
rm -f result.root
- aliroot ${opts} $ALICE_ROOT/PWG2/FORWARD/analysis2/Pass2.C\(\"AliAODs.root\",\"$type\",$cms,$vzmin,$vzmax,$rebin,\"Run\ 118506,\ $nev\ Events,\ v_{z}#in[$vzmin,$vzmax],\ $type\",$hhd,$comp\)
+ if test "x$tit" = "x" ; then
+ tit="$nev\ events,\ v_{z}#in[$vzmin,$vzmax],\ $type"
+ else
+ tit=`echo $tit | tr ' ' '\ '`
+ fi
+ aliroot ${opts} $ALICE_ROOT/PWG2/FORWARD/analysis2/Pass2.C\(\"AliAODs.root\",\"$type\",$cms,$vzmin,$vzmax,$rebin,\"$tit\",$hhd,$comp\)
fi
/**
- * Script to set-up a train
+ * Flags for the analysis
+ *
+ * @ingroup pwg2_forward_analysis_scripts
+ */
+enum {
+ kMC = 0x01, // MC input
+ kProof = 0x02, // Run in proof mode
+ kFull = 0x04, // Do full analysis - including terminate
+ kAnalyse = 0x08, // Run the analysis
+ kTerminate = 0x10 // Run only terminate
+};
+
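+/*
+ * Example (illustrative): combining the flags above, a full local run
+ * over Monte-Carlo input, using all events in the chain, could be
+ * started from the ROOT prompt as
+ *
+ *   RunManager(".", 0, kMC | kFull);
+ */
+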
+/**
+ * Run first pass of the analysis - that is read in ESD and produce AOD
+ *
+ * @param esddir ESD input directory. Any file matching the pattern
+ * *AliESDs*.root are added to the chain
+ * @param nEvents Number of events to process. If 0 or less, then
+ * all events are analysed
+ * @param flags Job flags. A bit mask of
+ * - 0x01 (MC) Monte-Carlo truth handler installed
+ * - 0x02 (PROOF) Proof mode
+ * - 0x04 (FULL) Run full analysis - including terminate
+ * - 0x08 (ANALYSE) Run only analysis - not terminate
+ * - 0x10 (TERMINATE) Run no analysis - just terminate.
+ *
+ * Of these, FULL, ANALYSE, and TERMINATE are mutually exclusive, while
+ * PROOF may additionally be combined with FULL (see below).
+ *
+ * If PROOF mode is selected, then Terminate will be run on the master node
+ * in any case.
+ *
+ * If FULL is selected, then the full analysis is done. Note that
+ * this can be combined with PROOF but with no effect.
+ *
+ * ANALYSE cannot be combined with FULL, PROOF, or TERMINATE. In a
+ * local job, the output AnalysisResults.root will still be made, but
+ * terminate is not called.
+ *
+ * In TERMINATE, the file AnalysisResults.root is opened and all
+ * containers found are connected to the tasks. The terminate member
+ * function is then called
*
- * @param esd ESD file
- * @param mc Whether to do MC or not
- * @param nEvents Number of events
- * @param nCutBins Bins to cut away
- * @param correctionCut
*
* @ingroup pwg2_forward_analysis_scripts
*/
-void RunManager(const char* esddir, Bool_t mc=kFALSE, Int_t nEvents=1000,
- Int_t nCutBins=1, Float_t correctionCut=0.1,
- Bool_t proof=false)
+void RunManager(const char* esddir,
+ Int_t nEvents=1000,
+ UShort_t flags=kFull)
{
// --- Libraries to load -------------------------------------------
gSystem->Load("libVMC");
gSystem->Load("libPWG2forward2");
// --- Check for proof mode, and possibly upload pars --------------
- if (proof) {
+ if (flags & kProof) {
TProof::Open("workers=2");
const char* pkgs[] = { "STEERBase", "ESD", "AOD", "ANALYSIS",
"ANALYSISalice", "PWG2forward", "PWG2forward2", 0};
Info("RunManager", "Adding %s to chain", esd.Data());
chain->Add(esd);
}
+ // If 0 or fewer events are selected, use all events in the chain
+ if (nEvents <= 0) nEvents = chain->GetEntries();
+
// --- Creating the manager and handlers ---------------------------
AliAnalysisManager *mgr = new AliAnalysisManager("Analysis Train",
mgr->SetInputEventHandler(esdHandler);
// Monte Carlo handler
- // AliMCEventHandler* mcHandler = new AliMCEventHandler();
- // mgr->SetMCtruthEventHandler(mcHandler);
- // mcHandler->SetReadTR(readTR);
+ if (flags & kMC) {
+ AliMCEventHandler* mcHandler = new AliMCEventHandler();
+ mgr->SetMCtruthEventHandler(mcHandler);
+ mcHandler->SetReadTR(readTR);
+ }
// AOD output handler
AliAODHandler* aodHandler = new AliAODHandler();
// --- Add tasks ---------------------------------------------------
gROOT->LoadMacro("$ALICE_ROOT/PWG2/FORWARD/analysis2/AddTaskFMD.C");
gROOT->LoadMacro("$ALICE_ROOT/ANALYSIS/macros/AddTaskPhysicsSelection.C");
- AliAnalysisTask* task = AddTaskFMD(nCutBins, correctionCut);
+ AliAnalysisTask* task = AddTaskFMD();
mgr->ConnectOutput(task, 0, mgr->GetCommonOutputContainer());
- task = AddTaskPhysicsSelection(mc, kTRUE, kTRUE);
+ task = AddTaskPhysicsSelection((flags & kMC), kTRUE, kTRUE);
mgr->ConnectOutput(task, 0, mgr->GetCommonOutputContainer());
// --- Run the analysis --------------------------------------------
Error("RunManager", "Failed to initialize analysis train!");
return;
}
+ // Skip terminate if requested to do so, and we are not in Proof or full mode
+ mgr->SetSkipTerminate(!(flags & kProof) &&
+ !(flags & kFull) &&
+ (flags & kAnalyse));
// Some informative output
mgr->PrintStatus();
// mgr->SetDebugLevel(3);
- if (mgr->GetDebugLevel() < 1 && !proof)
+ if (mgr->GetDebugLevel() < 1 && !(flags & kProof))
mgr->SetUseProgressBar(kTRUE);
// Run the train
t.Start();
- mgr->StartAnalysis(proof ? "proof" : "local", chain, nEvents);
+ if (!(flags & kTerminate))
+ mgr->StartAnalysis((flags & kProof) ? "proof" : "local", chain, nEvents);
+ else {
+ mgr->ImportWrappers();
+ mgr->Terminate();
+ }
t.Stop();
t.Print();
}
--- /dev/null
+#include <TFile.h>
+#include <THStack.h>
+#include <TList.h>
+#include <TError.h>
+#include <TCanvas.h>
+#include <TPad.h>
+#include <TStyle.h>
+#include <TF1.h>
+#include <TLegend.h>
+#include <TMath.h>
+
+//____________________________________________________________________
+// Global
+TList* fitter = 0;
+TCanvas* canvas = 0;
+const char* pdfName = "FitResults.pdf";
+
+//____________________________________________________________________
+TList* OpenFile(const char* fname)
+{
+ TFile* file = TFile::Open(fname, "READ");
+ if (!file) {
+ Error("DrawFits", "Couldn't open %s", fname);
+ return 0;
+ }
+
+ TList* forward =
+ static_cast<TList*>(file->Get("PWG2forwardDnDeta/Forward"));
+ if (!forward) {
+ Error("DrawFits", "Couldn't get forward list from %s", fname);
+ return 0;
+ }
+
+ fitter = static_cast<TList*>(forward->FindObject("fmdEnergyFitter"));
+ if (!fitter) {
+ Error("DrawFits", "Couldn't get fitter folder");
+ return 0;
+ }
+ return fitter;
+}
+//____________________________________________________________________
+TList* CheckFitter(const char* fname="AnalysisResults.root")
+{
+ if (!fitter) return OpenFile(fname);
+ return fitter;
+}
+
+//____________________________________________________________________
+TCanvas* CheckCanvas()
+{
+ if (canvas) return canvas;
+
+ gStyle->SetOptFit(111111);
+ gStyle->SetTitleFillColor(0);
+ gStyle->SetTitleBorderSize(0);
+ gStyle->SetTitleX(0);
+ gStyle->SetTitleY(.9);
+ gStyle->SetTitleW(.4);
+ gStyle->SetTitleH(.1);
+ gStyle->SetStatW(0.2);
+ gStyle->SetStatH(0.2);
+ gStyle->SetStatColor(0);
+ gStyle->SetStatBorderSize(1);
+ gStyle->SetOptTitle(0);
+ gStyle->SetOptFit(1111);
+ gStyle->SetStatW(0.3);
+ gStyle->SetStatH(0.15);
+ gStyle->SetStatColor(0);
+ gStyle->SetStatBorderSize(1);
+
+ canvas = new TCanvas("c", "C", Int_t(800 / TMath::Sqrt(2)), 800);
+ canvas->SetFillColor(0);
+ canvas->SetBorderMode(0);
+ canvas->SetBorderSize(0);
+ canvas->SetBottomMargin(0.15);
+ return canvas;
+}
+
+//____________________________________________________________________
+void DrawSummary(const char* fname="AnalysisResults.root")
+{
+ if (!CheckFitter(fname)) {
+ Error("DrawFits", "File not opened");
+ return;
+ }
+ if (!CheckCanvas()) {
+ Error("DrawFits", "No canvas");
+ return;
+ }
+ canvas->Clear();
+
+ THStack* chi2nu;
+ THStack* c;
+ THStack* delta;
+ THStack* xi;
+ THStack* sigma;
+ THStack* sigman;
+ THStack* n;
+ TList stacks;
+ stacks.Add(chi2nu = static_cast<THStack*>(fitter->FindObject("chi2")));
+ stacks.Add(c = static_cast<THStack*>(fitter->FindObject("c")));
+ stacks.Add(delta = static_cast<THStack*>(fitter->FindObject("delta")));
+ stacks.Add(xi = static_cast<THStack*>(fitter->FindObject("xi")));
+ stacks.Add(sigma = static_cast<THStack*>(fitter->FindObject("sigma")));
+ stacks.Add(sigman = static_cast<THStack*>(fitter->FindObject("sigman")));
+ stacks.Add(n = static_cast<THStack*>(fitter->FindObject("n")));
+ Int_t baseA = stacks.GetEntries()+1;
+ Int_t i=2;
+ while (true) {
+ TObject* o = fitter->FindObject(Form("a%d",i++));
+ if (!o) break;
+ Info("DrawFits", "Adding %s", o->GetName());
+ stacks.Add(o);
+ }
+ // stacks.ls();
+ Int_t nMax = stacks.GetEntries();
+ for (Int_t i = nMax-1; i >= baseA; i--) {
+ THStack* stack = static_cast<THStack*>(stacks.At(i));
+ TIter nextH(stack->GetHists());
+ TH1* hist = 0;
+ Bool_t hasData = kFALSE;
+ while ((hist = static_cast<TH1*>(nextH())))
+ if (hist->Integral() > 0) hasData = kTRUE;
+ if (!hasData) nMax--;
+ }
+
+ canvas->SetRightMargin(0.05);
+ canvas->SetTopMargin(0.05);
+ canvas->Divide(2, (nMax+1)/2, 0.1, 0, 0);
+
+ TIter next(&stacks);
+ THStack* stack = 0;
+ i = 0;
+ Int_t b = 1;
+ while ((stack = static_cast<THStack*>(next()))) {
+ if (i > nMax) break;
+ TVirtualPad* p = canvas->cd(1+i/5 + 2*(i%5));
+ p->SetLeftMargin(.15);
+ p->SetFillColor(0);
+ p->SetFillStyle(0);
+ p->SetGridx();
+ stack->Draw("nostack");
+ stack->GetHistogram()->SetYTitle(stack->GetTitle());
+ stack->GetHistogram()->SetXTitle("#eta");
+
+ TAxis* yaxis = stack->GetHistogram()->GetYaxis();
+ if (i == 0) yaxis->SetRangeUser(0,20); // Chi2
+ if (i == 1) stack->SetMaximum(1); // c
+ if (i == 2) stack->SetMaximum(1); // delta
+ if (i == 3) stack->SetMaximum(0.1); // xi
+ if (i == 4 || i == 5) stack->SetMaximum(0.5); // sigma{,n}
+ if (i == 7) stack->SetMaximum(0.5); // a
+ yaxis->SetTitleSize(0.15);
+ yaxis->SetLabelSize(0.08);
+ yaxis->SetTitleOffset(0.35);
+ yaxis->SetNdivisions(5);
+
+ TAxis* xaxis = stack->GetHistogram()->GetXaxis();
+ xaxis->SetTitleSize(0.15);
+ xaxis->SetLabelSize(0.08);
+ xaxis->SetTitleOffset(0.35);
+ xaxis->SetNdivisions(10);
+
+ // Redraw
+ stack->Draw("nostack");
+ i++;
+ if (i >= 5) b = 2;
+ p->cd();
+ }
+ canvas->Print(pdfName, "Title:Fit summary");
+}
+
+//____________________________________________________________________
+void DrawRings(const char* fname="AnalysisResults.root")
+{
+ if (!CheckFitter(fname)) {
+ Error("DrawFits", "File not opened");
+ return;
+ }
+ if (!CheckCanvas()) {
+ Error("DrawFits", "No canvas");
+ return;
+ }
+  canvas->Clear();
+  canvas->Divide(1, 5, 0, 0);
+
+ const char* dets[] = { "FMD1I", "FMD2I", "FMD2O", "FMD3I", "FMD3O", 0 };
+ for (Int_t i = 0; i < 5; i++) {
+ TVirtualPad* p = canvas->cd(i+1);
+ p->SetGridx();
+ p->SetFillColor(0);
+ p->SetFillStyle(0);
+ TList* d = static_cast<TList*>(fitter->FindObject(dets[i]));
+ if (!d) {
+      Warning("DrawRings", "List %s not found", dets[i]);
+ continue;
+ }
+ TH1* edist = static_cast<TH1*>(d->FindObject(Form("%s_edist", dets[i])));
+ if (!edist) {
+      Warning("DrawRings", "Histogram %s_edist not found", dets[i]);
+ continue;
+ }
+ edist->Draw();
+ TF1* f = 0;
+ TIter nextF(edist->GetListOfFunctions());
+ while ((f = static_cast<TF1*>(nextF()))) {
+ Double_t chi2 = f->GetChisquare();
+ Int_t ndf = f->GetNDF();
+ Printf("%s %s:\n Range: %f-%f\n"
+ "chi^2/ndf= %f / %d = %f",
+ edist->GetName(), f->GetName(),
+ f->GetXmin(), f->GetXmax(), chi2, ndf,
+ (ndf > 0) ? chi2/ndf : 0);
+ for (Int_t j = 0; j < f->GetNpar(); j++) {
+ Printf(" %-20s : %9.4f +/- %9.4f",
+ f->GetParName(j), f->GetParameter(j), f->GetParError(j));
+ }
+ }
+ p->cd();
+ }
+ canvas->cd();
+ canvas->Print(pdfName, "Title:Fit to rings");
+}
+
+//____________________________________________________________________
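+/**
+ * Draw the energy loss distribution of every eta bin of every ring, four
+ * distributions per page in the output PDF.
+ *
+ * @param fname Name of the file with the analysis results
+ */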
+void DrawEtaBins(const char* fname="AnalysisResults.root")
+{
+ if (!CheckFitter(fname)) {
+    Error("DrawEtaBins", "File not opened");
+ return;
+ }
+ if (!CheckCanvas()) {
+    Error("DrawEtaBins", "No canvas");
+ return;
+ }
+ canvas->Clear();
+ canvas->Divide(2,2,0,0);
+
+ for (UShort_t d=1; d<=3; d++) {
+ UShort_t nQ = (d == 1 ? 1 : 2);
+ for (UShort_t q = 0; q < nQ; q++) {
+ Char_t r = (q == 0 ? 'I' : 'O');
+
+ TList* ring =
+ static_cast<TList*>(fitter->FindObject(Form("FMD%d%c",d,r)));
+ if (!ring) {
+        Error("DrawEtaBins", "Couldn't get ring FMD%d%c", d, r);
+ continue;
+ }
+ TList* edists = static_cast<TList*>(ring->FindObject("EDists"));
+ if (!edists) {
+        Error("DrawEtaBins", "Couldn't get EDists list for FMD%d%c", d, r);
+ continue;
+ }
+ TIter next(edists);
+ TH1* dist = 0;
+ Int_t i = 0;
+ Int_t j = 1;
+ while ((dist = static_cast<TH1*>(next()))) {
+        if (i == 4) {
+          // Page is full - print it and start a new one
+          i = 0;
+          canvas->Print(pdfName, Form("Title:FMD%d%c page %2d", d, r, j++));
+        }
+ TVirtualPad* p = canvas->cd(++i);
+ p->SetFillColor(kWhite);
+ p->SetFillStyle(0);
+ p->SetBorderSize(0);
+ p->SetLogy();
+ dist->SetMaximum(15);
+ dist->Draw();
+
+ }
+ if (i != 0) {
+ i++;
+ for (; i <= 4; i++) {
+ TVirtualPad* p = canvas->cd(i);
+ p->Clear();
+ p->SetFillColor(kMagenta-3);
+ p->SetFillStyle(0);
+ p->SetBorderSize(0);
+ }
+        canvas->Print(pdfName, Form("Title:FMD%d%c page %2d", d, r, j));
+ }
+ }
+ }
+}
+
+//____________________________________________________________________
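+/**
+ * Driver function: open the output PDF, draw the parameter summary, the
+ * per-ring fits, and the per-eta-bin distributions, then close the PDF.
+ *
+ * @param fname Name of the file with the analysis results
+ */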
+void
+DrawFits(const char* fname="AnalysisResults.root")
+{
+ if (!CheckCanvas()) {
+ Error("DrawFits", "No canvas");
+ return;
+ }
+ canvas->Print(Form("%s[", pdfName));
+ DrawSummary(fname);
+ DrawRings(fname);
+ DrawEtaBins(fname);
+ canvas->Print(Form("%s]", pdfName));
+}
--- /dev/null
+#ifndef __CINT__
+# include "AliForwardUtil.h"
+# include <TFile.h>
+# include <TList.h>
+# include <TH1.h>
+# include <TF1.h>
+# include <TFitResult.h>
+# include <TMath.h>
+# include <TStyle.h>
+# include <TArrow.h>
+# include <TCanvas.h>
+#else
+class TCanvas;
+class TFile;
+class TH1;
+class TList;
+class TF1;
+#endif
+
+//____________________________________________________________________
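+/**
+ * Get the energy loss distribution of a single eta bin of a ring.  An eta
+ * bin number larger than 999 returns the summed distribution of the whole
+ * ring instead.
+ *
+ * @param ef     Energy fitter output list
+ * @param d      Detector number (1-3)
+ * @param r      Ring identifier ('I' or 'O')
+ * @param etabin Eta bin number
+ *
+ * @return The histogram, or 0 in case of problems
+ */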
+TH1* GetEDist(TList* ef, UShort_t d, Char_t r, UShort_t etabin)
+{
+ TList* dL = static_cast<TList*>(ef->FindObject(Form("FMD%d%c",d,r)));
+ if (!dL) {
+ Error("GetEDist", "Couldn't find list FMD%d%c",d,r);
+ ef->ls();
+ return 0;
+ }
+  if (etabin > 999) {
+    // Special value: return the summed distribution for the whole ring
+    TH1* hist = static_cast<TH1*>(dL->FindObject(Form("FMD%d%c_edist",d,r)));
+    if (!hist) {
+      Error("GetEDist", "Couldn't find summed edist histogram for FMD%d%c",
+            d, r);
+      return 0;
+    }
+    return hist;
+  }
+
+ TList* edL = static_cast<TList*>(dL->FindObject("EDists"));
+ if (!edL) {
+ Error("GetEDist", "Couldn't find list EDists for FMD%d%c",d,r);
+ return 0;
+ }
+
+ TH1* hist = static_cast<TH1*>(edL->FindObject(Form("FMD%d%c_etabin%03d",
+ d, r, etabin)));
+ if (!hist) {
+    Error("GetEDist", "Couldn't find histogram FMD%d%c_etabin%03d",
+ d,r, etabin);
+ return 0;
+ }
+
+ return hist;
+}
+
+//____________________________________________________________________
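+/**
+ * Get the energy loss distribution of the eta bin containing the passed
+ * eta value.  If eta is outside the range of the eta axis, the summed
+ * ring distribution is returned instead.
+ *
+ * @param ef  Energy fitter output list
+ * @param d   Detector number (1-3)
+ * @param r   Ring identifier ('I' or 'O')
+ * @param eta Pseudo-rapidity
+ *
+ * @return The histogram, or 0 in case of problems
+ */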
+TH1* GetEDist(TList* ef, UShort_t d, Char_t r, Float_t eta)
+{
+ if (!ef) {
+ Error("GetEDist", "EF not set");
+ return 0;
+ }
+ TAxis* etaAxis = static_cast<TAxis*>(ef->FindObject("etaAxis"));
+ if (!etaAxis) {
+ Error("GetEDist", "Couldn't find eta axis in list");
+ return 0;
+ }
+
+ UShort_t bin = etaAxis->FindBin(eta);
+ if (bin <= 0 || bin > etaAxis->GetNbins()) {
+ Error("GetEDist", "eta=%f out of range [%f,%f] - getting ring histo",
+ eta, etaAxis->GetXmin(), etaAxis->GetXmax());
+ return GetEDist(ef, d, r, UShort_t(1000));
+ }
+
+ return GetEDist(ef, d, r, bin);
+}
+
+//____________________________________________________________________
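+/**
+ * Get the energy fitter output list from a file.
+ *
+ * @param file File to read from
+ *
+ * @return The list "fmdEnergyFitter", or 0 in case of problems
+ */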
+TList* GetEF(TFile* file)
+{
+ TList* forward = static_cast<TList*>(file->Get("PWG2forwardDnDeta/Forward"));
+ if (!forward) {
+ Error("GetEF", "Failed to get list PWG2forwardDnDeta/Forward from file");
+ return 0;
+ }
+ TList* ef = static_cast<TList*>(forward->FindObject("fmdEnergyFitter"));
+ if (!ef) {
+ Error("GetEF", "Failed to get energy fitter list");
+ return 0;
+ }
+
+ return ef;
+}
+
+//____________________________________________________________________
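+// Cached energy fitter output list, filled by CheckEF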
+TList* ef = 0;
+
+//____________________________________________________________________
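+/**
+ * Get the cached energy fitter list, reading it from AnalysisResults.root
+ * if it has not been read yet.
+ *
+ * @return The energy fitter list, or 0 in case of problems
+ */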
+TList* CheckEF()
+{
+ if (ef) return ef;
+
+ TFile* file = TFile::Open("AnalysisResults.root", "READ");
+ if (!file) {
+    Error("CheckEF", "Failed to open AnalysisResults.root");
+ return 0;
+ }
+  ef = GetEF(file);
+  return ef;
+}
+
+//____________________________________________________________________
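+// Cached canvas, created by CheckC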
+TCanvas* c = 0;
+
+//____________________________________________________________________
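+/**
+ * Get the cached canvas, creating it and setting up the plot style if it
+ * does not exist yet.
+ *
+ * @return The canvas
+ */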
+TCanvas* CheckC()
+{
+ if (c) return c;
+
+ gStyle->SetOptFit(1111);
+ gStyle->SetCanvasColor(0);
+ gStyle->SetCanvasBorderSize(0);
+ gStyle->SetCanvasBorderMode(0);
+ gStyle->SetCanvasDefW(800);
+ gStyle->SetCanvasDefH(800);
+ gStyle->SetPadTopMargin(0.05);
+ gStyle->SetPadRightMargin(0.05);
+ gStyle->SetTitleBorderSize(0);
+ gStyle->SetTitleFillColor(0);
+ gStyle->SetTitleStyle(0);
+ gStyle->SetStatBorderSize(1);
+ gStyle->SetStatColor(0);
+ gStyle->SetStatStyle(0);
+ gStyle->SetStatX(0.95);
+ gStyle->SetStatY(0.95);
+ gStyle->SetStatW(0.15);
+ gStyle->SetStatH(0.15);
+
+ c = new TCanvas("c", "c");
+ c->SetLogy();
+
+ return c;
+}
+
+//____________________________________________________________________
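+/**
+ * Print the chi^2/nu and the parameters of a fitted function to standard
+ * output.
+ *
+ * @param f Fitted function
+ */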
+void PrintFit(TF1* f)
+{
+ Int_t nu = f->GetNDF();
+ Double_t chi2 = f->GetChisquare();
+ Double_t chi2nu = (nu > 0 ? chi2/nu : 0);
+ Printf("%s: chi^2/nu=%f/%d=%f [%f,%f]",
+ f->GetName(), chi2, nu, chi2nu,
+ f->GetXmin(), f->GetXmax());
+ for (Int_t i = 0; i < f->GetNpar(); i++) {
+ Double_t v = f->GetParameter(i);
+ Double_t e = f->GetParError(i);
+ Double_t r = 100*(v == 0 ? 1 : e / v);
+ Printf("%32s = %14.7f +/- %14.7f (%5.1f%%)",f->GetParName(i),v,e,r);
+ }
+}
+
+//____________________________________________________________________
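+/**
+ * Fit the energy loss distribution of a single eta bin with a 1-particle
+ * and an n-particle response, draw the result, and mark the most probable
+ * energy loss of each particle number with an arrow.
+ *
+ * @param n   Maximum number of particles in the fit
+ * @param d   Detector number (1-3)
+ * @param r   Ring identifier ('I' or 'O')
+ * @param eta Pseudo-rapidity of the bin to fit
+ */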
+void FitELoss(Int_t n, UShort_t d, Char_t r, Float_t eta)
+{
+ TList* ef1 = CheckEF();
+ TCanvas* c1 = CheckC();
+ if (!ef1) return;
+ if (!c1) return;
+
+ TH1* dist = GetEDist(ef1, d, r, eta);
+ if (!dist) return;
+
+ AliForwardUtil::ELossFitter f(0.4, 10, 4);
+ TF1* landau1 = new TF1(*f.Fit1Particle(dist, 0));
+ landau1->SetName("Landau1");
+ PrintFit(landau1);
+ TF1* landaun = new TF1(*f.FitNParticle(dist, n, 0));
+  landaun->SetName(Form("Landau%d", n));
+ PrintFit(landaun);
+ landau1->SetRange(0,10);
+ landaun->SetRange(0,10);
+ landau1->SetLineWidth(4);
+ landaun->SetLineWidth(4);
+ landau1->SetLineColor(kBlack);
+ landaun->SetLineColor(kBlack);
+
+ dist->GetListOfFunctions()->Clear();
+ dist->GetListOfFunctions()->Add(landau1);
+ dist->GetListOfFunctions()->Add(landaun);
+ dist->GetListOfFunctions()->ls();
+ dist->Draw();
+ landau1->Draw("same");
+ landaun->Draw("same");
+
+ Double_t mp = landaun->GetParameter(1);
+ Double_t xi = landaun->GetParameter(2);
+ for (Int_t i = 1; i <= n; i++) {
+ Double_t x = i * (mp + xi * TMath::Log(i));
+ Double_t y = landaun->Eval(x);
+ Double_t y1 = y < 0.05 ? 1 : 0.01;
+ TArrow* a = new TArrow(x,y1,x,y,0.03,"|>");
+    Info("FitELoss", "Delta_{p,%d}=%f", i, x);
+ a->SetLineWidth(2);
+ a->SetAngle(30);
+ a->Draw();
+ }
+
+ c1->cd();
+}
--- /dev/null
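+/**
+ * Load the libraries needed to run the forward multiplicity analysis
+ * interactively.
+ */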
+void
+LoadLibs()
+{
+ gSystem->Load("libVMC");
+ gSystem->Load("libTree");
+ gSystem->Load("libSTEERBase");
+ gSystem->Load("libESD");
+ gSystem->Load("libANALYSIS");
+ gSystem->Load("libANALYSISalice");
+ gSystem->Load("libPWG0base");
+ gSystem->Load("libPWG2forward");
+ gSystem->Load("libPWG2forward2");
+}
+