for (Int_t i=0; i<folderCount; ++i)
{
- Track2Particle1DCreatePlots(fileNames[i], folderNames[i], upperPtLimit);
+ Correction1DCreatePlots(fileNames[i], folderNames[i], upperPtLimit);
- TH1* corrX = dynamic_cast<TH1*> (gROOT->FindObject(Form("gene_%s_nTrackToNPart_x_div_meas_%s_nTrackToNPart_x", folderNames[i], folderNames[i])));
- TH1* corrY = dynamic_cast<TH1*> (gROOT->FindObject(Form("gene_%s_nTrackToNPart_y_div_meas_%s_nTrackToNPart_y", folderNames[i], folderNames[i])));
- TH1* corrZ = dynamic_cast<TH1*> (gROOT->FindObject(Form("gene_%s_nTrackToNPart_z_div_meas_%s_nTrackToNPart_z", folderNames[i], folderNames[i])));
+ TH1* corrX = dynamic_cast<TH1*> (gROOT->FindObject("generated_x_div_measured_x"));
+ TH1* corrY = dynamic_cast<TH1*> (gROOT->FindObject("generated_y_div_measured_y"));
+ TH1* corrZ = dynamic_cast<TH1*> (gROOT->FindObject("generated_z_div_measured_z"));
Prepare1DPlot(corrX);
Prepare1DPlot(corrY);
break;
// one species enhanced / reduced
- case 2: // + 50% pions
- case 3: // - 50% pions
- case 4: // + 50% kaons
- case 5: // - 50% kaons
- case 6: // + 50% protons
- case 7: // - 50% protons
- Int_t correctionIndex = (index - 2) / 2;
- Double_t scaleFactor = (index % 2 == 0) ? 1.5 : 0.5;
-
- fdNdEtaCorrection[correctionIndex]->GetTrack2ParticleCorrection()->GetMeasuredHistogram()->Scale(scaleFactor);
- fdNdEtaCorrection[correctionIndex]->GetTrack2ParticleCorrection()->GetGeneratedHistogram()->Scale(scaleFactor);
-
+ case 2: // + 30% kaons
+ case 3: // - 30% kaons
+ case 4: // + 30% protons
+ case 5: // - 30% protons
+ case 6: // + 30% kaons + 30% protons
+ case 7: // - 30% kaons - 30% protons
+ case 8: // + 30% kaons - 30% protons
+ case 9: // - 30% kaons + 30% protons
+ case 10: // + 30% others
+ case 11: // - 30% others
TString* str = new TString;
- str->Form("%s%s", (correctionIndex == 0) ? "Pi" : ((correctionIndex == 1) ? "K" : "p"), (index % 2 == 0) ? "Boosted" : "Reduced");
+ if (index < 6)
+ {
+ Int_t correctionIndex = index / 2;
+ Double_t scaleFactor = (index % 2 == 0) ? 1.3 : 0.7;
+
+ fdNdEtaCorrection[correctionIndex]->GetTrack2ParticleCorrection()->GetTrackCorrection()->Scale(scaleFactor);
+ str->Form("%s%s", (correctionIndex == 0) ? "Pi" : ((correctionIndex == 1) ? "K" : (correctionIndex == 2) ? "p" : "others"), (index % 2 == 0) ? "Boosted" : "Reduced");
+ }
+ else if (index < 10)
+ {
+ Double_t scaleFactor = (index % 2 == 0) ? 1.3 : 0.7;
+ fdNdEtaCorrection[1]->GetTrack2ParticleCorrection()->GetTrackCorrection()->Scale(scaleFactor);
+ str->Form("%s%s", "K", (scaleFactor > 1) ? "Boosted" : "Reduced");
+
+ if (index >= 8)
+ scaleFactor = (index % 2 == 0) ? 0.3 : 1.7;
+ fdNdEtaCorrection[2]->GetTrack2ParticleCorrection()->GetTrackCorrection()->Scale(scaleFactor);
+ *str += Form("%s%s", "p", (scaleFactor > 1) ? "Boosted" : "Reduced");
+ }
+ else
+ {
+ Double_t scaleFactor = (index % 2 == 0) ? 1.3 : 0.7;
+ fdNdEtaCorrection[3]->GetTrack2ParticleCorrection()->GetTrackCorrection()->Scale(scaleFactor);
+ str->Form("%s%s", "others", (scaleFactor > 1) ? "Boosted" : "Reduced");
+ }
+
return str->Data();
break;
for (Int_t i=0; i<3; ++i)
{
- ScalePtDependent(fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetMeasuredHistogram(), ptDists[i]);
- ScalePtDependent(fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetGeneratedHistogram(), ptDists[i]);
+ ScalePtDependent(fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetTrackCorrection()->GetMeasuredHistogram(), ptDists[i]);
+ ScalePtDependent(fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetTrackCorrection()->GetGeneratedHistogram(), ptDists[i]);
}
TString* str = new TString;
str->Form("%s%s", (functionIndex == 0) ? "Pi" : ((functionIndex == 1) ? "K" : "p"), (index % 2 == 0) ? "Boosted" : "Reduced");
case 999:
TF1* ptDependence = new TF1("simple", "x", 0, 100);
- ScalePtDependent(fdNdEtaCorrection[0]->GetTrack2ParticleCorrection()->GetGeneratedHistogram(), ptDependence);
- ScalePtDependent(fdNdEtaCorrection[0]->GetTrack2ParticleCorrection()->GetMeasuredHistogram(), ptDependence);
+ ScalePtDependent(fdNdEtaCorrection[0]->GetTrack2ParticleCorrection()->GetTrackCorrection()->GetGeneratedHistogram(), ptDependence);
+ ScalePtDependent(fdNdEtaCorrection[0]->GetTrack2ParticleCorrection()->GetTrackCorrection()->GetMeasuredHistogram(), ptDependence);
break;
}
void Composition()
{
- gSystem->Load("libPWG0base");
+ loadlibs();
gSystem->Unlink("new_compositions.root");
+ gSystem->Unlink("new_compositions_analysis.root");
+
+ const char* names[] = { "pi", "K", "p", "other" };
+ TH1* hRatios[20];
- Int_t nCompositions = 8;
- for (Int_t comp = 0; comp < nCompositions; ++comp)
+ //backgroundEvents = 1162+434; // Michele for MB1, run 104892, 15.02.10
+ backgroundEvents = -1; // use 0 bin from MC! for 2.36 TeV
+
+ Printf("Subtracting %d background events!!!", backgroundEvents);
+ gSystem->Sleep(1000);
+
+ Int_t nCompositions = 12;
+ Int_t counter = 0;
+ for (Int_t comp = 1; comp < nCompositions; ++comp)
{
AlidNdEtaCorrection* fdNdEtaCorrection[4];
- TFile::Open("systematics.root");
+ TFile::Open("correction_mapparticle-species.root");
for (Int_t i=0; i<4; ++i)
{
TString name;
- name.Form("correction_%d", i);
+ name.Form("dndeta_correction_%s", names[i]);
fdNdEtaCorrection[i] = new AlidNdEtaCorrection(name, name);
- fdNdEtaCorrection[i]->LoadHistograms("systematics.root", name);
+ fdNdEtaCorrection[i]->LoadHistograms();
}
const char* newName = ChangeComposition(fdNdEtaCorrection, comp);
for (Int_t i=0; i<4; ++i)
{
- geneCount[i] = fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetGeneratedHistogram()->Integral();
- measCount[i] = fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetMeasuredHistogram()->Integral();
+ geneCount[i] = fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetTrackCorrection()->GetGeneratedHistogram()->Integral();
+ measCount[i] = fdNdEtaCorrection[i]->GetTrack2ParticleCorrection()->GetTrackCorrection()->GetMeasuredHistogram()->Integral();
geneCount[4] += geneCount[i];
measCount[4] += measCount[i];
TList* collection = new TList;
- for (Int_t i=0; i<4; ++i)
+ // skip "other" particle correction here
+ // with them has to be dealt differently, maybe just increasing the neutral particles...
+ for (Int_t i=1; i<4; ++i)
collection->Add(fdNdEtaCorrection[i]);
- AlidNdEtaCorrection* newComposition = new AlidNdEtaCorrection(newName, newName);
- newComposition->Merge(collection);
- newComposition->Finish();
+ fdNdEtaCorrection[0]->Merge(collection);
+ fdNdEtaCorrection[0]->Finish();
delete collection;
+ // save everything
TFile* file = TFile::Open("new_compositions.root", "UPDATE");
- newComposition->SaveHistograms();
+ fdNdEtaCorrection[0]->SetName(newName);
+ fdNdEtaCorrection[0]->SaveHistograms();
//file->Write();
file->Close();
+
+ // correct dNdeta distribution with modified correction map
+ TFile::Open("analysis_esd_raw.root");
+
+ dNdEtaAnalysis* fdNdEtaAnalysis = new dNdEtaAnalysis("fdNdEtaAnalysisESD", "fdNdEtaAnalysisESD");
+ fdNdEtaAnalysis->LoadHistograms();
+
+ fdNdEtaAnalysis->Finish(fdNdEtaCorrection[0], 0.2, 3, newName);
+
+ hRatios[counter] = (TH1F*) fdNdEtaAnalysis->GetdNdEtaHistogram()->Clone(newName);
+ hRatios[counter]->SetTitle(newName);
+ hRatios[counter]->SetYTitle("dN_{ch}/d#eta ratio #frac{default composition}{modified composition}");
+
+ if (counter > 0)
+ hRatios[counter]->Divide(hRatios[0],hRatios[counter],1,1);
+
+ file = TFile::Open("new_compositions_analysis.root", "UPDATE");
+ hRatios[counter]->Write();
+ file->Close();
+
+ delete fdNdEtaAnalysis;
+
+ counter++;
}
+ /*
gROOT->ProcessLine(".L drawPlots.C");
const char* fileNames[] = {"new_compositions.root", "new_compositions.root", "new_compositions.root", "new_compositions.root", "new_compositions.root", "new_compositions.root", "new_compositions.root", "new_compositions.root" };
- const char* folderNames[] = { "Pythia", "PythiaRatios", "PiBoosted", "PiReduced", "KBoosted", "KReduced", "pBoosted", "pReduced" };
+ const char* folderNames[] = { "PythiaRatios", "PiBoosted", "PiReduced", "KBoosted", "KReduced", "pBoosted", "pReduced" };
Track2Particle1DComposition(fileNames, nCompositions, folderNames);
+ */
}
canvas2->SaveAs("particlecomposition_result.eps");
}
-void mergeCorrectionsWithDifferentCrosssections(Int_t correctionTarget = 3, Char_t* correctionFileName="correction_mapprocess-types.root", const char* analysisFileName = "analysis_esd_raw.root", const Char_t* outputFileName="systematics_vtxtrigger_compositions.root") {
+void mergeCorrectionsWithDifferentCrosssections(Int_t origin, Int_t correctionTarget = 3, Char_t* correctionFileName="correction_mapprocess-types.root", const char* analysisFileName = "analysis_esd_raw.root", const Char_t* outputFileName=0) {
//
// Function used to merge standard corrections with vertex
// reconstruction corrections obtained by a certain mix of ND, DD and SD events
// correctionTarget is of type AlidNdEtaCorrection::CorrectionType
// kINEL = 3
// kNSD = 4
+ // kOnePart = 6
+
+ if (outputFileName == 0)
+ {
+ if (correctionTarget == 3)
+ outputFileName = "systematics_vtxtrigger_compositions_inel.root";
+ if (correctionTarget == 4)
+ outputFileName = "systematics_vtxtrigger_compositions_nsd.root";
+ if (correctionTarget == 6)
+ outputFileName = "systematics_vtxtrigger_compositions_onepart.root";
+ }
loadlibs();
const Char_t* typeName[] = { "vertexreco", "trigger", "vtxtrigger" };
+ //Karel:
+// fsd = 0.153 +- 0.031 (0.050 to take into account SD definition) --> change
+// fdd = 0.080 +- 0.050 --> change
+// fnd = 0.767 +- 0.059 --> keep (error small)
+
+// const Char_t* changes[] = { "pythia","ddmore","ddless","sdmore","sdless", "dmore", "dless", "sdlessddmore", "sdmoreddless", "ddmore25","ddless25","sdmore25","sdless25", "dmore25", "dless25", "sdlessddmore25", "sdmoreddless25"};
+ //Float_t scalesND[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
+ //Float_t scalesDD[] = {1.0, 1.5, 0.5, 1.0, 1.0, 1.5, 0.5, 1.5, 0.5, 1.25, 0.75, 1.0, 1.0, 1.25, 0.75, 1.25, 0.75};
+ //Float_t scalesSD[] = {1.0, 1.0, 1.0, 1.5, 0.5, 1.5, 0.5, 0.5, 1.5, 1.0, 1.0, 1.25, 0.75, 1.25, 0.75, 0.75, 1.25};
+ //Float_t scalesDD[] = {1.0, 1.4, 0.6, 1.0, 1.0, 1.4, 0.6, 1.4, 0.6, 1.25, 0.75, 1.0, 1.0, 1.25, 0.75, 1.25, 0.75};
+ //Float_t scalesSD[] = {1.0, 1.0, 1.0, 1.4, 0.6, 1.4, 0.6, 0.4, 1.6, 1.0, 1.0, 1.25, 0.75, 1.25, 0.75, 0.75, 1.25};
+/* Int_t nChanges = 9;
+
+ const Char_t* changes[] = { "ua5","ddmore","ddless","sdmore","sdless", "dmore", "dless", "sdlessddmore", "sdmoreddless" };
+ Float_t scalesND[] = {0.767, 0.767, 0.767, 0.767, 0.767, 0.767, 0.767, 0.767, 0.767};
+ Float_t scalesDD[] = {0.080, 0.130, 0.030, 0.080, 0.080, 0.130, 0.030, 0.130, 0.030};
+ Float_t scalesSD[] = {0.153, 0.153, 0.153, 0.203, 0.103, 0.203, 0.103, 0.103, 0.203};*/
+
+ Float_t ref_SD = -1;
+ Float_t ref_DD = -1;
+ Float_t ref_ND = -1;
+
+ Float_t error_SD = -1;
+ Float_t error_DD = -1;
+ Float_t error_ND = -1;
+
+ GetRelativeFractions(origin, ref_SD, ref_DD, ref_ND, error_SD, error_DD, error_ND);
+
+ Printf("Using x-sections:\n SD: %f +- %f\n DD: %f +- %f\n ND: %f +- %f", ref_SD, error_SD, ref_DD, error_DD, ref_ND, error_ND);
+
+ const Char_t* changes[] = { "default","sdless","sdmore","ddless","ddmore", "dless", "dmore", "sdlessddmore", "sdmoreddless" };
+ Int_t nChanges = 9;
+ Float_t scalesSD[9];
+ Float_t scalesDD[9];
+ Float_t scalesND[9];
+
+ if (1)
+ {
+ // sample 8 points on the error ellipse
+ for (Int_t i=0; i<9; i++)
+ {
+ Float_t factorSD = 0;
+ Float_t factorDD = 0;
+
+ if (i > 0 && i < 3)
+ factorSD = (i % 2 == 0) ? 1 : -1;
+ else if (i >= 3 && i < 5)
+ factorDD = (i % 2 == 0) ? 1 : -1;
+ else if (i >= 5 && i < 9)
+ {
+ factorSD = ((i % 2 == 0) ? 1.0 : -1.0) / TMath::Sqrt(2);
+ if (i == 5 || i == 6)
+ factorDD = factorSD;
+ else
+ factorDD = -factorSD;
+ }
+
+ scalesSD[i] = ref_SD + factorSD * error_SD;
+ scalesDD[i] = ref_DD + factorDD * error_DD;
+ scalesND[i] = 1.0 - scalesDD[i] - scalesSD[i];
+
+ Printf("Case %d: SD: %f DD: %f ND: %f", i, scalesSD[i], scalesDD[i], scalesND[i]);
+ }
+ }
+ else
+ {
+ Printf("WARNING: Special treatment for ratios active");
+ gSystem->Sleep(1000);
+
+ // constrained values by allowed changing of cross-sections
+ Float_t pythiaScaling = 0.224 / 0.189;
+
+ if (origin == 10)
+ {
+ // 900 GeV
+ for (Int_t i=0; i<9; i++)
+ {
+ scalesSD[i] = 15.3;
+ scalesDD[i] = 9.5;
+ }
+ scalesSD[1] = 15.7;
+ scalesSD[2] = 17.6;
+ scalesSD[3] = 13.5;
+ scalesSD[4] = 17.6;
- const Char_t* changes[] = { "pythia","ddmore","ddless","sdmore","sdless", "dmore", "dless", "sdmoreddless", "sdlessddmore", "ddmore25","ddless25","sdmore25","sdless25", "dmore25", "dless25", "sdmoreddless25", "sdlessddmore25"};
- Float_t scalesND[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
- Float_t scalesDD[] = {1.0, 1.5, 0.5, 1.0, 1.0, 1.5, 0.5, 1.5, 0.5, 1.25, 0.75, 1.0, 1.0, 1.25, 0.75, 1.25, 0.75};
- Float_t scalesSD[] = {1.0, 1.0, 1.0, 1.5, 0.5, 1.5, 0.5, 0.5, 1.5, 1.0, 1.0, 1.25, 0.75, 1.25, 0.75, 0.75, 1.25};
- Int_t nChanges = 17;
+ scalesDD[5] = 15.5;
+ scalesDD[6] = 8.8;
+ scalesDD[7] = 13.8;
+ scalesDD[8] = 7.6;
+ }
+ else if (origin == 20)
+ {
+ // 2.36 TeV
+ pythiaScaling = 0.217 / 0.167;
+
+ for (Int_t i=0; i<9; i++)
+ {
+ scalesSD[i] = 15.9;
+ scalesDD[i] = 10.7;
+ }
+
+ scalesSD[1] = 13.5;
+ scalesSD[2] = 15.2;
+ scalesSD[3] = 13.5;
+ scalesSD[4] = 17.6;
+
+ scalesDD[5] = 13.8;
+ scalesDD[6] = 7.6;
+ scalesDD[7] = 13.8;
+ scalesDD[8] = 7.6;
+ }
+ else
+ AliFatal("Not supported");
+ for (Int_t i=0; i<9; i++)
+ {
+ scalesSD[i] /= 100;
+ scalesSD[i] *= pythiaScaling;
+ scalesDD[i] /= 100;
+ scalesND[i] = 1.0 - scalesDD[i] - scalesSD[i];
+ Printf("Case %d: SD: %f DD: %f ND: %f", i, scalesSD[i], scalesDD[i], scalesND[i]);
+ }
+ }
+
+ Int_t backgroundEvents = 0;
+
+ //backgroundEvents = 1162+434; // Michele for MB1, run 104892, 15.02.10
+ //backgroundEvents = 6; // Michele for V0AND, run 104892, 15.02.10
+
+ //backgroundEvents = 4398+961; // Michele for MB1, run 104824-52, 16.02.10
+ //backgroundEvents = 19; // Michele for V0AND, run 104824-52, 16.02.10
+
+ backgroundEvents = -1; // use 0 bin from MC! for 2.36 TeV
+
+ Printf("Subtracting %d background events!!!", backgroundEvents);
+ gSystem->Sleep(1000);
+
/*
const Char_t* changes[] = { "pythia", "qgsm", "phojet"};
Float_t scalesND[] = {1.0, 1.10, 1.11};
// cross section from Pythia
// 14 TeV!
- Float_t sigmaND = 55.2;
- Float_t sigmaDD = 9.78;
- Float_t sigmaSD = 14.30;
+// Float_t sigmaND = 55.2;
+// Float_t sigmaDD = 9.78;
+// Float_t sigmaSD = 14.30;
// standard correction
TFile::Open(correctionFileName);
correctionStandard->GetTriggerBiasCorrectionINEL()->Reset();
correctionStandard->GetTriggerBiasCorrectionNSD()->Reset();
correctionStandard->GetTriggerBiasCorrectionND()->Reset();
+ correctionStandard->GetTriggerBiasCorrectionOnePart()->Reset();
AlidNdEtaCorrection* corrections[100];
TH1F* hRatios[100];
Int_t counter = 0;
- for (Int_t j=0; j<3; j++) { // j = 0 (change vtx), j = 1 (change trg), j = 2 (change both)
+ for (Int_t j=2; j<3; j++) { // j = 0 (change vtx), j = 1 (change trg), j = 2 (change both)
for (Int_t i=0; i<nChanges; i++) {
TFile::Open(correctionFileName);
dNdEtaCorrectionSD->LoadHistograms();
// calculating relative
- Float_t nd = 100 * sigmaND/(sigmaND + (scalesDD[i]*sigmaDD) + (scalesDD[i]*sigmaSD));
+ Float_t nd = dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral();
+ Float_t dd = dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral();
+ Float_t sd = dNdEtaCorrectionSD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral();
+ Float_t total = nd + dd + sd;
+
+ nd /= total;
+ sd /= total;
+ dd /= total;
+
+ Printf("Ratios in the correction map are: ND=%f, DD=%f, SD=%f", nd, dd, sd);
+
+ Float_t scaleND = scalesND[i] / nd;
+ Float_t scaleDD = scalesDD[i] / dd;
+ Float_t scaleSD = scalesSD[i] / sd;
+
+ Printf("ND=%.2f, DD=%.2f, SD=%.2f",scaleND, scaleDD, scaleSD);
+
+/* Float_t nd = 100 * sigmaND/(sigmaND + (scalesDD[i]*sigmaDD) + (scalesDD[i]*sigmaSD));
Float_t dd = 100 * (scalesDD[i]*sigmaDD)/(sigmaND + (scalesDD[i]*sigmaDD) + (scalesDD[i]*sigmaSD));
Float_t sd = 100 * (scalesSD[i]*sigmaSD)/(sigmaND + (scalesDD[i]*sigmaDD) + (scalesDD[i]*sigmaSD));
- printf(Form("%s : ND=%.2f\%, DD=%.2f\%, SD=%.2f\% \n",changes[i],nd,dd,sd));
- current->SetTitle(Form("ND=%.2f\%,DD=%.2f\%,SD=%.2f\%",nd,dd,sd));
+ printf(Form("%s : ND=%.2f\%, DD=%.2f\%, SD=%.2f\% \n",changes[i],nd,dd,sd));*/
+ current->SetTitle(Form("ND=%.2f\%,DD=%.2f\%,SD=%.2f\%",scaleND,scaleDD,scaleSD));
current->SetTitle(name);
// scale
if (j == 0 || j == 2)
{
- dNdEtaCorrectionND->GetVertexRecoCorrection()->Scale(scalesND[i]);
- dNdEtaCorrectionDD->GetVertexRecoCorrection()->Scale(scalesDD[i]);
- dNdEtaCorrectionSD->GetVertexRecoCorrection()->Scale(scalesSD[i]);
+ dNdEtaCorrectionND->GetVertexRecoCorrection()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetVertexRecoCorrection()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetVertexRecoCorrection()->Scale(scaleSD);
}
if (j == 1 || j == 2)
{
- dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->Scale(scalesND[i]);
- dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->Scale(scalesDD[i]);
- dNdEtaCorrectionSD->GetTriggerBiasCorrectionINEL()->Scale(scalesSD[i]);
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionINEL()->Scale(scaleSD);
- dNdEtaCorrectionND->GetTriggerBiasCorrectionNSD()->Scale(scalesND[i]);
- dNdEtaCorrectionDD->GetTriggerBiasCorrectionNSD()->Scale(scalesDD[i]);
- dNdEtaCorrectionSD->GetTriggerBiasCorrectionNSD()->Scale(scalesSD[i]);
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionNSD()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionNSD()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionNSD()->Scale(scaleSD);
- dNdEtaCorrectionND->GetTriggerBiasCorrectionND()->Scale(scalesND[i]);
- dNdEtaCorrectionDD->GetTriggerBiasCorrectionND()->Scale(scalesDD[i]);
- dNdEtaCorrectionSD->GetTriggerBiasCorrectionND()->Scale(scalesSD[i]);
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionND()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionND()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionND()->Scale(scaleSD);
+
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionOnePart()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionOnePart()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionOnePart()->Scale(scaleSD);
}
//clear track in correction
current->Merge(&collection);
current->Finish();
+
+ // print 0 bin efficiency
+ TH1* hist2 = current->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->Get1DCorrection("y", -10, 10);
+ if (hist2->GetBinContent(1))
+ {
+ Float_t triggerEff = 100.0 / hist2->GetBinContent(1);
+ Printf("trigger eff in 0 bin is: %.2f %%", triggerEff);
+ }
corrections[counter] = current;
dNdEtaAnalysis* fdNdEtaAnalysis = new dNdEtaAnalysis("fdNdEtaAnalysisESD", "fdNdEtaAnalysisESD");
fdNdEtaAnalysis->LoadHistograms();
- fdNdEtaAnalysis->Finish(current, 0.3, correctionTarget, Form("%d %d", j, i));
+ fdNdEtaAnalysis->Finish(current, 0.151, correctionTarget, Form("%d %d", j, i), backgroundEvents);
name = "ratio";
if (j==0) name.Append("_vetexReco_");
hRatios[counter] = (TH1F*) fdNdEtaAnalysis->GetdNdEtaHistogram()->Clone(name);
- name.Append(Form(" (DD #times %0.2f, SD #times %0.2f)",scalesDD[i],scalesSD[i]));
+ name.Form("ND #times %0.2f DD #times %0.2f, SD #times %0.2f",scaleND,scaleDD,scaleSD);
hRatios[counter]->SetTitle(name.Data());
- hRatios[counter]->SetYTitle("ratio (Pythia x-sections)/(changed x-sections)");
-
+ hRatios[counter]->SetYTitle("dN_{ch}/d#eta ratio #frac{default cross-section}{modified cross-sections}");
+
+ TF1* pol0 = new TF1("pol0", "[0]", -0.49, 0.49);
+ pol0->SetParameter(0, 0);
+ hRatios[counter]->Fit(pol0, "RN");
+ Printf("Case %d: %f", i, pol0->GetParameter(0));
+
if (counter > 0)
hRatios[counter]->Divide(hRatios[0],hRatios[counter],1,1);
// to make everything consistent
hRatios[0]->Divide(hRatios[0],hRatios[0],1,1);
- for (Int_t i=0; i<nChanges * 3; i++)
+ for (Int_t i=0; i<counter; i++)
{
corrections[i]->SaveHistograms();
hRatios[i]->Write();
fout->Close();
}
-
-DrawVertexRecoSyst() {
- // Draws the ratio of the dN/dEta obtained with changed SD and DD
- // cross-sections vertex reco corrections to the dN/dEta obtained
- // from the standard pythia cross-sections
+void GetRelativeFractions(Int_t origin, Float_t& ref_SD, Float_t& ref_DD, Float_t& ref_ND, Float_t& error_SD, Float_t& error_DD, Float_t& error_ND)
+{
+ // origin:
+ // -10 = Pythia default at 900 GeV with 50% errors (test)
+ // -1 = Pythia default at 900 GeV (test)
+ // 0 = UA5 (first-paper x-sections)
+ // 10 = UA5, hadron-level definition for Pythia
+ // 11 = UA5, hadron-level definition for Phojet
+ // 20 = E710 1.8 TeV, hadron-level definition for Pythia
+ // 21 = E710 1.8 TeV, hadron-level definition for Phojet
+ // 1 = Data 1.8 TeV
+ // 2 = Tel-Aviv model
+ // 3 = Durham model
//
- // The files with the vertex reco corrections for different
- // processes (and with the other standard corrections) are expected
- // to have the names "analysis_esd_X.root", where the Xs are defined
- // in the array changes below.
-
- Char_t* changes[] = {"pythia","ddmore","ddless","sdmore","sdless", "dmore", "dless"};
- Char_t* descr[] = {"",
- "#sigma_{DD}' = 1.5#times#sigma_{DD}",
- "#sigma_{DD}' = 0.5#times#sigma_{DD}",
- "#sigma_{SD}' = 1.5#times#sigma_{SD}",
- "#sigma_{SD}' = 0.5#times#sigma_{SD}",
- "#sigma_{D}' = 1.5#times#sigma_{D}",
- "#sigma_{D}' = 0.5#times#sigma_{D}"};
-
- Float_t scalesDD[] = {1.0, 1.5, 0.5, 1.5, 0.5, 1.5, 0.5};
- Float_t scalesSD[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.5, 0.5};
-
- Int_t markers[] = {20,20,21,22,23,28,29};
- Int_t colors[] = {1,2,3,4,6,8,102};
-
- // cross section from Pythia
- Float_t sigmaND = 55.2;
- Float_t sigmaDD = 9.78;
- Float_t sigmaSD = 14.30;
-
- TH1F* dNdEta[7];
- TH1F* ratios[7];
- TFile* fin;
-
- for (Int_t i=0; i<7; i++) {
- // calculating relative
- fin = TFile::Open(Form("analysis_esd_%s.root",changes[i]));
-
- dNdEta[i] = (TH1F*)(fin->Get("dndeta/dndeta_dNdEta_corrected_2"))->Clone();
-
- for (Int_t b=0; b<dNdEta[i]->GetNbinsX(); b++) {
- if (TMath::Abs(dNdEta[i]->GetBinCenter(b))>0.9) {
- dNdEta[i]->SetBinContent(b,0);
- dNdEta[i]->SetBinError(b,0);
- }
- }
-
- dNdEta[i]->Rebin(4);
-
- dNdEta[i]->SetLineWidth(2);
- dNdEta[i]->SetLineColor(colors[i]);
- dNdEta[i]->SetMarkerStyle(markers[i]);
- dNdEta[i]->SetMarkerSize(0.9);
- dNdEta[i]->SetMarkerColor(colors[i]);
+ switch (origin)
+ {
+ case -10: // Pythia default at 900 GeV, 50% error
+ Printf("PYTHIA x-sections");
+ ref_SD = 0.223788; error_SD = ref_SD * 0.5;
+ ref_DD = 0.123315; error_DD = ref_DD * 0.5;
+ ref_ND = 0.652897; error_ND = 0;
+ break;
- ratios[i] = (TH1F*)dNdEta[i]->Clone("ratio");
- ratios[i]->Divide(ratios[i],dNdEta[0],1,1,"B");
+ case -1: // Pythia default at 900 GeV, as test
+ Printf("PYTHIA x-sections");
+ ref_SD = 0.223788;
+ ref_DD = 0.123315;
+ ref_ND = 0.652897;
+ break;
+
+ case 0: // UA5
+ Printf("UA5 x-sections a la first paper");
+ ref_SD = 0.153; error_SD = 0.05;
+ ref_DD = 0.080; error_DD = 0.05;
+ ref_ND = 0.767; error_ND = 0;
+ break;
+
+ case 10: // UA5
+ Printf("UA5 x-sections hadron level definition for Pythia");
+ // Fractions in Pythia with UA5 cuts selection for SD
+ // ND: 0.688662
+ // SD: 0.188588 --> this should be 15.3
+ // DD: 0.122750
+ ref_SD = 0.224 * 0.153 / 0.189; error_SD = 0.023 * 0.224 / 0.189;
+ ref_DD = 0.095; error_DD = 0.06;
+ ref_ND = 1.0 - ref_SD - ref_DD; error_ND = 0;
+ break;
+
+ case 11: // UA5
+ Printf("UA5 x-sections hadron level definition for Phojet");
+ // Fractions in Phojet with UA5 cuts selection for SD
+ // ND: 0.783573
+ // SD: 0.151601 --> this should be 15.3
+ // DD: 0.064827
+ ref_SD = 0.191 * 0.153 / 0.152; error_SD = 0.023 * 0.191 / 0.152;
+ ref_DD = 0.095; error_DD = 0.06;
+ ref_ND = 1.0 - ref_SD - ref_DD; error_ND = 0;
+ break;
+
+ case 20: // E710, 1.8 TeV
+ Printf("E710 x-sections hadron level definition for Pythia");
+ // ND: 0.705709
+ // SD: 0.166590 --> this should be 15.9
+ // DD: 0.127701
+ ref_SD = 0.217 * 0.159 / 0.167; error_SD = 0.024 * 0.217 / 0.167;
+ ref_DD = 0.075 * 1.43; error_DD = 0.02 * 1.43;
+ ref_ND = 1.0 - ref_SD - ref_DD; error_ND = 0;
+ break;
- ratios[i]->SetName(changes[i]);
- ratios[i]->SetTitle(changes[i]);
+ case 21: // E710, 1.8 TeV
+ Printf("E710 x-sections hadron level definition for Phojet");
+ // ND: 0.817462
+ // SD: 0.125506 --> this should be 15.9
+ // DD: 0.057032
+ ref_SD = 0.161 * 0.159 / 0.126; error_SD = 0.024 * 0.161 / 0.126;
+ ref_DD = 0.075 * 1.43; error_DD = 0.02 * 1.43;
+ ref_ND = 1.0 - ref_SD - ref_DD; error_ND = 0;
+ break;
+
+ case 1: // data 1.8 TeV
+ Printf("??? x-sections");
+ ref_SD = 0.152;
+ ref_DD = 0.092;
+ ref_ND = 1 - ref_SD - ref_DD;
+ break;
+
+ case 2: // tel-aviv model
+ Printf("Tel-aviv model x-sections");
+ ref_SD = 0.171;
+ ref_DD = 0.094;
+ ref_ND = 1 - ref_SD - ref_DD;
+ break;
+
+ case 3: // durham model
+ Printf("Durham model x-sections");
+ ref_SD = 0.190;
+ ref_DD = 0.125;
+ ref_ND = 1 - ref_SD - ref_DD;
+ break;
+
+ default:
+ AliFatal(Form("Unknown origin %d", origin));
}
-
- //##########################################################
-
- gStyle->SetOptStat(0);
- gStyle->SetOptTitle(0);
- gStyle->SetOptFit(0);
-
- gStyle->SetTextSize(0.2);
- gStyle->SetTitleSize(0.05,"xyz");
- gStyle->SetLabelOffset(0.01, "xyz");
+}
+void CreateCorrectionsWithUA5CrossSections(Int_t origin, const Char_t* correctionFileName="correction_mapprocess-types.root", const Char_t* outputFileName="correction_map2.root") {
+ //
+ // Function used to merge standard corrections with vertex
+ // reconstruction corrections obtained by a certain mix of ND, DD
+ // and SD events.
+ //
+ loadlibs();
- gStyle->SetTitleOffset(1.2, "y");
- gStyle->SetTitleOffset(1.2, "x");
- gStyle->SetEndErrorSize(0.0);
+ const Char_t* typeName[] = { "vertexreco", "trigger", "vtxtrigger" };
+
+ Float_t ref_SD = -1;
+ Float_t ref_DD = -1;
+ Float_t ref_ND = -1;
+
+ Float_t error_SD = -1;
+ Float_t error_DD = -1;
+ Float_t error_ND = -1;
+
+ GetRelativeFractions(origin, ref_SD, ref_DD, ref_ND, error_SD, error_DD, error_ND);
+
+ Printf("Using x-sections:\n SD: %f +- %f\n DD: %f +- %f\n ND: %f +- %f", ref_SD, error_SD, ref_DD, error_DD, ref_ND, error_ND);
+
+//Karel (UA5):
+// fsd = 0.153 +- 0.031
+// fdd = 0.080 +- 0.050
+// fnd = 0.767 +- 0.059
- //##############################################
+// Karel (1.8 TeV):
+//
+// Tel-Aviv model Sd/Inel = 0.171 Dd/Inel = 0.094
+// Durham model Sd/Inel = 0.190 Dd/Inel = 0.125
+// Data Sd/Inel = 0.152 +- 0.030 Dd/Inel = 0.092 +- 0.45
- //making canvas and pads
- TCanvas *c = new TCanvas(Form("vertex_reco_syst_%s", plot), Form("vertex_reco_syst_%s", plot),600,500);
+ // standard correction
+ TFile::Open(correctionFileName);
+ AlidNdEtaCorrection* correctionStandard = new AlidNdEtaCorrection("dndeta_correction","dndeta_correction");
+ correctionStandard->LoadHistograms();
- TPad* p1 = new TPad("pad1","", 0, 0.0, 1.0, 1.0, 0, 0, 0);
+ // dont take vertexreco from this one
+ correctionStandard->GetVertexRecoCorrection()->Reset();
+ // dont take triggerbias from this one
+ correctionStandard->GetTriggerBiasCorrectionINEL()->Reset();
+ correctionStandard->GetTriggerBiasCorrectionNSD()->Reset();
+ correctionStandard->GetTriggerBiasCorrectionND()->Reset();
+ correctionStandard->GetTriggerBiasCorrectionOnePart()->Reset();
- p1->SetBottomMargin(0.15);
- p1->SetTopMargin(0.03);
- p1->SetLeftMargin(0.15);
- p1->SetRightMargin(0.03);
+ AlidNdEtaCorrection* corrections[100];
+ TH1F* hRatios[100];
- p1->SetGridx();
- p1->SetGridy();
+ Int_t counter = 0;
+
+ TFile::Open(correctionFileName);
- p1->Draw();
- p1->cd();
+ AlidNdEtaCorrection* current = new AlidNdEtaCorrection("dndeta_correction_ua5", "dndeta_correction_ua5");
+ current->LoadHistograms("dndeta_correction");
+ current->Reset();
+
+ TString name;
+ name.Form("dndeta_correction_ND");
+ AlidNdEtaCorrection* dNdEtaCorrectionND = new AlidNdEtaCorrection(name,name);
+ dNdEtaCorrectionND->LoadHistograms();
+ name.Form("dndeta_correction_DD");
+ AlidNdEtaCorrection* dNdEtaCorrectionDD = new AlidNdEtaCorrection(name,name);
+ dNdEtaCorrectionDD->LoadHistograms();
+ name.Form("dndeta_correction_SD");
+ AlidNdEtaCorrection* dNdEtaCorrectionSD = new AlidNdEtaCorrection(name,name);
+ dNdEtaCorrectionSD->LoadHistograms();
+
+ // calculating relative
+ Float_t nd = dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral();
+ Float_t dd = dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral();
+ Float_t sd = dNdEtaCorrectionSD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral();
+ Float_t total = nd + dd + sd;
+ nd /= total;
+ sd /= total;
+ dd /= total;
- TH2F* null = new TH2F("","",100,-1.5,1.5,100,0.9601,1.099);
- null->SetXTitle("#eta");
- null->GetXaxis()->CenterTitle(kTRUE);
- null->SetYTitle("dN/d#eta / dN/d#eta_{pythia}");
- null->GetYaxis()->CenterTitle(kTRUE);
- null->Draw();
-
- for (Int_t i=1; i<7; i++)
- ratios[i]->Draw("same");
-
- TLegend* legend = new TLegend(0.6, 0.6, 0.95, 0.95);
- legend->SetFillColor(0);
+ Printf("Ratios in the correction map are: ND=%f, DD=%f, SD=%f", nd, dd, sd);
+
+ Float_t scaleND = ref_ND / nd;
+ Float_t scaleDD = ref_DD / dd;
+ Float_t scaleSD = ref_SD / sd;
+
+ Printf("ND=%.2f, DD=%.2f, SD=%.2f",scaleND, scaleDD, scaleSD);
- TLatex* text[7];
- for (Int_t i=1; i<7; i++) {
- legend->AddEntry(dNdEta[i], descr[i]);
+ // scale
+ dNdEtaCorrectionND->GetVertexRecoCorrection()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetVertexRecoCorrection()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetVertexRecoCorrection()->Scale(scaleSD);
+
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionINEL()->Scale(scaleSD);
+
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionNSD()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionNSD()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionNSD()->Scale(scaleSD);
+
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionND()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionND()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionND()->Scale(scaleSD);
+
+ dNdEtaCorrectionND->GetTriggerBiasCorrectionOnePart()->Scale(scaleND);
+ dNdEtaCorrectionDD->GetTriggerBiasCorrectionOnePart()->Scale(scaleDD);
+ dNdEtaCorrectionSD->GetTriggerBiasCorrectionOnePart()->Scale(scaleSD);
+
+ //clear track in correction
+ dNdEtaCorrectionND->GetTrack2ParticleCorrection()->Reset();
+ dNdEtaCorrectionDD->GetTrack2ParticleCorrection()->Reset();
+ dNdEtaCorrectionSD->GetTrack2ParticleCorrection()->Reset();
+
+ TList collection;
+ collection.Add(correctionStandard);
+ collection.Add(dNdEtaCorrectionND);
+ collection.Add(dNdEtaCorrectionDD);
+ collection.Add(dNdEtaCorrectionSD);
+
+ current->Merge(&collection);
+ current->Finish();
+
+ // print 0 bin efficiency
+ TH1* hist2 = current->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->Get1DCorrection("y", -10, 10);
+ if (hist2->GetBinContent(1) > 0)
+ {
+ Float_t triggerEff = 100.0 / hist2->GetBinContent(1);
+ Printf("trigger eff in 0 bin is: %.2f %%", triggerEff);
}
+
+ TFile* fout = new TFile(outputFileName,"RECREATE");
+ current->SaveHistograms();
- legend->Draw();
- //text(0.2,0.88,"Effect of changing",0.045,1,kTRUE);
- //text(0.2,0.83,"relative cross-sections",0.045,1,kTRUE);
- //text(0.2,0.78,"(vertex reconstruction corr.)",0.043,13,kTRUE);
+ fout->Write();
+ fout->Close();
- c->SaveAs(Form("%s.gif", c->GetName()));
- c->SaveAs(Form("%s.eps", c->GetName()));
+ Printf("Trigger efficiencies:");
+ Printf("ND: %.2f %%", 100.0 * dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetMeasuredHistogram()->Integral() / dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral());
+ Printf("SD: %.2f %%", 100.0 * dNdEtaCorrectionSD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetMeasuredHistogram()->Integral() / dNdEtaCorrectionSD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral());
+ Printf("DD: %.2f %%", 100.0 * dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetMeasuredHistogram()->Integral() / dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral());
+ Printf("INEL: %.2f %%", 100.0 * current->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetMeasuredHistogram()->Integral() / current->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral());
+ Printf("NSD: %.2f %%", 100.0 * (dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetMeasuredHistogram()->Integral() + dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetMeasuredHistogram()->Integral()) / (dNdEtaCorrectionND->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral() + dNdEtaCorrectionDD->GetTriggerBiasCorrectionINEL()->GetEventCorrection()->GetGeneratedHistogram()->Integral()));
}
-
-
DrawTriggerEfficiency(Char_t* fileName) {
gStyle->SetOptStat(0);
generatedPt[0]->Draw("same");
}
+// Systematic check of the unmeasured low-pt region: draws the measured
+// pt spectrum together with two variants that are scaled linearly below
+// ptCutOff (down to 1-factor resp. up to 1+factor at pt = 0) and prints
+// the fraction of the spectrum above the cut-off for each variant.
+// fileName  - input with the "dndeta_check_pt" histogram
+// ptCutOff  - pt below which the spectrum is varied
+// fileName2 - optional second input, normalized to the first at ptCutOff
+//             and drawn dashed for comparison
-void changePtSpectrum(const char* fileName = "analysis_mc.root")
+void changePtSpectrum(const char* fileName = "analysis_mc.root", Float_t ptCutOff = 0.2, const char* fileName2 = 0)
{
+ Float_t factor = 0.5; // maximum relative variation, reached at pt = 0
+
TFile* file = TFile::Open(fileName);
- TH1F* hist = dynamic_cast<TH1F*> (file->Get("dndeta_check_pt"));
+ TH1F* hist = dynamic_cast<TH1F*> (file->Get("dndeta_check_pt")->Clone());
+
+ TH1* hist2 = 0;
+ if (fileName2)
+ {
+ // NOTE(review): file2 is not declared here -- relies on a CINT
+ // implicit/global variable; Get() is also not checked for 0.
+ file2 = TFile::Open(fileName2);
+ hist2 = dynamic_cast<TH1*> (file2->Get("dndeta_check_pt")->Clone());
+ // normalize the comparison spectrum to the first one at the cut-off
+ hist2->Scale(hist->GetBinContent(hist->FindBin(ptCutOff)) / hist2->GetBinContent(hist2->FindBin(ptCutOff)));
+ }
+
+ //hist->Scale(1.0 / hist->Integral());
//hist->Rebin(3);
//hist->Scale(1.0/3);
TH1F* scale1 = dynamic_cast<TH1F*> (hist->Clone("scale1"));
TH1F* scale2 = dynamic_cast<TH1F*> (hist->Clone("scale2"));
- Float_t ptCutOff = 0.3;
-
for (Int_t i=1; i <= hist->GetNbinsX(); ++i)
{
if (hist->GetBinCenter(i) > ptCutOff)
else
{
// 90 % at pt = 0, 0% at pt = ptcutoff
+ // (with factor = 0.5 the scaling is actually 50% at pt = 0,
+ // rising linearly to 100% at ptCutOff)
- scale1->SetBinContent(i, 1 - (ptCutOff - hist->GetBinCenter(i)) / ptCutOff * 0.3);
+ scale1->SetBinContent(i, 1 - (ptCutOff - hist->GetBinCenter(i)) / ptCutOff * factor);
// 110% at pt = 0, ...
- scale2->SetBinContent(i, 1 + (ptCutOff - hist->GetBinCenter(i)) / ptCutOff * 0.3);
+ scale2->SetBinContent(i, 1 + (ptCutOff - hist->GetBinCenter(i)) / ptCutOff * factor);
}
scale1->SetBinError(i, 0); // scaling shapes are exact: no errors
scale2->SetBinError(i, 0);
}
+ /*
new TCanvas;
-
scale1->Draw();
scale2->SetLineColor(kRed);
scale2->Draw("SAME");
+ */
+ // NOTE(review): clone1/clone2, markers, fraction2 and the stray */ belong
+ // to context lines elided from this hunk -- do not judge them in isolation.
clone1->Multiply(scale1);
clone2->Multiply(scale2);
clone2->SetMarkerStyle(markers[0]);*/
hist->SetTitle(";p_{T} in GeV/c;dN_{ch}/dp_{T} in c/GeV");
- hist->GetXaxis()->SetRangeUser(0, 0.7);
+ hist->GetXaxis()->SetRangeUser(0, 0.5);
hist->GetYaxis()->SetRangeUser(0.01, clone2->GetMaximum() * 1.1);
hist->GetYaxis()->SetTitleOffset(1);
- TCanvas* canvas = new TCanvas;
+ TCanvas* canvas = new TCanvas("c", "c", 600, 600);
+ gPad->SetGridx();
+ gPad->SetGridy();
+ gPad->SetTopMargin(0.05);
+ gPad->SetRightMargin(0.05);
gPad->SetBottomMargin(0.12);
hist->Draw("H");
clone1->SetLineColor(kRed);
clone2->SetLineColor(kBlue);
clone2->Draw("HSAME");
hist->Draw("HSAME");
+
+ if (hist2)
+ {
+ Prepare1DPlot(hist2);
+ hist2->SetLineStyle(2); // dashed to distinguish the comparison input
+ hist2->Draw("HSAME");
+ }
+ // fraction of the spectrum above the cut-off, nominal and varied
Float_t fraction = hist->Integral(hist->GetXaxis()->FindBin(ptCutOff), hist->GetNbinsX()) / hist->Integral(1, hist->GetNbinsX());
Float_t fraction1 = clone1->Integral(clone1->GetXaxis()->FindBin(ptCutOff), clone1->GetNbinsX()) / clone1->Integral(1, clone1->GetNbinsX());
printf("%f %f %f\n", fraction, fraction1, fraction2);
printf("Rel. %f %f\n", fraction1 / fraction, fraction2 / fraction);
- canvas->SaveAs("changePtSpectrum.gif");
+ //canvas->SaveAs("changePtSpectrum.gif");
canvas->SaveAs("changePtSpectrum.eps");
}
legend->Draw();
}
+
+// Propagates the low-pt spectrum variation into the track-to-particle
+// correction: pt (z-axis) bins below ptCutOff of both the generated and
+// the measured 3D correction histograms are scaled down linearly (by up
+// to <factor> at pt = 0), then the correction is recalculated and the
+// correction info is printed before and after.
+// NOTE(review): generated and measured are scaled by the same factor, so
+// the per-bin ratio is unchanged -- presumably the effect enters through
+// Finish()'s integration of the maps; confirm.
+// fileName - file containing the correction maps
+// dirName  - directory/name of the AlidNdEtaCorrection object
+void ChangePtInCorrection(const char* fileName = "correction_map.root", const char* dirName = "dndeta_correction")
+{
+ loadlibs();
+ if (!TFile::Open(fileName))
+ return;
+
+ AlidNdEtaCorrection* dNdEtaCorrection = new AlidNdEtaCorrection(dirName, dirName);
+ if (!dNdEtaCorrection->LoadHistograms())
+ return;
+
+ dNdEtaCorrection->Finish();
+
+ AliCorrection* corr = dNdEtaCorrection->GetCorrection(AlidNdEtaCorrection::kTrack2Particle);
+
+ Printf(">>>> Before");
+ corr->PrintInfo(0);
+
+ Float_t factor = 0.5; // maximum relative variation, reached at pt = 0
+ Float_t ptCutOff = 0.2; // pt below which bins are modified
+
+ TH3* gene = corr->GetTrackCorrection()->GetGeneratedHistogram();
+ TH3* meas = corr->GetTrackCorrection()->GetMeasuredHistogram();
+
+ // loop over pt bins below the cut-off (z axis is pt); the -0.01 keeps
+ // the bin containing ptCutOff itself untouched
+ for (Int_t z = 1; z <= gene->GetZaxis()->FindBin(ptCutOff - 0.01); z++)
+ {
+ // linear suppression: 1-factor at pt = 0, 1 at pt = ptCutOff
+ Float_t localFactor = 1 - (ptCutOff - gene->GetZaxis()->GetBinCenter(z)) / ptCutOff * factor;
+ Printf("%f %f", gene->GetZaxis()->GetBinCenter(z), localFactor);
+ for (Int_t x = 1; x <= gene->GetNbinsX(); x++)
+ for (Int_t y = 1; y <= gene->GetNbinsY(); y++)
+ {
+ gene->SetBinContent(x, y, z, gene->GetBinContent(x, y, z) * localFactor);
+ meas->SetBinContent(x, y, z, meas->GetBinContent(x, y, z) * localFactor);
+ }
+ }
+
+ dNdEtaCorrection->Finish();
+
+ Printf(">>>> After");
+ corr->PrintInfo(0);
+}
+
+// Fits a constant ([0]) to <hist> in each of the n windows
+// [begin[i], end[i]] (edges snapped to bin boundaries), draws every
+// fitted constant on the current pad in <color>, and returns the
+// bin-weighted average of the fitted levels.
+// totalBins - out: total number of bins covered by all windows
+// NOTE(review): func is not declared locally -- CINT implicit/global;
+// each iteration leaks the previous TF1 under the same name "func".
+Float_t FitAverage(TH1* hist, Int_t n, Float_t* begin, Float_t *end, Int_t color, Int_t& totalBins)
+{
+ Float_t average = 0;
+ totalBins = 0;
+
+ for (Int_t i=0; i<n; i++)
+ {
+ // pol0 fit restricted to the window ("R"), quiet ("Q"), not auto-drawn ("N")
+ func = new TF1("func", "[0]", hist->GetXaxis()->GetBinLowEdge(hist->GetXaxis()->FindBin(begin[i])), hist->GetXaxis()->GetBinUpEdge(hist->GetXaxis()->FindBin(end[i])));
+ Int_t bins = hist->GetXaxis()->FindBin(end[i]) - hist->GetXaxis()->FindBin(begin[i]) + 1;
+ func->SetParameter(0, 1);
+ func->SetLineColor(color);
+
+ hist->Fit(func, "RNQ");
+ func->Draw("SAME");
+
+ // weight each window's fitted level by its number of bins
+ average += func->GetParameter(0) * bins;
+ totalBins += bins;
+ }
+
+ return average / totalBins;
+}
+
+// Estimates how many tracklets are lost in the SPD phi acceptance gaps:
+// the average level of "good" phi windows is fitted (FitAverage) and
+// compared to the level inside hand-identified gap windows; the summed
+// deficit is printed as a percentage of the total tracklet count.
+// Processed for eta < 0 and eta > 0 separately (or once over the full
+// range when all == kTRUE). Control plots go to gap1.png / gap2.png.
+// all    - kTRUE: one pass over the full eta range; kFALSE: two halves
+// mcFile - MC comparison file, drawn normalized to the data on top
+// NOTE(review): the eta bin range below is read from fEtaPhi of whatever
+// file is currently open (gFile) *before* analysis_esd_raw.root is opened
+// -- confirm the intended file is open when this macro is invoked.
+void SPDIntegrateGaps(Bool_t all, const char* mcFile = "../../../LHC10b2/v0or/spd/analysis_esd_raw.root")
+{
+ Float_t eta = 1.29;
+ Int_t binBegin = ((TH2*) gFile->Get("fEtaPhi"))->GetXaxis()->FindBin(-eta);
+ Int_t binEnd = ((TH2*) gFile->Get("fEtaPhi"))->GetXaxis()->FindBin(eta);
+
+ Printf("eta range: %f bins: %d %d", eta, binBegin, binEnd);
+
+ if (!all)
+ Printf("Eta smaller than 0 side");
+
+ c = new TCanvas;
+ TFile::Open("analysis_esd_raw.root");
+ // phi projection of the negative-eta half (bin 40: presumably the last
+ // bin below eta = 0 -- verify against the fEtaPhi binning)
+ hist = ((TH2*) gFile->Get("fEtaPhi"))->ProjectionY("hist", binBegin, (all) ? binEnd : 40);
+ hist->Rebin(2);
+ hist->SetStats(0);
+ hist->Sumw2();
+ hist->Draw("HIST");
+ gPad->SetGridx();
+ gPad->SetGridy();
+
+ TFile::Open(mcFile);
+ mcHist = ((TH2*) gFile->Get("fEtaPhi"))->ProjectionY("mcHist", binBegin, (all) ? binEnd : 40);
+ mcHist->Rebin(2);
+ mcHist->SetLineColor(2);
+ mcHist->Scale(hist->Integral() / mcHist->Integral()); // normalize MC to data
+ mcHist->Draw("SAME");
+
+ Float_t add = 0; // tracklet deficit accumulated over all gap windows
+ Int_t bins;
+
+ // phi windows (rad): okRange* = nominal acceptance, gapRange* = dead zones
+ Float_t okRangeBegin[] = { 0.04, 0.67, 1.34 };
+ Float_t okRangeEnd[] = { 0.55, 1.24, 1.63 };
+ Float_t gapRangeBegin[] = { 0.6, 1.27 };
+ Float_t gapRangeEnd[] = { 0.65, 1.32 };
+ Float_t averageOK = FitAverage(hist, 3, okRangeBegin, okRangeEnd, 1, bins);
+ Float_t averageGap = FitAverage(hist, 2, gapRangeBegin, gapRangeEnd, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Float_t okRangeBegin2[] = { 2.4, 2.65 };
+ Float_t okRangeEnd2[] = { 2.55, 3.2 };
+ Float_t gapRangeBegin2[] = { 2.59, 3.3 };
+ Float_t gapRangeEnd2[] = { 2.61, 3.3 };
+ averageOK = FitAverage(hist, 2, okRangeBegin2, okRangeEnd2, 1, bins);
+ averageGap = FitAverage(hist, 2, gapRangeBegin2, gapRangeEnd2, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Float_t okRangeBegin3[] = { 3.55, 3.9 };
+ Float_t okRangeEnd3[] = { 3.8, 4.15 };
+ Float_t gapRangeBegin3[] = { 3.83 };
+ Float_t gapRangeEnd3[] = { 3.86 };
+ averageOK = FitAverage(hist, 2, okRangeBegin3, okRangeEnd3, 1, bins);
+ averageGap = FitAverage(hist, 1, gapRangeBegin3, gapRangeEnd3, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Float_t okRangeBegin4[] = { 4.2, 4.5 };
+ Float_t okRangeEnd4[] = { 4.4, 4.7 };
+ Float_t gapRangeBegin4[] = { 4.45 };
+ Float_t gapRangeEnd4[] = { 4.45 };
+ averageOK = FitAverage(hist, 2, okRangeBegin4, okRangeEnd4, 1, bins);
+ averageGap = FitAverage(hist, 1, gapRangeBegin4, gapRangeEnd4, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Float_t okRangeBegin5[] = { 5.4, 5.7 };
+ Float_t okRangeEnd5[] = { 5.6, 5.8 };
+ Float_t gapRangeBegin5[] = { 5.63 };
+ Float_t gapRangeEnd5[] = { 5.67 };
+ averageOK = FitAverage(hist, 2, okRangeBegin5, okRangeEnd5, 1, bins);
+ averageGap = FitAverage(hist, 1, gapRangeBegin5, gapRangeEnd5, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Printf("This adds %.2f %% to the total number of tracklets (%f)", 100.0 * add / hist->Integral(), hist->Integral());
+ c->SaveAs("gap1.png");
+
+ // remember first half (add1/total1 are CINT implicit globals)
+ add1 = add;
+ total1 = hist->Integral();
+
+ if (all)
+ return;
+
+ Printf("\nEta larger than 0 side");
+
+ // same procedure for the positive-eta half (bins 41..binEnd)
+ c = new TCanvas;
+ TFile::Open("analysis_esd_raw.root");
+ hist = ((TH2*) gFile->Get("fEtaPhi"))->ProjectionY("hist2", 41, binEnd);
+ hist->Rebin(2);
+ hist->SetStats(0);
+ hist->Sumw2();
+ hist->Draw("HIST");
+ gPad->SetGridx();
+ gPad->SetGridy();
+
+ TFile::Open(mcFile);
+ mcHist = ((TH2*) gFile->Get("fEtaPhi"))->ProjectionY("mcHist", 41, binEnd);
+ mcHist->Rebin(2);
+ mcHist->SetLineColor(2);
+ mcHist->Scale(hist->Integral() / mcHist->Integral());
+ mcHist->Draw("SAME");
+
+ add = 0;
+
+ // NOTE(review): okRangeBegin etc. are re-declared in the same function
+ // scope -- accepted by the ROOT/CINT interpreter, invalid in compiled C++.
+ Float_t okRangeBegin[] = { 0.04, 0.67, 1.34 };
+ Float_t okRangeEnd[] = { 0.55, 1.24, 1.63 };
+ Float_t gapRangeBegin[] = { 0.6, 1.27 };
+ Float_t gapRangeEnd[] = { 0.65, 1.32 };
+ Float_t averageOK = FitAverage(hist, 3, okRangeBegin, okRangeEnd, 1, bins);
+ Float_t averageGap = FitAverage(hist, 2, gapRangeBegin, gapRangeEnd, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Float_t okRangeBegin2[] = { 2.32, 2.65 };
+ Float_t okRangeEnd2[] = { 2.55, 3.2 };
+ Float_t gapRangeBegin2[] = { 2.59, 3.3 };
+ Float_t gapRangeEnd2[] = { 2.61, 3.3 };
+ averageOK = FitAverage(hist, 2, okRangeBegin2, okRangeEnd2, 1, bins);
+ averageGap = FitAverage(hist, 2, gapRangeBegin2, gapRangeEnd2, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Float_t okRangeBegin3[] = { 3.55, 3.9 };
+ Float_t okRangeEnd3[] = { 3.8, 4.15 };
+ Float_t gapRangeBegin3[] = { 3.83 };
+ Float_t gapRangeEnd3[] = { 3.86 };
+ averageOK = FitAverage(hist, 2, okRangeBegin3, okRangeEnd3, 1, bins);
+ averageGap = FitAverage(hist, 1, gapRangeBegin3, gapRangeEnd3, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Float_t okRangeBegin4[] = { 4.2, 4.5 };
+ Float_t okRangeEnd4[] = { 4.4, 4.7 };
+ Float_t gapRangeBegin4[] = { 4.45 };
+ Float_t gapRangeEnd4[] = { 4.45 };
+ averageOK = FitAverage(hist, 2, okRangeBegin4, okRangeEnd4, 1, bins);
+ averageGap = FitAverage(hist, 1, gapRangeBegin4, gapRangeEnd4, 2, bins);
+ add += bins * (averageOK - averageGap);
+ Printf("Average OK: %f %f %d: %f", averageOK, averageGap, bins, add);
+
+ Printf("This adds %.2f %% to the total number of tracklets (%f)", 100.0 * add / hist->Integral(), hist->Integral());
+ c->SaveAs("gap2.png");
+
+ // combine both halves for the final estimate
+ Printf("In total we add %.2f %%.", 100.0 * (add1 + add) / (total1 + hist->Integral()));
+}