]> git.uio.no Git - u/mrichter/AliRoot.git/commitdiff
updated equations for use with weights, added macro to merge grid output files
authorsnelling <snelling@f7af4fe6-9843-0410-8265-dc069ae4e863>
Fri, 29 Jan 2010 12:53:39 +0000 (12:53 +0000)
committersnelling <snelling@f7af4fe6-9843-0410-8265-dc069ae4e863>
Fri, 29 Jan 2010 12:53:39 +0000 (12:53 +0000)
PWG2/FLOW/AliFlowCommon/AliFlowAnalysisWithFittingQDistribution.cxx
PWG2/FLOW/AliFlowCommon/AliFlowAnalysisWithQCumulants.cxx
PWG2/FLOW/AliFlowCommon/AliFlowAnalysisWithQCumulants.h
PWG2/FLOW/macros/mergeOutput.C
PWG2/FLOW/macros/mergeOutputOnGrid.C [new file with mode: 0644]
PWG2/FLOW/macros/redoFinish.C

index bcd6fc20cef31255b722635da64d6f6988961c79..7f59497bcf497234297c50c443e67df305c23ebb 100644 (file)
@@ -954,8 +954,12 @@ void AliFlowAnalysisWithFittingQDistribution::PrintFinalResultsForIntegratedFlow
  }
  
  // shortcut for the harmonic:
- Int_t n = (Int_t)(fCommonHists->GetHarmonic())->GetBinContent(1); 
-
+ Int_t n = 0;
+ if(fCommonHists->GetHarmonic())
+ {
+  n = (Int_t)(fCommonHists->GetHarmonic())->GetBinContent(1); 
+ }
  // printing:
  cout<<" "<<endl;
  cout<<"***************************************"<<endl;
index 1e6596960538f61900c7109362375f475ba1824e..eaf7ec0f991bd99dc01c70127de0be9ff34ca6c0 100644 (file)
@@ -137,7 +137,9 @@ AliFlowAnalysisWithQCumulants::AliFlowAnalysisWithQCumulants():
  fDiffFlowFlags(NULL),\r
  fCalculate2DFlow(kFALSE),\r
  // 5.) distributions:\r
- fDistributionsList(NULL),\r
+ fDistributionsList(NULL),
+ fDistributionsFlags(NULL),
+ fStoreDistributions(kFALSE),\r
  // x.) debugging and cross-checking:\r
  fNestedLoopsList(NULL),\r
  fEvaluateIntFlowNestedLoops(kFALSE),\r
@@ -193,8 +195,9 @@ void AliFlowAnalysisWithQCumulants::Init()
 {\r
  // a) Access all common constants;\r
  // b) Book all objects;\r
- // c) Store flags for integrated and differential flow;\r
- // d) Store harmonic which will be estimated.\r
+ // c) Store flags for integrated and differential flow;
+ // d) Store flags for distributions of correlations;\r
+ // e) Store harmonic which will be estimated.\r
   \r
  // a) Access all common constants:\r
  this->AccessConstants();\r
@@ -211,8 +214,11 @@ void AliFlowAnalysisWithQCumulants::Init()
  // c) Store flags for integrated and differential flow:\r
  this->StoreIntFlowFlags();\r
  this->StoreDiffFlowFlags();\r
+ // d) Store flags for distributions of correlations:\r
+ this->StoreFlagsForDistributions();\r
 \r
- // d) Store harmonic which will be estimated:\r
+ // e) Store harmonic which will be estimated:\r
  this->StoreHarmonic();\r
  \r
 } // end of void AliFlowAnalysisWithQCumulants::Init()\r
@@ -227,7 +233,7 @@ void AliFlowAnalysisWithQCumulants::Make(AliFlowEventSimple* anEvent)
  \r
  //  a) Fill the common control histograms and call the method to fill fAvMultiplicity;\r
  //  b) Loop over data and calculate e-b-e quantities;\r
- //  c) call the methods;\r
+ //  c) Call all the methods;\r
  //  d) Debugging and cross-checking (evaluate nested loops);\r
  //  e) Reset all event by event quantities. \r
  \r
@@ -497,11 +503,18 @@ void AliFlowAnalysisWithQCumulants::Make(AliFlowEventSimple* anEvent)
   if(nRP>3) this->CalculateIntFlowProductOfCorrelations();\r
   if(nRP>1) this->CalculateIntFlowSumOfEventWeights();\r
   if(nRP>1) this->CalculateIntFlowSumOfProductOfEventWeights();\r
-  if(fApplyCorrectionForNUA && !(fUsePhiWeights||fUsePtWeights||fUseEtaWeights)) // to be improved (enable correction for NUA also when particle weights are used?)\r
-  {\r
-   if(nRP>0) this->CalculateIntFlowCorrectionsForNUASinTerms();\r
-   if(nRP>0) this->CalculateIntFlowCorrectionsForNUACosTerms();\r
-  }\r
+  if(fApplyCorrectionForNUA)\r
+  {\r
+   if(!(fUsePhiWeights||fUsePtWeights||fUseEtaWeights))
+   {
+    if(nRP>0) this->CalculateIntFlowCorrectionsForNUASinTerms();\r
+    if(nRP>0) this->CalculateIntFlowCorrectionsForNUACosTerms();\r
+   } else // to if(!(fUsePhiWeights||fUsePtWeights||fUseEtaWeights))
+     {
+      if(nRP>0) this->CalculateIntFlowCorrectionsForNUASinTermsUsingParticleWeights();\r
+      if(nRP>0) this->CalculateIntFlowCorrectionsForNUACosTermsUsingParticleWeights();     
+     }  
+  } // end of if(fApplyCorrectionForNUA)\r
  } // end of if(!fEvaluateIntFlowNestedLoops)\r
 \r
  // differential flow:\r
@@ -509,23 +522,43 @@ void AliFlowAnalysisWithQCumulants::Make(AliFlowEventSimple* anEvent)
  {\r
   if(!(fUsePhiWeights||fUsePtWeights||fUseEtaWeights))\r
   {\r
-   if(nRP>1) // to be improved (move this if somewhere else)\r
-   {\r
-    // without using particle weights:\r
-    this->CalculateDiffFlowCorrelations("RP","Pt"); \r
-    this->CalculateDiffFlowCorrelations("RP","Eta");\r
-    this->CalculateDiffFlowCorrelations("POI","Pt");\r
-    this->CalculateDiffFlowCorrelations("POI","Eta");\r
-   }  \r
-  } else\r
+   // without using particle weights:\r
+   this->CalculateDiffFlowCorrelations("RP","Pt"); \r
+   this->CalculateDiffFlowCorrelations("RP","Eta");\r
+   this->CalculateDiffFlowCorrelations("POI","Pt");\r
+   this->CalculateDiffFlowCorrelations("POI","Eta");
+   if(fApplyCorrectionForNUA)
+   {
+    this->CalculateDiffFlowCorrectionsForNUASinTerms("RP","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUASinTerms("RP","Eta");\r
+    this->CalculateDiffFlowCorrectionsForNUASinTerms("POI","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUASinTerms("POI","Eta");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTerms("RP","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTerms("RP","Eta");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTerms("POI","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTerms("POI","Eta");   
+   } // end of if(fApplyCorrectionForNUA)  \r
+  } else // to if(!(fUsePhiWeights||fUsePtWeights||fUseEtaWeights))\r
     {\r
      // with using particle weights:   \r
      this->CalculateDiffFlowCorrelationsUsingParticleWeights("RP","Pt"); \r
      this->CalculateDiffFlowCorrelationsUsingParticleWeights("RP","Eta"); \r
      this->CalculateDiffFlowCorrelationsUsingParticleWeights("POI","Pt"); \r
      this->CalculateDiffFlowCorrelationsUsingParticleWeights("POI","Eta"); \r
+     if(fApplyCorrectionForNUA)
+     {
+      this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("RP","Pt");\r
+      this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("RP","Eta");\r
+      this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("POI","Pt");\r
+      this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("POI","Eta");\r
+      this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("RP","Pt");\r
+      this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("RP","Eta");\r
+      this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("POI","Pt");\r
+      this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("POI","Eta");   
+     } // end of if(fApplyCorrectionForNUA)  \r
     } \r
-    \r
+    
+  // whether or not using particle weights the following is calculated in the same way:  \r
   this->CalculateDiffFlowProductOfCorrelations("RP","Pt");\r
   this->CalculateDiffFlowProductOfCorrelations("RP","Eta");\r
   this->CalculateDiffFlowProductOfCorrelations("POI","Pt");\r
@@ -537,19 +570,7 @@ void AliFlowAnalysisWithQCumulants::Make(AliFlowEventSimple* anEvent)
   this->CalculateDiffFlowSumOfProductOfEventWeights("RP","Pt");\r
   this->CalculateDiffFlowSumOfProductOfEventWeights("RP","Eta");\r
   this->CalculateDiffFlowSumOfProductOfEventWeights("POI","Pt");\r
-  this->CalculateDiffFlowSumOfProductOfEventWeights("POI","Eta");\r
-  if(fApplyCorrectionForNUA && !(fUsePhiWeights||fUsePtWeights||fUseEtaWeights)) // to be improved (enable correction for NUA also when particle weights are used?)\r
-  {\r
-   this->CalculateDiffFlowCorrectionsForNUASinTerms("RP","Pt");\r
-   this->CalculateDiffFlowCorrectionsForNUASinTerms("RP","Eta");\r
-   this->CalculateDiffFlowCorrectionsForNUASinTerms("POI","Pt");\r
-   this->CalculateDiffFlowCorrectionsForNUASinTerms("POI","Eta");\r
-   this->CalculateDiffFlowCorrectionsForNUACosTerms("RP","Pt");\r
-   this->CalculateDiffFlowCorrectionsForNUACosTerms("RP","Eta");\r
-   this->CalculateDiffFlowCorrectionsForNUACosTerms("POI","Pt");\r
-   this->CalculateDiffFlowCorrectionsForNUACosTerms("POI","Eta");\r
-  }\r
-  \r
+  this->CalculateDiffFlowSumOfProductOfEventWeights("POI","Eta");   \r
  } // end of if(!fEvaluateDiffFlowNestedLoops)\r
 \r
 \r
@@ -573,8 +594,13 @@ void AliFlowAnalysisWithQCumulants::Make(AliFlowEventSimple* anEvent)
    } \r
   } // end of if(fCalculate2DFlow)\r
   */\r
+  
+ // distributions of correlations:
+ if(fStoreDistributions)
+ {
+  this->StoreDistributionsOfCorrelations();
+ }
   \r
\r
  // d) Debugging and cross-checking (evaluate nested loops):\r
  //  d1) cross-checking results for integrated flow:\r
  if(fEvaluateIntFlowNestedLoops)\r
@@ -598,6 +624,10 @@ void AliFlowAnalysisWithQCumulants::Make(AliFlowEventSimple* anEvent)
     // correlations:\r
     this->CalculateIntFlowCorrelationsUsingParticleWeights(); // from Q-vectors\r
     this->EvaluateIntFlowCorrelationsWithNestedLoopsUsingParticleWeights(anEvent); // from nested loops (to be improved: do I have to pass here anEvent or not?)\r
+    // correction for non-uniform acceptance:\r
+    this->CalculateIntFlowCorrectionsForNUASinTermsUsingParticleWeights(); // from Q-vectors (sin terms)\r
+    this->CalculateIntFlowCorrectionsForNUACosTermsUsingParticleWeights(); // from Q-vectors (cos terms)\r
+    this->EvaluateIntFlowCorrectionsForNUAWithNestedLoopsUsingParticleWeights(anEvent); // from nested loops (both sin and cos terms)   
    }\r
   } else if (nPrim>fMaxAllowedMultiplicity) // to if(nPrim>0 && nPrim<=fMaxAllowedMultiplicity)\r
     {\r
@@ -652,10 +682,22 @@ void AliFlowAnalysisWithQCumulants::Make(AliFlowEventSimple* anEvent)
     this->CalculateDiffFlowCorrelationsUsingParticleWeights("RP","Eta"); \r
     this->CalculateDiffFlowCorrelationsUsingParticleWeights("POI","Pt"); \r
     this->CalculateDiffFlowCorrelationsUsingParticleWeights("POI","Eta"); \r
+    this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("RP","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("RP","Eta");\r
+    this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("POI","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights("POI","Eta");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("RP","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("RP","Eta");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("POI","Pt");\r
+    this->CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights("POI","Eta");\r
     this->EvaluateDiffFlowCorrelationsWithNestedLoopsUsingParticleWeights(anEvent,"RP","Pt"); // to be improved (enabled eventually)\r
     this->EvaluateDiffFlowCorrelationsWithNestedLoopsUsingParticleWeights(anEvent,"RP","Eta"); // to be improved (enabled eventually)\r
     this->EvaluateDiffFlowCorrelationsWithNestedLoopsUsingParticleWeights(anEvent,"POI","Pt"); // to be improved (do I need to pass here anEvent?)\r
-    this->EvaluateDiffFlowCorrelationsWithNestedLoopsUsingParticleWeights(anEvent,"POI","Eta"); // to be improved (do I need to pass here anEvent?)\r
+    this->EvaluateDiffFlowCorrelationsWithNestedLoopsUsingParticleWeights(anEvent,"POI","Eta"); // to be improved (do I need to pass here anEvent?)    
+    //this->EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoopsUsingParticleWeights(anEvent,"RP","Pt"); // to be improved (enabled eventually)\r
+    //this->EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoopsUsingParticleWeights(anEvent,"RP","Eta"); // to be improved (enabled eventually)\r
+    this->EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoopsUsingParticleWeights(anEvent,"POI","Pt"); // to be improved (do I need to pass here anEvent?)\r
+    this->EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoopsUsingParticleWeights(anEvent,"POI","Eta"); // to be improved (do I need to pass here anEvent?)\r
    } // end of if(fUsePhiWeights||fUsePtWeights||fUseEtaWeights)\r
   } // end of if(nPrim>0 && nPrim<=fMaxAllowedMultiplicity) // by default fMaxAllowedMultiplicity = 10\r
  } // end of if(fEvaluateDiffFlowNestedLoops) \r
@@ -777,19 +819,15 @@ void AliFlowAnalysisWithQCumulants::Finish()
  this->PrintFinalResultsForIntegratedFlow("POI"); \r
   \r
  // g) cross-check the results: results from Q-vectors vs results from nested loops\r
  //  g1) integrated flow:\r
  if(fEvaluateIntFlowNestedLoops)\r
  {\r
-  if(!(fUsePhiWeights||fUsePtWeights||fUseEtaWeights)) \r
-  {\r
-   this->CrossCheckIntFlowCorrelations();\r
-   this->CrossCheckIntFlowCorrectionTermsForNUA(); \r
-  } else\r
-    {\r
-     this->CrossCheckIntFlowCorrelations();     \r
-     this->CrossCheckIntFlowExtraCorrelations();     \r
-    }\r
+  this->CrossCheckIntFlowCorrelations();\r
+  this->CrossCheckIntFlowCorrectionTermsForNUA(); \r
+  if(fUsePhiWeights||fUsePtWeights||fUseEtaWeights) this->CrossCheckIntFlowExtraCorrelations();     \r
  } // end of if(fEvaluateIntFlowNestedLoops)  \r
  //  g2) differential flow: \r
  if(fEvaluateDiffFlowNestedLoops) \r
  {\r
@@ -1495,7 +1533,6 @@ void AliFlowAnalysisWithQCumulants::BookEverythingForIntegratedFlow()
   // extra 2-p correlations:\r
   (fIntFlowExtraCorrelationsPro->GetXaxis())->SetBinLabel(1,"<<w1^3 w2 cos(n*(phi1-phi2))>>");\r
   (fIntFlowExtraCorrelationsPro->GetXaxis())->SetBinLabel(2,"<<w1 w2 w3^2 cos(n*(phi1-phi2))>>");\r
-  // ...\r
   fIntFlowProfiles->Add(fIntFlowExtraCorrelationsPro);\r
  } // end of if(fUsePhiWeights||fUsePtWeights||fUseEtaWeights)\r
  // average product of correlations <2>, <4>, <6> and <8>:  \r
@@ -1820,24 +1857,24 @@ void AliFlowAnalysisWithQCumulants::BookEverythingForNestedLoops()
     } // end of for(Int_t rci=0;rci<4;rci++) // correlation index\r
    } // end of for(Int_t pe=0;pe<2;pe++) // pt or eta \r
   } // end of for(Int_t t=0;t<2;t++) // type: RP or POI \r
- } // end of if(fEvaluateDiffFlowNestedLoops)\r
- // correction terms for non-uniform acceptance:\r
- TString diffFlowDirectCorrectionTermsForNUAName = "fDiffFlowDirectCorrectionTermsForNUA";\r
- diffFlowDirectCorrectionTermsForNUAName += fAnalysisLabel->Data();\r
- for(Int_t t=0;t<2;t++) // typeFlag (0 = RP, 1 = POI)\r
- { \r
-  for(Int_t pe=0;pe<2;pe++) // pt or eta\r
-  {\r
-   for(Int_t sc=0;sc<2;sc++) // sin or cos\r
+  // correction terms for non-uniform acceptance:\r
+  TString diffFlowDirectCorrectionTermsForNUAName = "fDiffFlowDirectCorrectionTermsForNUA";\r
+  diffFlowDirectCorrectionTermsForNUAName += fAnalysisLabel->Data();\r
+  for(Int_t t=0;t<2;t++) // typeFlag (0 = RP, 1 = POI)\r
+  { \r
+   for(Int_t pe=0;pe<2;pe++) // pt or eta\r
    {\r
-    for(Int_t cti=0;cti<9;cti++) // correction term index\r
+    for(Int_t sc=0;sc<2;sc++) // sin or cos\r
     {\r
-     fDiffFlowDirectCorrectionTermsForNUA[t][pe][sc][cti] = new TProfile(Form("%s, %s, %s, %s, cti = %d",diffFlowDirectCorrectionTermsForNUAName.Data(),typeFlag[t].Data(),ptEtaFlag[pe].Data(),sinCosFlag[sc].Data(),cti+1),Form("%s, %s, %s, %s, cti = %d",diffFlowDirectCorrectionTermsForNUAName.Data(),typeFlag[t].Data(),ptEtaFlag[pe].Data(),sinCosFlag[sc].Data(),cti+1),1,lowerPtEtaEdge[pe],upperPtEtaEdge[pe],"s"); \r
-     fNestedLoopsList->Add(fDiffFlowDirectCorrectionTermsForNUA[t][pe][sc][cti]);\r
+     for(Int_t cti=0;cti<9;cti++) // correction term index\r
+     {\r
+      fDiffFlowDirectCorrectionTermsForNUA[t][pe][sc][cti] = new TProfile(Form("%s, %s, %s, %s, cti = %d",diffFlowDirectCorrectionTermsForNUAName.Data(),typeFlag[t].Data(),ptEtaFlag[pe].Data(),sinCosFlag[sc].Data(),cti+1),Form("%s, %s, %s, %s, cti = %d",diffFlowDirectCorrectionTermsForNUAName.Data(),typeFlag[t].Data(),ptEtaFlag[pe].Data(),sinCosFlag[sc].Data(),cti+1),1,lowerPtEtaEdge[pe],upperPtEtaEdge[pe],"s"); \r
+      fNestedLoopsList->Add(fDiffFlowDirectCorrectionTermsForNUA[t][pe][sc][cti]);\r
+     }\r
     }\r
    }\r
-  }\r
- } \r
+  } \r
+ } // end of if(fEvaluateDiffFlowNestedLoops)\r
 \r
 } // end of AliFlowAnalysisWithQCumulants::BookEverythingForNestedLoops()\r
 \r
@@ -3178,8 +3215,7 @@ void AliFlowAnalysisWithQCumulants::CalculateIntFlowCorrelationsUsingParticleWei
  \r
  // 1st bin: two1n1nW3W1 = <w1^3 w2 cos(n*(phi1-phi2))>\r
  // 2nd bin: two1n1nW1W1W2 = <w1 w2 w3^2 cos(n*(phi1-phi2))>  \r
- // ...\r
-  \r
  // multiplicity (number of particles used to determine the reaction plane)\r
  Double_t dMult = (*fSMpk)(0,0);\r
  \r
@@ -4340,62 +4376,130 @@ void AliFlowAnalysisWithQCumulants::CalculateFinalResultsForRPandPOIIntegratedFl
 \r
 void AliFlowAnalysisWithQCumulants::InitializeArraysForDistributions()\r
 {\r
- // initialize arrays used for distributions:\r
\r
- /*\r
\r
- for(Int_t pW=0;pW<2;pW++) // particle weights not used (0) or used (1)\r
- {\r
-  for(Int_t eW=0;eW<2;eW++)\r
-  {\r
-   for(Int_t di=0;di<4;di++) // distribution index\r
-   {\r
-    fDistributions[pW][eW][di] = NULL;\r
-   }\r
-  } \r
+ // Initialize all arrays used for distributions.
+ // a) Initialize arrays of histograms used to hold distributions of correlations; 
+ // b) Initialize array to hold min and max values of correlations.
+ // a) Initialize arrays of histograms used to hold distributions of correlations:
+ for(Int_t di=0;di<4;di++) // distribution index\r
+ {\r
+  fDistributions[di] = NULL;\r
  }\r
\r
- */\r
+ // b) Initialize default min and max values of correlations:
+ //    (Remark: The default values below were chosen for v2=5% and M=500)
+ fMinValueOfCorrelation[0] = -0.01; // <2>_min 
+ fMaxValueOfCorrelation[0] = 0.04; // <2>_max 
+ fMinValueOfCorrelation[1] = -0.00002; // <4>_min 
+ fMaxValueOfCorrelation[1] = 0.00015; // <4>_max  
+ fMinValueOfCorrelation[2] = -0.0000003; // <6>_min 
+ fMaxValueOfCorrelation[2] = 0.0000006; // <6>_max  
+ fMinValueOfCorrelation[3] = -0.000000006; // <8>_min 
+ fMaxValueOfCorrelation[3] = 0.000000003; // <8>_max 
  \r
 } // end of void AliFlowAnalysisWithQCumulants::InitializeArraysForDistributions()\r
 \r
-\r
+
 //================================================================================================================================\r
 \r
 \r
 void AliFlowAnalysisWithQCumulants::BookEverythingForDistributions()\r
 {\r
- // book all histograms for distributions\r
\r
- /*\r
- //weighted <2>_{n|n} distribution\r
- f2pDistribution = new TH1D("f2pDistribution","<2>_{n|n} distribution",100000,-0.02,0.1);\r
- f2pDistribution->SetXTitle("<2>_{n|n}");\r
- f2pDistribution->SetYTitle("Counts");\r
- fHistList->Add(f2pDistribution);\r
-\r
- //weighted <4>_{n,n|n,n} distribution\r
- f4pDistribution = new TH1D("f4pDistribution","<4>_{n,n|n,n} distribution",100000,-0.00025,0.002);\r
- f4pDistribution->SetXTitle("<4>_{n,n|n,n}");\r
- f4pDistribution->SetYTitle("Counts");\r
- fHistList->Add(f4pDistribution); \r
\r
- //weighted <6>_{n,n,n|n,n,n} distribution\r
- f6pDistribution = new TH1D("f6pDistribution","<6>_{n,n,n|n,n,n} distribution",100000,-0.000005,0.000025);\r
- f6pDistribution->SetXTitle("<6>_{n,n,n|n,n,n}");\r
- f6pDistribution->SetYTitle("Counts");\r
- fHistList->Add(f6pDistribution);\r
\r
- //weighted <8>_{n,n,n,n|n,n,n,n} distribution\r
- f8pDistribution = new TH1D("f8pDistribution","<8>_{n,n,n,n|n,n,n,n} distribution",100000,-0.000000001,0.00000001);\r
- f8pDistribution->SetXTitle("<8>_{n,n,n,n|n,n,n,n}");\r
- f8pDistribution->SetYTitle("Counts");\r
- fHistList->Add(f8pDistribution);\r
- */\r
\r
+ // a) Book profile to hold all flags for distributions of correlations;
+ // b) Book all histograms to hold distributions of correlations.
+ TString correlationIndex[4] = {"<2>","<4>","<6>","<8>"}; // to be improved (should I promote this to data members?)
+  
+ // a) Book profile to hold all flags for distributions of correlations:
+ TString distributionsFlagsName = "fDistributionsFlags";\r
+ distributionsFlagsName += fAnalysisLabel->Data();\r
+ fDistributionsFlags = new TProfile(distributionsFlagsName.Data(),"Flags for Distributions of Correlations",9,0,9);\r
+ fDistributionsFlags->SetTickLength(-0.01,"Y");\r
+ fDistributionsFlags->SetMarkerStyle(25);\r
+ fDistributionsFlags->SetLabelSize(0.05);\r
+ fDistributionsFlags->SetLabelOffset(0.02,"Y");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(1,"Store or not?");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(2,"<2>_{min}");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(3,"<2>_{max}");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(4,"<4>_{min}");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(5,"<4>_{max}");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(6,"<6>_{min}");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(7,"<6>_{max}");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(8,"<8>_{min}");\r
+ (fDistributionsFlags->GetXaxis())->SetBinLabel(9,"<8>_{max}");\r
+ fDistributionsList->Add(fDistributionsFlags);\r
+ // b) Book all histograms to hold distributions of correlations.
+ if(fStoreDistributions)
+ { 
+  TString distributionsName = "fDistributions";\r
+  distributionsName += fAnalysisLabel->Data();\r
+  for(Int_t di=0;di<4;di++) // distribution index\r
+  {
+   fDistributions[di] = new TH1D(Form("Distribution of %s",correlationIndex[di].Data()),Form("Distribution of %s",correlationIndex[di].Data()),10000,fMinValueOfCorrelation[di],fMaxValueOfCorrelation[di]); \r
+   fDistributions[di]->SetXTitle(correlationIndex[di].Data());\r
+   fDistributionsList->Add(fDistributions[di]);\r
+  } // end of for(Int_t di=0;di<4;di++) // distribution index
+ } // end of if(fStoreDistributions)
 } // end of void AliFlowAnalysisWithQCumulants::BookEverythingForDistributions()\r
 \r
 \r
+//================================================================================================================================\r
+\r
+
+void AliFlowAnalysisWithQCumulants::StoreFlagsForDistributions()\r
+{\r
+ // Store all flags for distributions of correlations in profile fDistributionsFlags.\r
\r
+ if(!fDistributionsFlags)\r
+ {\r
+  cout<<"WARNING: fDistributionsFlags is NULL in AFAWQC::SDF() !!!!"<<endl;\r
+  exit(0);\r
+ } \r
+\r
+ fDistributionsFlags->Fill(0.5,(Int_t)fStoreDistributions); // histos with distributions of correlations stored or not in the output file
+ // store min and max values of correlations:
+ for(Int_t di=0;di<4;di++) // distribution index\r
+ {\r
+  fDistributionsFlags->Fill(1.5+2.*(Double_t)di,fMinValueOfCorrelation[di]);
+  fDistributionsFlags->Fill(2.5+2.*(Double_t)di,fMaxValueOfCorrelation[di]);
+ }\r
+     \r
+} // end of void AliFlowAnalysisWithQCumulants::StoreFlagsForDistributions()\r
+\r
+
+//================================================================================================================================\r
+
+
+void AliFlowAnalysisWithQCumulants::StoreDistributionsOfCorrelations()
+{
+ // Store distributions of correlations.
+ if(!(fIntFlowCorrelationsEBE && fIntFlowEventWeightsForCorrelationsEBE))\r
+ {\r
+  cout<<"WARNING: fIntFlowCorrelationsEBE && fIntFlowEventWeightsForCorrelationsEBE"<<endl; \r
+  cout<<"         is NULL in AFAWQC::SDOC() !!!!"<<endl;\r
+  exit(0);\r
+ }\r
+
+ for(Int_t di=0;di<4;di++) // distribution index\r
+ {
+  if(!fDistributions[di])\r
+  { \r
+   cout<<"WARNING: fDistributions[di] is NULL in AFAWQC::SDOC() !!!!"<<endl;\r
+   cout<<"di = "<<di<<endl;\r
+   exit(0);\r
+  } else 
+    {
+     fDistributions[di]->Fill(fIntFlowCorrelationsEBE->GetBinContent(di+1),fIntFlowEventWeightsForCorrelationsEBE->GetBinContent(di+1)); 
+    } \r
+ } // end of for(Int_t di=0;di<4;di++) // distribution index\r
+
+} // end of void AliFlowAnalysisWithQCumulants::StoreDistributionsOfCorrelations()
+
+
 //================================================================================================================================\r
 \r
 \r
@@ -8807,7 +8911,7 @@ void AliFlowAnalysisWithQCumulants::EvaluateIntFlowCorrectionsForNUAWithNestedLo
  // Evaluate with nested loops correction terms for non-uniform acceptance relevant for NONAME integrated flow (to be improved (name)).\r
  //\r
  // Remark: Both sin and cos correction terms are calculated in this method. Sin terms are stored in fIntFlowDirectCorrectionTermsForNUA[0],\r
- // and cos terms in fIntFlowCorrectionTermsForNUAPro[sc]fIntFlowDirectCorrectionTermsForNUA[1]. Binning of fIntFlowDirectCorrectionTermsForNUA[sc] is organized as follows \r
+ // and cos terms in fIntFlowDirectCorrectionTermsForNUA[1]. Binning of fIntFlowDirectCorrectionTermsForNUA[sc] is organized as follows \r
  // (sc stands for either sin or cos):\r
  \r
  //  1st bin: <<sc(n*(phi1))>> \r
@@ -9435,4 +9539,902 @@ void AliFlowAnalysisWithQCumulants::CrossCheckDiffFlowCorrectionTermsForNUA(TStr
 } // end of void AliFlowAnalysisWithQCumulants::CrossCheckDiffFlowCorrectionTermsForNUA(TString type, TString ptOrEta)\r
 \r
 \r
+//================================================================================================================================
+
+\r
+void AliFlowAnalysisWithQCumulants::CalculateIntFlowCorrectionsForNUACosTermsUsingParticleWeights()\r
+{\r
+ // Calculate corrections using particle weights for non-uniform acceptance of the detector for no-name integrated flow (cos terms).\r
\r
+ //                                  **********************************************************************\r
+ //                                  **** weighted corrections for non-uniform acceptance (cos terms): ****\r
+ //                                  **********************************************************************\r
+ // Remark 1: When particle weights are used the binning of fIntFlowCorrectionTermsForNUAPro[1] is organized as follows:\r
+ //
+ // 1st bin: <<w1 cos(n*(phi1))>> = cosP1nW1\r
+ // 2nd bin: <<w1 w2 cos(n*(phi1+phi2))>> = cosP1nP1nW1W1\r
+ // 3rd bin: <<w1 w2 w3 cos(n*(phi1-phi2-phi3))>> = cosP1nM1nM1nW1W1W1 \r
+ // ...\r
+
+ // multiplicity (number of particles used to determine the reaction plane)\r
+ Double_t dMult = (*fSMpk)(0,0);\r
\r
+ // real and imaginary parts of weighted Q-vectors evaluated in harmonics n, 2n, 3n and 4n: \r
+ Double_t dReQ1n1k = (*fReQ)(0,1);\r
+ Double_t dReQ2n2k = (*fReQ)(1,2);\r
+ //Double_t dReQ3n3k = (*fReQ)(2,3);\r
+ //Double_t dReQ4n4k = (*fReQ)(3,4);\r
+ Double_t dReQ1n3k = (*fReQ)(0,3);\r
+ Double_t dImQ1n1k = (*fImQ)(0,1);\r
+ Double_t dImQ2n2k = (*fImQ)(1,2);\r
+ //Double_t dImQ3n3k = (*fImQ)(2,3);\r
+ //Double_t dImQ4n4k = (*fImQ)(3,4);\r
+ //Double_t dImQ1n3k = (*fImQ)(0,3);\r
+\r
+ // dMs are variables introduced in order to simplify some Eqs. below:\r
+ //..............................................................................................\r
+ Double_t dM11 = (*fSMpk)(1,1)-(*fSMpk)(0,2); // dM11 = sum_{i,j=1,i!=j}^M w_i w_j\r
+ Double_t dM111 = (*fSMpk)(2,1)-3.*(*fSMpk)(0,2)*(*fSMpk)(0,1)
+                + 2.*(*fSMpk)(0,3); // dM111 = sum_{i,j,k=1,i!=j!=k}^M w_i w_j w_k\r
+ //..............................................................................................\r
+        \r // 1-particle:\r
+ Double_t cosP1nW1 = 0.; // <<w1 cos(n*(phi1))>>\r
+   \r
+ if(dMult>0 && (*fSMpk)(0,1) !=0.)\r
+ {\r
+  cosP1nW1 = dReQ1n1k/(*fSMpk)(0,1); \r
+  \r
+  // average weighted 1-particle correction (cos terms) for non-uniform acceptance for single event:\r
+  fIntFlowCorrectionTermsForNUAEBE[1]->SetBinContent(1,cosP1nW1);\r
+  \r
+  // final average weighted 1-particle correction (cos terms) for non-uniform acceptance for all events:\r
+  fIntFlowCorrectionTermsForNUAPro[1]->Fill(0.5,cosP1nW1,(*fSMpk)(0,1));  \r
+ } \r
\r
+ // 2-particle:\r
+ Double_t cosP1nP1nW1W1 = 0.; // <<w1 w2 cos(n*(phi1+phi2))>>\r
\r
+ if(dMult>1 && dM11 !=0.)\r
+ {\r
+  cosP1nP1nW1W1 = (pow(dReQ1n1k,2)-pow(dImQ1n1k,2)-dReQ2n2k)/dM11; \r
+  \r
+  // average weighted 2-particle correction (cos terms) for non-uniform acceptance for single event:\r
+  fIntFlowCorrectionTermsForNUAEBE[1]->SetBinContent(2,cosP1nP1nW1W1);\r
+  \r
+  // final average weighted 2-particle correction (cos terms) for non-uniform acceptance for all events:\r
+  fIntFlowCorrectionTermsForNUAPro[1]->Fill(1.5,cosP1nP1nW1W1,dM11);  \r
+ } \r
\r
+ // 3-particle:\r
+ Double_t cosP1nM1nM1nW1W1W1 = 0.; // <<w1 w2 w3 cos(n*(phi1-phi2-phi3))>>\r
\r
+ if(dMult>2 && dM111 !=0.)\r
+ {\r
+  cosP1nM1nM1nW1W1W1 = (dReQ1n1k*(pow(dReQ1n1k,2)+pow(dImQ1n1k,2))
+                     - dReQ1n1k*dReQ2n2k-dImQ1n1k*dImQ2n2k
+                     - 2.*((*fSMpk)(0,2))*dReQ1n1k
+                     + 2.*dReQ1n3k) \r
+                     / dM111; \r
+  \r
+  // average non-weighted 3-particle correction (cos terms) for non-uniform acceptance for single event:\r
+  fIntFlowCorrectionTermsForNUAEBE[1]->SetBinContent(3,cosP1nM1nM1nW1W1W1);\r
+  \r
+  // final average non-weighted 3-particle correction (cos terms) for non-uniform acceptance for all events:\r
+  fIntFlowCorrectionTermsForNUAPro[1]->Fill(2.5,cosP1nM1nM1nW1W1W1,dM111);  \r
+ } \r
\r
+} // end of AliFlowAnalysisWithQCumulants::CalculateIntFlowCorrectionsForNUACosTermsUsingParticleWeights()\r
+\r
+\r
 //================================================================================================================================\r
+\r
+\r
+void AliFlowAnalysisWithQCumulants::CalculateIntFlowCorrectionsForNUASinTermsUsingParticleWeights()\r
+{\r
+ // calculate corrections using particle weights for non-uniform acceptance of the detector for no-name integrated flow (sin terms)\r
\r
+ //                                  **********************************************************************\r
+ //                                  **** weighted corrections for non-uniform acceptance (sin terms): ****\r
+ //                                  **********************************************************************\r
+ // Remark 1: When particle weights are used the binning of fIntFlowCorrectionTermsForNUAPro[0] is organized as follows:\r
+ //
+ // 1st bin: <<w1 sin(n*(phi1))>> = sinP1nW1\r
+ // 2nd bin: <<w1 w2 sin(n*(phi1+phi2))>> = sinP1nP1nW1W1\r
+ // 3rd bin: <<w1 w2 w3 sin(n*(phi1-phi2-phi3))>> = sinP1nM1nM1nW1W1W1 \r
+ // ...\r
+
+ // multiplicity (number of particles used to determine the reaction plane)\r
+ Double_t dMult = (*fSMpk)(0,0);\r
\r
+ // real and imaginary parts of weighted Q-vectors evaluated in harmonics n, 2n, 3n and 4n: \r
+ Double_t dReQ1n1k = (*fReQ)(0,1);\r
+ Double_t dReQ2n2k = (*fReQ)(1,2);\r
+ //Double_t dReQ3n3k = (*fReQ)(2,3);\r
+ //Double_t dReQ4n4k = (*fReQ)(3,4);\r
+ //Double_t dReQ1n3k = (*fReQ)(0,3);\r
+ Double_t dImQ1n1k = (*fImQ)(0,1);\r
+ Double_t dImQ2n2k = (*fImQ)(1,2);\r
+ //Double_t dImQ3n3k = (*fImQ)(2,3);\r
+ //Double_t dImQ4n4k = (*fImQ)(3,4);\r
+ Double_t dImQ1n3k = (*fImQ)(0,3);\r
+\r
+ // dMs are variables introduced in order to simplify some Eqs. below:\r
+ //..............................................................................................\r
+ Double_t dM11 = (*fSMpk)(1,1)-(*fSMpk)(0,2); // dM11 = sum_{i,j=1,i!=j}^M w_i w_j\r
+ Double_t dM111 = (*fSMpk)(2,1)-3.*(*fSMpk)(0,2)*(*fSMpk)(0,1)
+                + 2.*(*fSMpk)(0,3); // dM111 = sum_{i,j,k=1,i!=j!=k}^M w_i w_j w_k\r
+ //..............................................................................................\r
\r
+ // 1-particle:\r
+ Double_t sinP1nW1 = 0.; // <<w1 sin(n*(phi1))>>\r
\r
+ if(dMult>0 && (*fSMpk)(0,1) !=0.)\r
+ {\r
+  sinP1nW1 = dImQ1n1k/((*fSMpk)(0,1)); \r
+     \r
+  // average weighted 1-particle correction (sin terms) for non-uniform acceptance for single event:\r
+  fIntFlowCorrectionTermsForNUAEBE[0]->SetBinContent(1,sinP1nW1);\r
+  \r
+  // final average weighted 1-particle correction (sin terms) for non-uniform acceptance for all events:   \r
+  fIntFlowCorrectionTermsForNUAPro[0]->Fill(0.5,sinP1nW1,(*fSMpk)(0,1));  \r
+ } \r
\r
+ // 2-particle:\r
+ Double_t sinP1nP1nW1W1 = 0.; // <<w1 w2 sin(n*(phi1+phi2))>>\r
\r
+ if(dMult>1 && dM11 !=0.)\r
+ {\r
+  sinP1nP1nW1W1 = (2.*dReQ1n1k*dImQ1n1k-dImQ2n2k)/dM11; \r
+     \r
+  // average weighted 2-particle correction (sin terms) for non-uniform acceptance for single event:\r
+  fIntFlowCorrectionTermsForNUAEBE[0]->SetBinContent(2,sinP1nP1nW1W1);\r
+  \r
+  // final average weighted 2-particle correction (sin terms) for non-uniform acceptance for all events:      \r
+  fIntFlowCorrectionTermsForNUAPro[0]->Fill(1.5,sinP1nP1nW1W1,dM11);  \r
+ } \r
\r
+ // 3-particle:\r
+ Double_t sinP1nM1nM1nW1W1W1 = 0.; // <<w1 w2 w3 sin(n*(phi1-phi2-phi3))>>\r
\r
+ if(dMult>2 && dM111 !=0.)\r
+ {\r
+  sinP1nM1nM1nW1W1W1 = (-dImQ1n1k*(pow(dReQ1n1k,2)+pow(dImQ1n1k,2))
+                     + dReQ1n1k*dImQ2n2k-dImQ1n1k*dReQ2n2k
+                     + 2.*((*fSMpk)(0,2))*dImQ1n1k
+                     - 2.*dImQ1n3k)\r
+                     / dM111; \r
+  \r
+  // average weighted 3-particle correction (sin terms) for non-uniform acceptance for single event:\r
+  fIntFlowCorrectionTermsForNUAEBE[0]->SetBinContent(3,sinP1nM1nM1nW1W1W1);\r
+  \r
+  // final average weighted 3-particle correction (sin terms) for non-uniform acceptance for all events:  \r
+  fIntFlowCorrectionTermsForNUAPro[0]->Fill(2.5,sinP1nM1nM1nW1W1W1,dM111);  \r
+ } \r
\r
+} // end of AliFlowAnalysisWithQCumulants::CalculateIntFlowCorrectionsForNUASinTermsUsingParticleWeights()\r
+\r
+\r
+//================================================================================================================================
+\r
+\r
+void AliFlowAnalysisWithQCumulants::EvaluateIntFlowCorrectionsForNUAWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* anEvent)\r
+{\r
+ // Evaluate with nested loops correction terms for non-uniform acceptance for integrated flow (using the particle weights). \r
+\r
+ // Results are stored in profiles fIntFlowDirectCorrectionTermsForNUA[0] (sin terms) and
+ // fIntFlowDirectCorrectionTermsForNUA[1] (cos terms). 
\r
+ // Remark 1: When particle weights are used the binning of fIntFlowDirectCorrectionTermsForNUA[sc] is 
+ // organized as follows (sc stands for either sin or cos):\r
+ //\r
+ // 1st bin: <<w1 sc(n*(phi1))>> = scP1nW1\r
+ // 2nd bin: <<w1 w2 sc(n*(phi1+phi2))>> = scP1nP1nW1W1\r
+ // 3rd bin: <<w1 w2 w3 sc(n*(phi1-phi2-phi3))>> = scP1nM1nM1nW1W1W1 \r
+ // ...\r
\r
+ Int_t nPrim = anEvent->NumberOfTracks(); \r
+ AliFlowTrackSimple *aftsTrack = NULL;\r
+ //Double_t phi1=0., phi2=0., phi3=0., phi4=0., phi5=0., phi6=0., phi7=0., phi8=0.;\r
+ //Double_t wPhi1=1., wPhi2=1., wPhi3=1., wPhi4=1., wPhi5=1., wPhi6=1., wPhi7=1., wPhi8=1.;\r
+ Double_t phi1=0., phi2=0., phi3=0.;\r
+ Double_t wPhi1=1., wPhi2=1., wPhi3=1.;\r
+ Int_t n = fHarmonic; \r
+ Int_t eventNo = (Int_t)fAvMultiplicity->GetBinEntries(1); // to be improved (is this casting safe in general?)\r
+ Double_t dMult = (*fSMpk)(0,0);\r
+ cout<<endl;\r
+ cout<<"Correction terms for non-uniform acceptance: Event number: "<<eventNo<<", multiplicity is "<<dMult<<endl;\r
+ if(dMult<1)\r
+ {\r
+  cout<<"... skipping this event (multiplicity too low) ..."<<endl;\r
+ } else if (dMult>fMaxAllowedMultiplicity)\r
+   {\r
+    cout<<"... skipping this event (multiplicity too high) ..."<<endl;\r
+   } else \r
+     { \r
+      cout<<"... evaluating nested loops (using particle weights) ..."<<endl;\r
+     } \r
+      \r
+ // 1-particle correction terms using particle weights:       \r
+ if(nPrim>=1 && nPrim<=fMaxAllowedMultiplicity)\r
+ {\r
+  for(Int_t i1=0;i1<nPrim;i1++)\r
+  {\r
+   aftsTrack=anEvent->GetTrack(i1);\r
+   if(!(aftsTrack->InRPSelection())) continue;\r
+   phi1=aftsTrack->Phi();\r
+   if(fUsePhiWeights && fPhiWeights) wPhi1 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi1*fnBinsPhi/TMath::TwoPi())));\r
+   // 1-particle correction terms using particle weights:
+   if(fUsePhiWeights) fIntFlowDirectCorrectionTermsForNUA[0]->Fill(0.5,sin(n*phi1),wPhi1); // <w1 sin(n*phi1)>\r
+   if(fUsePhiWeights) fIntFlowDirectCorrectionTermsForNUA[1]->Fill(0.5,cos(n*phi1),wPhi1); // <w1 cos(n*phi1)>\r
+  } // end of for(Int_t i1=0;i1<nPrim;i1++)
+ } // end of if(nPrim>=1 && nPrim<=fMaxAllowedMultiplicity) 
+ // 2-particle correction terms using particle weights:       \r
+ if(nPrim>=2 && nPrim<=fMaxAllowedMultiplicity)\r
+ {\r
+  for(Int_t i1=0;i1<nPrim;i1++)\r
+  {\r
+   aftsTrack=anEvent->GetTrack(i1);\r
+   if(!(aftsTrack->InRPSelection())) continue;\r
+   phi1=aftsTrack->Phi();\r
+   if(fUsePhiWeights && fPhiWeights) wPhi1 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi1*fnBinsPhi/TMath::TwoPi())));\r
+   for(Int_t i2=0;i2<nPrim;i2++)\r
+   {\r
+    if(i2==i1)continue;\r
+    aftsTrack=anEvent->GetTrack(i2);\r
+    if(!(aftsTrack->InRPSelection())) continue;\r
+    phi2=aftsTrack->Phi();\r
+    if(fUsePhiWeights && fPhiWeights) wPhi2 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi2*fnBinsPhi/TMath::TwoPi())));   \r
+    if(nPrim==2) cout<<i1<<" "<<i2<<"\r"<<flush;\r
+    // 2-p correction terms using particle weights:    
+    if(fUsePhiWeights) fIntFlowDirectCorrectionTermsForNUA[0]->Fill(1.5,sin(n*(phi1+phi2)),wPhi1*wPhi2); // <w1 w2 sin(n*(phi1+phi2))>\r
+    if(fUsePhiWeights) fIntFlowDirectCorrectionTermsForNUA[1]->Fill(1.5,cos(n*(phi1+phi2)),wPhi1*wPhi2); // <w1 w2 cos(n*(phi1+phi2))>\r
+   } // end of for(Int_t i2=0;i2<nPrim;i2++)\r
+  } // end of for(Int_t i1=0;i1<nPrim;i1++)\r
+ } // end of if(nPrim>=2)\r
+\r
+ // 3-particle correction terms using particle weights:       \r
+ if(nPrim>=3 && nPrim<=fMaxAllowedMultiplicity)\r
+ { \r
+  for(Int_t i1=0;i1<nPrim;i1++)\r
+  {\r
+   aftsTrack=anEvent->GetTrack(i1);\r
+   if(!(aftsTrack->InRPSelection())) continue;\r
+   phi1=aftsTrack->Phi();\r
+   if(fUsePhiWeights && fPhiWeights) wPhi1 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi1*fnBinsPhi/TMath::TwoPi())));\r
+   for(Int_t i2=0;i2<nPrim;i2++)\r
+   {\r
+    if(i2==i1)continue;\r
+    aftsTrack=anEvent->GetTrack(i2);\r
+    if(!(aftsTrack->InRPSelection())) continue;\r
+    phi2=aftsTrack->Phi();\r
+    if(fUsePhiWeights && fPhiWeights) wPhi2 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi2*fnBinsPhi/TMath::TwoPi())));\r
+    for(Int_t i3=0;i3<nPrim;i3++)\r
+    {\r
+     if(i3==i1||i3==i2)continue;\r
+     aftsTrack=anEvent->GetTrack(i3);\r
+     if(!(aftsTrack->InRPSelection())) continue;\r
+     phi3=aftsTrack->Phi();\r
+     if(fUsePhiWeights && fPhiWeights) wPhi3 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi3*fnBinsPhi/TMath::TwoPi())));\r
+     if(nPrim==3) cout<<i1<<" "<<i2<<" "<<i3<<"\r"<<flush;\r
+     // 3-p correction terms using particle weights:    
+     if(fUsePhiWeights) fIntFlowDirectCorrectionTermsForNUA[0]->Fill(2.5,sin(n*(phi1-phi2-phi3)),wPhi1*wPhi2*wPhi3); // <w1 w2 w3 sin(n*(phi1-phi2-phi3))>\r
+     if(fUsePhiWeights) fIntFlowDirectCorrectionTermsForNUA[1]->Fill(2.5,cos(n*(phi1-phi2-phi3)),wPhi1*wPhi2*wPhi3); // <w1 w2 w3 cos(n*(phi1-phi2-phi3))>\r
+    } // end of for(Int_t i3=0;i3<nPrim;i3++)\r
+   } // end of for(Int_t i2=0;i2<nPrim;i2++)\r
+  } // end of for(Int_t i1=0;i1<nPrim;i1++)\r
+ } // end of if(nPrim>=3)\r
\r
+ /*
+ if(nPrim>=4 && nPrim<=fMaxAllowedMultiplicity)\r
+ {\r
+  // 4 nested loops multiparticle correlations using particle weights:       \r
+  for(Int_t i1=0;i1<nPrim;i1++)\r
+  {\r
+   aftsTrack=anEvent->GetTrack(i1);\r
+   if(!(aftsTrack->InRPSelection())) continue;\r
+   phi1=aftsTrack->Phi();\r
+   if(fUsePhiWeights && fPhiWeights) wPhi1 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi1*fnBinsPhi/TMath::TwoPi())));\r
+   for(Int_t i2=0;i2<nPrim;i2++)\r
+   {\r
+    if(i2==i1)continue;\r
+    aftsTrack=anEvent->GetTrack(i2);\r
+    if(!(aftsTrack->InRPSelection())) continue;\r
+    phi2=aftsTrack->Phi();\r
+    if(fUsePhiWeights && fPhiWeights) wPhi2 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi2*fnBinsPhi/TMath::TwoPi())));\r
+    for(Int_t i3=0;i3<nPrim;i3++)\r
+    {\r
+     if(i3==i1||i3==i2)continue;\r
+     aftsTrack=anEvent->GetTrack(i3);\r
+     if(!(aftsTrack->InRPSelection())) continue;\r
+     phi3=aftsTrack->Phi();\r
+     if(fUsePhiWeights && fPhiWeights) wPhi3 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi3*fnBinsPhi/TMath::TwoPi())));\r
+     for(Int_t i4=0;i4<nPrim;i4++)\r
+     {\r
+      if(i4==i1||i4==i2||i4==i3)continue;\r
+      aftsTrack=anEvent->GetTrack(i4);\r
+      if(!(aftsTrack->InRPSelection())) continue;\r
+      phi4=aftsTrack->Phi();\r
+      if(fUsePhiWeights && fPhiWeights) wPhi4 = fPhiWeights->GetBinContent(1+(Int_t)(TMath::Floor(phi4*fnBinsPhi/TMath::TwoPi())));\r
+      if(nPrim>=4) cout<<i1<<" "<<i2<<" "<<i3<<" "<<i4<<"\r"<<flush; // to be improved (replace eventually this if statement with if(nPrim==4))\r
+      // 4-p correlations using particle weights:\r
+      if(fUsePhiWeights) fIntFlowDirectCorrelations->Fill(10.5,cos(n*phi1+n*phi2-n*phi3-n*phi4),wPhi1*wPhi2*wPhi3*wPhi4); \r
+      // extra correlations: \r
+      // 2-p extra correlations (do not appear if particle weights are not used):\r
+      // ...\r
+      // 3-p extra correlations (do not appear if particle weights are not used):\r
+      // ...\r
+      // 4-p extra correlations (do not appear if particle weights are not used):\r
+      // ...\r
+     } // end of for(Int_t i4=0;i4<nPrim;i4++) \r
+    } // end of for(Int_t i3=0;i3<nPrim;i3++)\r
+   } // end of for(Int_t i2=0;i2<nPrim;i2++)\r
+  } // end of for(Int_t i1=0;i1<nPrim;i1++)\r
+ } // end of if(nPrim>=4)\r
+
+ */\r
+
+ cout<<endl; \r
+\r
+} // end of void AliFlowAnalysisWithQCumulants::EvaluateIntFlowCorrectionsForNUAWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* anEvent)\r
+\r
+\r
+//================================================================================================================================
+\r
+\r
void AliFlowAnalysisWithQCumulants::CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights(TString type, TString ptOrEta)
{
 // Calculate correction terms for non-uniform acceptance for differential flow (cos terms) using particle weights.
 //
 // NOTE(review): the implementation below is currently disabled (wrapped in a block comment);
 // this method is a stub until the weighted equations are finalized. The two no-op statements
 // below presumably only silence unused-parameter warnings - TODO confirm and remove once implemented.
 type+=""; // to be removed
 ptOrEta+=""; // to be removed

// Remark: w1 below is a particle weight used only for particles which were flagged both as POI and RP.

 // Results are stored in fDiffFlowCorrectionTermsForNUAPro[t][pe][1][cti], where cti runs as follows:
 //
 //  0: <<w1 cos n(psi1)>>
 //  1: <<w1 w2 cos n(psi1+phi2)>>
 //  2: <<w1 w2 w3 cos n(psi1+phi2-phi3)>>
 //  3: <<w1 w2 w3 cos n(psi1-phi2-phi3)>>
 //  4:
 //  5:
 //  6:

 /*
 // multiplicity:
 Double_t dMult = (*fSMpk)(0,0);

 // real and imaginary parts of weighted Q-vectors evaluated in harmonics n, 2n, 3n and 4n: 
 Double_t dReQ1n1k = (*fReQ)(0,1);
 Double_t dReQ2n2k = (*fReQ)(1,2);
 Double_t dReQ1n3k = (*fReQ)(0,3);
 //Double_t dReQ4n4k = (*fReQ)(3,4);
 Double_t dImQ1n1k = (*fImQ)(0,1);
 Double_t dImQ2n2k = (*fImQ)(1,2);
 Double_t dImQ1n3k = (*fImQ)(0,3);
 //Double_t dImQ4n4k = (*fImQ)(3,4);
 // S^M_{p,k} (see .h file for the definition of fSMpk):
 Double_t dSM1p1k = (*fSMpk)(0,1);
 Double_t dSM1p2k = (*fSMpk)(0,2);
 Double_t dSM1p3k = (*fSMpk)(0,3);
 Double_t dSM2p1k = (*fSMpk)(1,1);
 Double_t dSM3p1k = (*fSMpk)(2,1);

 Int_t t = -1; // type flag 
 Int_t pe = -1; // ptEta flag

 if(type == "RP")
 {
  t = 0;
 } else if(type == "POI")
   {
    t = 1;
   }

 if(ptOrEta == "Pt")
 {
  pe = 0;
 } else if(ptOrEta == "Eta")
   {
    pe = 1;
   }
    
 Int_t nBinsPtEta[2] = {fnBinsPt,fnBinsEta};
 Double_t minPtEta[2] = {fPtMin,fEtaMin};
 //Double_t maxPtEta[2] = {fPtMax,fEtaMax};
 Double_t binWidthPtEta[2] = {fPtBinWidth,fEtaBinWidth};

 // looping over all bins and calculating correction terms: 
 for(Int_t b=1;b<=nBinsPtEta[pe];b++)
 {
  // real and imaginary parts of p_{m*n,0} (non-weighted Q-vector evaluated for POIs in particular pt or eta bin): 
  Double_t p1n0kRe = 0.;
  Double_t p1n0kIm = 0.;

  // number of POIs in particular pt or eta bin:
  Double_t mp = 0.;

  // real and imaginary parts of q_{m*n,0} (non-weighted Q-vector evaluated for particles which are both RPs and POIs in particular pt or eta bin):
  Double_t q1n0kRe = 0.;
  Double_t q1n0kIm = 0.;
  Double_t q2n0kRe = 0.;
  Double_t q2n0kIm = 0.;

  // real and imaginary parts of q_{m*n,0} (weighted Q-vector evaluated for particles which are both RPs and POIs in particular pt or eta bin):
  Double_t q1n1kRe = 0.;
  Double_t q1n1kIm = 0.;
  Double_t q1n2kRe = 0.;
  Double_t q1n2kIm = 0.;
  Double_t q2n1kRe = 0.;
  Double_t q2n1kIm = 0.;
  Double_t q2n2kRe = 0.;
  Double_t q2n2kIm = 0.;
  
  // number of particles which are both RPs and POIs in particular pt or eta bin:
  Double_t mq = 0.;
  
  // s_{1,1}, s_{1,2} and s_{1,3} // to be improved (add explanation)  
  Double_t s1p1k = 0.; 
  Double_t s1p2k = 0.; 
  Double_t s1p3k = 0.; 
  
  if(type == "POI")
  {
   // p_{m*n,k}:   
   p1n0kRe = fReRPQ1dEBE[1][pe][0][0]->GetBinContent(fReRPQ1dEBE[1][pe][0][0]->GetBin(b))
           * fReRPQ1dEBE[1][pe][0][0]->GetBinEntries(fReRPQ1dEBE[1][pe][0][0]->GetBin(b));
   p1n0kIm = fImRPQ1dEBE[1][pe][0][0]->GetBinContent(fImRPQ1dEBE[1][pe][0][0]->GetBin(b))  
           * fImRPQ1dEBE[1][pe][0][0]->GetBinEntries(fImRPQ1dEBE[1][pe][0][0]->GetBin(b));
   mp = fReRPQ1dEBE[1][pe][0][0]->GetBinEntries(fReRPQ1dEBE[1][pe][0][0]->GetBin(b)); // to be improved (cross-checked by accessing other profiles here)                
   // q_{m*n,k}:
   q1n0kRe = fReRPQ1dEBE[2][pe][0][0]->GetBinContent(fReRPQ1dEBE[2][pe][0][0]->GetBin(b))
           * fReRPQ1dEBE[2][pe][0][0]->GetBinEntries(fReRPQ1dEBE[2][pe][0][0]->GetBin(b));
   q1n0kIm = fImRPQ1dEBE[2][pe][0][0]->GetBinContent(fImRPQ1dEBE[2][pe][0][0]->GetBin(b))
           * fImRPQ1dEBE[2][pe][0][0]->GetBinEntries(fImRPQ1dEBE[2][pe][0][0]->GetBin(b));
   q2n0kRe = fReRPQ1dEBE[2][pe][1][0]->GetBinContent(fReRPQ1dEBE[2][pe][1][0]->GetBin(b))
           * fReRPQ1dEBE[2][pe][1][0]->GetBinEntries(fReRPQ1dEBE[2][pe][1][0]->GetBin(b));
   q2n0kIm = fImRPQ1dEBE[2][pe][1][0]->GetBinContent(fImRPQ1dEBE[2][pe][1][0]->GetBin(b))
           * fImRPQ1dEBE[2][pe][1][0]->GetBinEntries(fImRPQ1dEBE[2][pe][1][0]->GetBin(b));         
   q1n1kRe = fReRPQ1dEBE[2][pe][0][1]->GetBinContent(fReRPQ1dEBE[2][pe][0][1]->GetBin(b))
           * fReRPQ1dEBE[2][pe][0][1]->GetBinEntries(fReRPQ1dEBE[2][pe][0][1]->GetBin(b));
   q1n1kIm = fImRPQ1dEBE[2][pe][0][1]->GetBinContent(fImRPQ1dEBE[2][pe][0][1]->GetBin(b))
           * fImRPQ1dEBE[2][pe][0][1]->GetBinEntries(fImRPQ1dEBE[2][pe][0][1]->GetBin(b));
   q2n2kRe = fReRPQ1dEBE[2][pe][1][2]->GetBinContent(fReRPQ1dEBE[2][pe][1][2]->GetBin(b))
           * fReRPQ1dEBE[2][pe][1][2]->GetBinEntries(fReRPQ1dEBE[2][pe][1][2]->GetBin(b));
   q2n2kIm = fImRPQ1dEBE[2][pe][1][2]->GetBinContent(fImRPQ1dEBE[2][pe][1][2]->GetBin(b))
           * fImRPQ1dEBE[2][pe][1][2]->GetBinEntries(fImRPQ1dEBE[2][pe][1][2]->GetBin(b));          
   mq = fReRPQ1dEBE[2][pe][0][0]->GetBinEntries(fReRPQ1dEBE[2][pe][0][0]->GetBin(b)); // to be improved (cross-checked by accessing other profiles here)
   // s_{1,1}, s_{1,2} and s_{1,3} // to be improved (add explanation)  
   s1p1k = pow(fs1dEBE[2][pe][1]->GetBinContent(b)*fs1dEBE[2][pe][1]->GetBinEntries(b),1.); 
   s1p2k = pow(fs1dEBE[2][pe][2]->GetBinContent(b)*fs1dEBE[2][pe][2]->GetBinEntries(b),1.); 
   s1p3k = pow(fs1dEBE[2][pe][3]->GetBinContent(b)*fs1dEBE[2][pe][3]->GetBinEntries(b),1.); 
   // typeFlag = RP (0) or POI (1):   
   t = 1; 
  }else if(type == "RP")
   {
    // q_{m*n,k}: (Remark: m=1 is 0, k=0 is zero (to be improved!)) 
    q1n2kRe = fReRPQ1dEBE[0][pe][0][2]->GetBinContent(fReRPQ1dEBE[0][pe][0][2]->GetBin(b))
            * fReRPQ1dEBE[0][pe][0][2]->GetBinEntries(fReRPQ1dEBE[0][pe][0][2]->GetBin(b));
    q1n2kIm = fImRPQ1dEBE[0][pe][0][2]->GetBinContent(fImRPQ1dEBE[0][pe][0][2]->GetBin(b))
            * fImRPQ1dEBE[0][pe][0][2]->GetBinEntries(fImRPQ1dEBE[0][pe][0][2]->GetBin(b));
    q2n1kRe = fReRPQ1dEBE[0][pe][1][1]->GetBinContent(fReRPQ1dEBE[0][pe][1][1]->GetBin(b))
            * fReRPQ1dEBE[0][pe][1][1]->GetBinEntries(fReRPQ1dEBE[0][pe][1][1]->GetBin(b));
    q2n1kIm = fImRPQ1dEBE[0][pe][1][1]->GetBinContent(fImRPQ1dEBE[0][pe][1][1]->GetBin(b))
            * fImRPQ1dEBE[0][pe][1][1]->GetBinEntries(fImRPQ1dEBE[0][pe][1][1]->GetBin(b));
    // s_{1,1}, s_{1,2} and s_{1,3} // to be improved (add explanation)  
    s1p1k = pow(fs1dEBE[0][pe][1]->GetBinContent(b)*fs1dEBE[0][pe][1]->GetBinEntries(b),1.); 
    s1p2k = pow(fs1dEBE[0][pe][2]->GetBinContent(b)*fs1dEBE[0][pe][2]->GetBinEntries(b),1.); 
    s1p3k = pow(fs1dEBE[0][pe][3]->GetBinContent(b)*fs1dEBE[0][pe][3]->GetBinEntries(b),1.); 
    
    // to be improved (cross-checked):
    p1n0kRe = fReRPQ1dEBE[0][pe][0][0]->GetBinContent(fReRPQ1dEBE[0][pe][0][0]->GetBin(b))
            * fReRPQ1dEBE[0][pe][0][0]->GetBinEntries(fReRPQ1dEBE[0][pe][0][0]->GetBin(b));
    p1n0kIm = fImRPQ1dEBE[0][pe][0][0]->GetBinContent(fImRPQ1dEBE[0][pe][0][0]->GetBin(b))  
            * fImRPQ1dEBE[0][pe][0][0]->GetBinEntries(fImRPQ1dEBE[0][pe][0][0]->GetBin(b));
    mp = fReRPQ1dEBE[0][pe][0][0]->GetBinEntries(fReRPQ1dEBE[0][pe][0][0]->GetBin(b)); // to be improved (cross-checked by accessing other profiles here)
    // typeFlag = RP (0) or POI (1): 
    t = 0;
  }    
  
  // <<w1 cos n(psi1)>>:
  Double_t cosP1nPsiW1 = 0.;
  if(mp-mq+s1p1k)
  {
   cosP1nPsiW1 = (p1n0kRe-q1n0kRe+q1n1kRe)/(mp-mq+s1p1k);
   
   // fill profile for <<w1 cos n(psi1)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][1][0]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],cosP1nPsiW1,mp-mq+s1p1k);
   // histogram to store <w1 cos n(psi1)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][1][0]->SetBinContent(b,cosP1nPsiW1);
  } // end of if(mp-mq+s1p1k)   
  
  
  
  // <<w1 w2 cos n(psi1+phi2)>>:
  Double_t cosP1nPsiP1nPhi = 0.;
  if(mp*dMult-mq)
  {
   cosP1nPsiP1nPhi = (p1n0kRe*dReQ1n-p1n0kIm*dImQ1n-q2n0kRe)/(mp*dMult-mq);
   // fill profile for <<w1 w2 cos n(psi1+phi2)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][1][1]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],cosP1nPsiP1nPhi,mp*dMult-mq);
   // histogram to store <w1 w2 cos n(psi1+phi2)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][1][1]->SetBinContent(b,cosP1nPsiP1nPhi);
  } // end of if(mp*dMult-mq)   
  
  // <<w1 w2 w3 cos n(psi1+phi2-phi3)>>:
  Double_t cosP1nPsi1P1nPhi2MPhi3 = 0.;
  if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))
  {
   cosP1nPsi1P1nPhi2MPhi3 = (p1n0kRe*(pow(dImQ1n,2.)+pow(dReQ1n,2.)-dMult)
                          - 1.*(q2n0kRe*dReQ1n+q2n0kIm*dImQ1n)  
                          - mq*dReQ1n+2.*q1n0kRe)
                          / (mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // fill profile for <<w1 w2 w3 cos n(psi1+phi2-phi3)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][1][2]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],cosP1nPsi1P1nPhi2MPhi3,mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // histogram to store <w1 w2 w3 cos n(psi1+phi2-phi3)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][1][2]->SetBinContent(b,cosP1nPsi1P1nPhi2MPhi3);
  } // end of if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))   
  
  // <<w1 w2 w3 cos n(psi1-phi2-phi3)>>:
  Double_t cosP1nPsi1M1nPhi2MPhi3 = 0.;
  if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))
  {
   cosP1nPsi1M1nPhi2MPhi3 = (p1n0kRe*(pow(dReQ1n,2.)-pow(dImQ1n,2.))+2.*p1n0kIm*dReQ1n*dImQ1n
                          - 1.*(p1n0kRe*dReQ2n+p1n0kIm*dImQ2n)  
                          - 2.*mq*dReQ1n+2.*q1n0kRe)
                          / (mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // fill profile for <<w1 w2 w3 cos n(psi1-phi2-phi3)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][1][3]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],cosP1nPsi1M1nPhi2MPhi3,mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // histogram to store <w1 w2 w3 cos n(psi1-phi2-phi3)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][1][3]->SetBinContent(b,cosP1nPsi1M1nPhi2MPhi3);
  } // end of if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))   
 } // end of for(Int_t b=1;b<=nBinsPtEta[pe];b++)
  
 */
} // end of AliFlowAnalysisWithQCumulants::CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights(TString type, TString ptOrEta)
+
+\r
+//================================================================================================================================\r
+\r
+\r
void AliFlowAnalysisWithQCumulants::CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights(TString type, TString ptOrEta)
{
 // Calculate correction terms for non-uniform acceptance for differential flow (sin terms).
 //
 // NOTE(review): the implementation below is currently disabled (wrapped in a block comment);
 // this method is a stub until the weighted equations are finalized. The two no-op statements
 // below presumably only silence unused-parameter warnings - TODO confirm and remove once implemented.
  
 type+=""; // to be removed
 ptOrEta+=""; // to be removed
 // Results are stored in fDiffFlowCorrectionTermsForNUAPro[t][pe][0][cti], where cti runs as follows:
 //  0: <<sin n(psi1)>>
 //  1: <<sin n(psi1+phi2)>>
 //  2: <<sin n(psi1+phi2-phi3)>>
 //  3: <<sin n(psi1-phi2-phi3)>>:
 //  4:
 //  5:
 //  6:
 /*
 // multiplicity:
 Double_t dMult = (*fSMpk)(0,0);

 // real and imaginary parts of non-weighted Q-vectors evaluated in harmonics n, 2n, 3n and 4n: 
 Double_t dReQ1n = (*fReQ)(0,0);
 Double_t dReQ2n = (*fReQ)(1,0);
 //Double_t dReQ3n = (*fReQ)(2,0);
 //Double_t dReQ4n = (*fReQ)(3,0);
 Double_t dImQ1n = (*fImQ)(0,0);
 Double_t dImQ2n = (*fImQ)(1,0);
 //Double_t dImQ3n = (*fImQ)(2,0);
 //Double_t dImQ4n = (*fImQ)(3,0);

 Int_t t = -1; // type flag 
 Int_t pe = -1; // ptEta flag

 if(type == "RP")
 {
  t = 0;
 } else if(type == "POI")
   {
    t = 1;
   }

 if(ptOrEta == "Pt")
 {
  pe = 0;
 } else if(ptOrEta == "Eta")
   {
    pe = 1;
   }
    
 Int_t nBinsPtEta[2] = {fnBinsPt,fnBinsEta};
 Double_t minPtEta[2] = {fPtMin,fEtaMin};
 //Double_t maxPtEta[2] = {fPtMax,fEtaMax};
 Double_t binWidthPtEta[2] = {fPtBinWidth,fEtaBinWidth};

 // looping over all bins and calculating correction terms: 
 for(Int_t b=1;b<=nBinsPtEta[pe];b++)
 {
  // real and imaginary parts of p_{m*n,0} (non-weighted Q-vector evaluated for POIs in particular pt or eta bin): 
  Double_t p1n0kRe = 0.;
  Double_t p1n0kIm = 0.;

  // number of POIs in particular pt or eta bin:
  Double_t mp = 0.;

  // real and imaginary parts of q_{m*n,0} (non-weighted Q-vector evaluated for particles which are both RPs and POIs in particular pt or eta bin):
  Double_t q1n0kRe = 0.;
  Double_t q1n0kIm = 0.;
  Double_t q2n0kRe = 0.;
  Double_t q2n0kIm = 0.;

  // number of particles which are both RPs and POIs in particular pt or eta bin:
  Double_t mq = 0.;
   
  if(type == "POI")
  {
   // q_{m*n,0}:
   q1n0kRe = fReRPQ1dEBE[2][pe][0][0]->GetBinContent(fReRPQ1dEBE[2][pe][0][0]->GetBin(b))
           * fReRPQ1dEBE[2][pe][0][0]->GetBinEntries(fReRPQ1dEBE[2][pe][0][0]->GetBin(b));
   q1n0kIm = fImRPQ1dEBE[2][pe][0][0]->GetBinContent(fImRPQ1dEBE[2][pe][0][0]->GetBin(b))
           * fImRPQ1dEBE[2][pe][0][0]->GetBinEntries(fImRPQ1dEBE[2][pe][0][0]->GetBin(b));
   q2n0kRe = fReRPQ1dEBE[2][pe][1][0]->GetBinContent(fReRPQ1dEBE[2][pe][1][0]->GetBin(b))
           * fReRPQ1dEBE[2][pe][1][0]->GetBinEntries(fReRPQ1dEBE[2][pe][1][0]->GetBin(b));
   q2n0kIm = fImRPQ1dEBE[2][pe][1][0]->GetBinContent(fImRPQ1dEBE[2][pe][1][0]->GetBin(b))
           * fImRPQ1dEBE[2][pe][1][0]->GetBinEntries(fImRPQ1dEBE[2][pe][1][0]->GetBin(b));         
                 
   mq = fReRPQ1dEBE[2][pe][0][0]->GetBinEntries(fReRPQ1dEBE[2][pe][0][0]->GetBin(b)); // to be improved (cross-checked by accessing other profiles here)
  } 
  else if(type == "RP")
  {
   // q_{m*n,0}:
   q1n0kRe = fReRPQ1dEBE[0][pe][0][0]->GetBinContent(fReRPQ1dEBE[0][pe][0][0]->GetBin(b))
           * fReRPQ1dEBE[0][pe][0][0]->GetBinEntries(fReRPQ1dEBE[0][pe][0][0]->GetBin(b));
   q1n0kIm = fImRPQ1dEBE[0][pe][0][0]->GetBinContent(fImRPQ1dEBE[0][pe][0][0]->GetBin(b))
           * fImRPQ1dEBE[0][pe][0][0]->GetBinEntries(fImRPQ1dEBE[0][pe][0][0]->GetBin(b));
   q2n0kRe = fReRPQ1dEBE[0][pe][1][0]->GetBinContent(fReRPQ1dEBE[0][pe][1][0]->GetBin(b))
           * fReRPQ1dEBE[0][pe][1][0]->GetBinEntries(fReRPQ1dEBE[0][pe][1][0]->GetBin(b));
   q2n0kIm = fImRPQ1dEBE[0][pe][1][0]->GetBinContent(fImRPQ1dEBE[0][pe][1][0]->GetBin(b))
           * fImRPQ1dEBE[0][pe][1][0]->GetBinEntries(fImRPQ1dEBE[0][pe][1][0]->GetBin(b));         
                 
   mq = fReRPQ1dEBE[0][pe][0][0]->GetBinEntries(fReRPQ1dEBE[0][pe][0][0]->GetBin(b)); // to be improved (cross-checked by accessing other profiles here)  
  }    
  if(type == "POI")
  {
   // p_{m*n,0}:
   p1n0kRe = fReRPQ1dEBE[1][pe][0][0]->GetBinContent(fReRPQ1dEBE[1][pe][0][0]->GetBin(b))
           * fReRPQ1dEBE[1][pe][0][0]->GetBinEntries(fReRPQ1dEBE[1][pe][0][0]->GetBin(b));
   p1n0kIm = fImRPQ1dEBE[1][pe][0][0]->GetBinContent(fImRPQ1dEBE[1][pe][0][0]->GetBin(b))  
           * fImRPQ1dEBE[1][pe][0][0]->GetBinEntries(fImRPQ1dEBE[1][pe][0][0]->GetBin(b));
            
   mp = fReRPQ1dEBE[1][pe][0][0]->GetBinEntries(fReRPQ1dEBE[1][pe][0][0]->GetBin(b)); // to be improved (cross-checked by accessing other profiles here)
    
   t = 1; // typeFlag = RP or POI
  }
  else if(type == "RP")
  {
   // p_{m*n,0} = q_{m*n,0}:
   p1n0kRe = q1n0kRe; 
   p1n0kIm = q1n0kIm; 
           
   mp = mq; 
   
   t = 0; // typeFlag = RP or POI
  }

  // <<sin n(psi1)>>:
  Double_t sinP1nPsi = 0.;
  if(mp)
  {
   sinP1nPsi = p1n0kIm/mp;
   // fill profile for <<sin n(psi1)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][0][0]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],sinP1nPsi,mp);
   // histogram to store <sin n(psi1)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][0][0]->SetBinContent(b,sinP1nPsi);
  } // end of if(mp)   
  
  // <<sin n(psi1+phi2)>>:
  Double_t sinP1nPsiP1nPhi = 0.;
  if(mp*dMult-mq)
  {
   sinP1nPsiP1nPhi = (p1n0kRe*dImQ1n+p1n0kIm*dReQ1n-q2n0kIm)/(mp*dMult-mq);
   // fill profile for <<sin n(psi1+phi2)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][0][1]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],sinP1nPsiP1nPhi,mp*dMult-mq);
   // histogram to store <sin n(psi1+phi2)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][0][1]->SetBinContent(b,sinP1nPsiP1nPhi);
  } // end of if(mp*dMult-mq)   
  
  // <<sin n(psi1+phi2-phi3)>>:
  Double_t sinP1nPsi1P1nPhi2MPhi3 = 0.;
  if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))
  {
   sinP1nPsi1P1nPhi2MPhi3 = (p1n0kIm*(pow(dImQ1n,2.)+pow(dReQ1n,2.)-dMult)
                          - 1.*(q2n0kIm*dReQ1n-q2n0kRe*dImQ1n)  
                          - mq*dImQ1n+2.*q1n0kIm)
                          / (mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // fill profile for <<sin n(psi1+phi2-phi3)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][0][2]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],sinP1nPsi1P1nPhi2MPhi3,mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // histogram to store <sin n(psi1+phi2-phi3)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][0][2]->SetBinContent(b,sinP1nPsi1P1nPhi2MPhi3);
  } // end of if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))   
  
  // <<sin n(psi1-phi2-phi3)>>:
  Double_t sinP1nPsi1M1nPhi2MPhi3 = 0.;
  if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))
  {
   sinP1nPsi1M1nPhi2MPhi3 = (p1n0kIm*(pow(dReQ1n,2.)-pow(dImQ1n,2.))-2.*p1n0kRe*dReQ1n*dImQ1n
                          - 1.*(p1n0kIm*dReQ2n-p1n0kRe*dImQ2n)
                          + 2.*mq*dImQ1n-2.*q1n0kIm)
                          / (mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // fill profile for <<sin n(psi1-phi2-phi3)>>:
   fDiffFlowCorrectionTermsForNUAPro[t][pe][0][3]->Fill(minPtEta[pe]+(b-1)*binWidthPtEta[pe],sinP1nPsi1M1nPhi2MPhi3,mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.));
   // histogram to store <sin n(psi1-phi2-phi3)> e-b-e (needed in some other methods):
   fDiffFlowCorrectionTermsForNUAEBE[t][pe][0][3]->SetBinContent(b,sinP1nPsi1M1nPhi2MPhi3);
  } // end of if(mq*(dMult-1.)*(dMult-2.)+(mp-mq)*dMult*(dMult-1.))   
 } // end of for(Int_t b=1;b<=nBinsPtEta[pe];b++)

 */

} // end of AliFlowAnalysisWithQCumulants::CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights(TString type, TString ptOrEta)
+\r
+\r
+//================================================================================================================================\r
+\r
+   \r
+void AliFlowAnalysisWithQCumulants::EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* anEvent, TString type, TString ptOrEta)\r
+{\r
+ // Evaluate with nested loops correction terms for non-uniform acceptance 
+ // with using particle weights (both sin and cos terms) relevant for differential flow.\r
\r
+ anEvent->NumberOfTracks(); // to be removed
+ ptOrEta+=""; // to be removed
+ type+=""; // to be removed
+ // Remark 1: "w1" in expressions below is a particle weight used only for particles which were 
+ //           flagged both as POI and RP.
+ // Remark 2: Reduced correction terms for non-uniform acceptance are evaluated in pt bin number fCrossCheckInPtBinNo \r
+ //           and eta bin number fCrossCheckInEtaBinNo both for RPs and POIs.\r
+ // Remark 3: Results are stored in 1 bin profiles fDiffFlowDirectCorrections[t][pe][sc][cti], where first three indices runs as: \r
+ //           [0=RP,1=POI][0=Pt,1=Eta][0=sin terms,1=cos terms], whilst the cti (correction term index) runs as follows: \r
+ //  cti: \r
+ //    0: <<w1 sc n(psi1)>>\r
+ //    1: <<w1 w2 sc n(psi1+phi2)>> \r
+ //    2: <<w1 w2 w3 sc n(psi1+phi2-phi3)>>\r
+ //    3: <<w1 w2 w3 sc n(psi1-phi2-phi3)>>\r
+ //    4:\r
+ //    5:\r
+ //    6:\r
+    \r
+ /*
+ Int_t typeFlag = -1;\r
+ Int_t ptEtaFlag = -1;\r
+ if(type == "RP")\r
+ {\r
+  typeFlag = 0;\r
+ } else if(type == "POI")\r
+   {\r
+    typeFlag = 1;\r
+   }      \r
+ if(ptOrEta == "Pt")\r
+ {\r
+  ptEtaFlag = 0;\r
+ } else if(ptOrEta == "Eta")\r
+   {\r
+    ptEtaFlag = 1;\r
+   } \r
+ // shortcuts:\r
+ Int_t t = typeFlag;\r
+ Int_t pe = ptEtaFlag;\r
+      \r
+ Double_t lowerPtEtaEdge[2] = {fPtMin+(fCrossCheckInPtBinNo-1)*fPtBinWidth,fEtaMin+(fCrossCheckInEtaBinNo-1)*fEtaBinWidth};\r
+ Double_t upperPtEtaEdge[2] = {fPtMin+fCrossCheckInPtBinNo*fPtBinWidth,fEtaMin+fCrossCheckInEtaBinNo*fEtaBinWidth};\r
+ Double_t binWidthPtEta[2] = {fPtBinWidth,fEtaBinWidth};\r
\r
+ Int_t nPrim = anEvent->NumberOfTracks(); \r
+ AliFlowTrackSimple *aftsTrack = NULL;\r
\r
+ Double_t psi1=0., phi2=0., phi3=0.;// phi4=0.;// phi5=0., phi6=0., phi7=0., phi8=0.;\r
\r
+ Int_t n = fHarmonic; \r
\r
+ // 1-particle correction terms:\r
+ for(Int_t i1=0;i1<nPrim;i1++)\r
+ {\r
+  aftsTrack=anEvent->GetTrack(i1);\r
+  // POI condition (first particle in the correlator must be POI): // to be improved (this can be implemented much better)\r
+  if(ptOrEta == "Pt")\r
+  { \r
+   if(!((aftsTrack->Pt()>=lowerPtEtaEdge[pe] && aftsTrack->Pt()<upperPtEtaEdge[pe]) && (aftsTrack->InPOISelection()))) continue;\r
+  } else if (ptOrEta == "Eta")\r
+    {\r
+     if(!((aftsTrack->Eta()>=lowerPtEtaEdge[pe] && aftsTrack->Eta()<upperPtEtaEdge[pe]) && (aftsTrack->InPOISelection()))) continue;    \r
+    }\r
+  psi1=aftsTrack->Phi(); \r
+  // sin terms: \r
+  fDiffFlowDirectCorrectionTermsForNUA[t][pe][0][0]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,sin(n*psi1),1.); // <<sin(n*(psi1))>>  \r
+  // cos terms: \r
+  fDiffFlowDirectCorrectionTermsForNUA[t][pe][1][0]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,cos(n*psi1),1.); // <<cos(n*(psi1))>>  \r
+ }//end of for(Int_t i1=0;i1<nPrim;i1++)\r
+   \r
+ // 2-particle correction terms:\r
+ for(Int_t i1=0;i1<nPrim;i1++)\r
+ {\r
+  aftsTrack=anEvent->GetTrack(i1);\r
+  // POI condition (first particle in the correlator must be POI): // to be improved (this can be implemented much better)\r
+  if(ptOrEta == "Pt")\r
+  { \r
+   if(!((aftsTrack->Pt()>=lowerPtEtaEdge[pe] && aftsTrack->Pt()<upperPtEtaEdge[pe]) && (aftsTrack->InPOISelection()))) continue;\r
+  } else if (ptOrEta == "Eta")\r
+    {\r
+     if(!((aftsTrack->Eta()>=lowerPtEtaEdge[pe] && aftsTrack->Eta()<upperPtEtaEdge[pe]) && (aftsTrack->InPOISelection()))) continue;    \r
+    }\r
+  psi1=aftsTrack->Phi(); \r
+  for(Int_t i2=0;i2<nPrim;i2++)\r
+  {\r
+   if(i2==i1) continue;\r
+   aftsTrack=anEvent->GetTrack(i2);\r
+   // RP condition (!(first) particle in the correlator must be RP):\r
+   if(!(aftsTrack->InRPSelection())) continue;\r
+   phi2=aftsTrack->Phi();   \r
+   // sin terms: \r
+   fDiffFlowDirectCorrectionTermsForNUA[t][pe][0][1]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,sin(n*(psi1+phi2)),1.); // <<sin(n*(psi1+phi2))>>  \r
+   // cos terms: \r
+   fDiffFlowDirectCorrectionTermsForNUA[t][pe][1][1]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,cos(n*(psi1+phi2)),1.); // <<cos(n*(psi1+phi2))>>  \r
+  }//end of for(Int_t i2=0;i2<nPrim;i2++)\r
+ }//end of for(Int_t i1=0;i1<nPrim;i1++)   \r
\r
+ // 3-particle correction terms:\r
+ for(Int_t i1=0;i1<nPrim;i1++)\r
+ {\r
+  aftsTrack=anEvent->GetTrack(i1);\r
+  // POI condition (first particle in the correlator must be POI): // to be improved (this can be implemented much better)\r
+  if(ptOrEta == "Pt")\r
+  { \r
+   if(!((aftsTrack->Pt()>=lowerPtEtaEdge[pe] && aftsTrack->Pt()<upperPtEtaEdge[pe]) && (aftsTrack->InPOISelection())))continue;\r
+  } else if (ptOrEta == "Eta")\r
+    {\r
+     if(!((aftsTrack->Eta()>=lowerPtEtaEdge[pe] && aftsTrack->Eta()<upperPtEtaEdge[pe]) && (aftsTrack->InPOISelection())))continue;    \r
+    }\r
+  psi1=aftsTrack->Phi();\r
+  for(Int_t i2=0;i2<nPrim;i2++)\r
+  {\r
+   if(i2==i1) continue;\r
+   aftsTrack=anEvent->GetTrack(i2);\r
+   // RP condition (!(first) particle in the correlator must be RP):\r
+   if(!(aftsTrack->InRPSelection())) continue;\r
+   phi2=aftsTrack->Phi();\r
+   for(Int_t i3=0;i3<nPrim;i3++)\r
+   {\r
+    if(i3==i1||i3==i2) continue;\r
+    aftsTrack=anEvent->GetTrack(i3);\r
+    // RP condition (!(first) particle in the correlator must be RP):\r
+    if(!(aftsTrack->InRPSelection())) continue;\r
+    phi3=aftsTrack->Phi();\r
+    // sin terms: \r
+    fDiffFlowDirectCorrectionTermsForNUA[t][pe][0][2]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,sin(n*(psi1+phi2-phi3)),1.); // <<sin(n*(psi1+phi2-phi3))>>  \r
+    fDiffFlowDirectCorrectionTermsForNUA[t][pe][0][3]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,sin(n*(psi1-phi2-phi3)),1.); // <<sin(n*(psi1-phi2-phi3))>>  \r
+    // cos terms: \r
+    fDiffFlowDirectCorrectionTermsForNUA[t][pe][1][2]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,cos(n*(psi1+phi2-phi3)),1.); // <<cos(n*(psi1+phi2-phi3))>>  \r
+    fDiffFlowDirectCorrectionTermsForNUA[t][pe][1][3]->Fill(lowerPtEtaEdge[pe]+binWidthPtEta[pe]/2.,cos(n*(psi1-phi2-phi3)),1.); // <<cos(n*(psi1-phi2-phi3))>>  \r
+   }//end of for(Int_t i3=0;i3<nPrim;i3++)  \r
+  }//end of for(Int_t i2=0;i2<nPrim;i2++)  \r
+ }//end of for(Int_t i1=0;i1<nPrim;i1++)\r
+ */ 
+               \r
+} // end of void AliFlowAnalysisWithQCumulants::EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* anEvent, TString type, TString ptOrEta)\r
+\r
+\r
+
+
+
index c1a57c948cf191e872f0ca24f75f33cfcc3804ec..9f43dbac68ee2fbcd991c08fde0e59094e377ab7 100644 (file)
@@ -57,6 +57,7 @@ class AliFlowAnalysisWithQCumulants{
     virtual void BookEverythingForNestedLoops();   
     virtual void StoreIntFlowFlags();
     virtual void StoreDiffFlowFlags();
+    virtual void StoreFlagsForDistributions();   
     virtual void StoreHarmonic();
   // 2.) method Make() and methods called within Make():
   virtual void Make(AliFlowEventSimple *anEvent);
@@ -66,25 +67,30 @@ class AliFlowAnalysisWithQCumulants{
     virtual void ResetEventByEventQuantities();
     // 2b.) integrated flow:
     virtual void CalculateIntFlowCorrelations(); 
+    virtual void CalculateIntFlowCorrelationsUsingParticleWeights();
     virtual void CalculateIntFlowProductOfCorrelations();
     virtual void CalculateIntFlowSumOfEventWeights();
     virtual void CalculateIntFlowSumOfProductOfEventWeights();
-    virtual void CalculateIntFlowCorrectionsForNUASinTerms();  
     virtual void CalculateIntFlowCorrectionsForNUACosTerms();
+    virtual void CalculateIntFlowCorrectionsForNUACosTermsUsingParticleWeights();
+    virtual void CalculateIntFlowCorrectionsForNUASinTerms();  
+    virtual void CalculateIntFlowCorrectionsForNUASinTermsUsingParticleWeights();    
     // ...  
-    virtual void CalculateIntFlowCorrelationsUsingParticleWeights();
     virtual void CalculateWeightedQProductsForIntFlow();
     virtual void EvaluateIntFlowCorrelationsWithNestedLoops(AliFlowEventSimple* const anEvent); 
     virtual void EvaluateIntFlowCorrelationsWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* const anEvent); 
-    virtual void EvaluateIntFlowCorrectionsForNUAWithNestedLoops(AliFlowEventSimple* const anEvent);  
+    virtual void EvaluateIntFlowCorrectionsForNUAWithNestedLoops(AliFlowEventSimple* const anEvent); 
+    virtual void EvaluateIntFlowCorrectionsForNUAWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* const anEvent);
     // 2c.) differential flow:
     virtual void CalculateDiffFlowCorrelations(TString type, TString ptOrEta); // type = RP or POI
     virtual void CalculateDiffFlowCorrelationsUsingParticleWeights(TString type, TString ptOrEta); // type = RP or POI 
     virtual void CalculateDiffFlowProductOfCorrelations(TString type, TString ptOrEta); // type = RP or POI
     virtual void CalculateDiffFlowSumOfEventWeights(TString type, TString ptOrEta); // type = RP or POI
     virtual void CalculateDiffFlowSumOfProductOfEventWeights(TString type, TString ptOrEta); // type = RP or POI
-    virtual void CalculateDiffFlowCorrectionsForNUASinTerms(TString type, TString ptOrEta);  
     virtual void CalculateDiffFlowCorrectionsForNUACosTerms(TString type, TString ptOrEta);
+    virtual void CalculateDiffFlowCorrectionsForNUACosTermsUsingParticleWeights(TString type, TString ptOrEta);
+    virtual void CalculateDiffFlowCorrectionsForNUASinTerms(TString type, TString ptOrEta);  
+    virtual void CalculateDiffFlowCorrectionsForNUASinTermsUsingParticleWeights(TString type, TString ptOrEta);  
     // ...
     //virtual void CalculateCorrelationsForDifferentialFlow2D(TString type); // type = RP or POI
     //virtual void CalculateCorrectionsForNonUniformAcceptanceForDifferentialFlowCosTerms(TString type); // type = RP or POI  
@@ -92,6 +98,9 @@ class AliFlowAnalysisWithQCumulants{
     virtual void EvaluateDiffFlowCorrelationsWithNestedLoops(AliFlowEventSimple* const anEvent, TString type, TString ptOrEta);
     virtual void EvaluateDiffFlowCorrelationsWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* const anEvent, TString type, TString ptOrEta); 
     virtual void EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoops(AliFlowEventSimple* const anEvent, TString type, TString ptOrEta);
+    virtual void EvaluateDiffFlowCorrectionTermsForNUAWithNestedLoopsUsingParticleWeights(AliFlowEventSimple* const anEvent, TString type, TString ptOrEta);
+    // 2d.) distributions of correlations:
+    virtual void StoreDistributionsOfCorrelations();
   // 3.) method Finish() and methods called within Finish():
   virtual void Finish();
     // 3a.) integrated flow:
@@ -268,6 +277,22 @@ class AliFlowAnalysisWithQCumulants{
   void SetDiffFlowCorrectionTermsForNUAHist(TH1D* const dfctfnh, Int_t const i, Int_t const j, Int_t const k, Int_t const l) {this->fDiffFlowCorrectionTermsForNUAHist[i][j][k][l] = dfctfnh;};
   TH1D* GetDiffFlowCorrectionTermsForNUAHist(Int_t i, Int_t j, Int_t k, Int_t l) const {return this->fDiffFlowCorrectionTermsForNUAHist[i][j][k][l];};  
   
+  // 5.) distributions of correlations:
+  // flags:
+  void SetStoreDistributions(Bool_t const storeDistributions) {this->fStoreDistributions = storeDistributions;};
+  Bool_t GetStoreDistributions() const {return this->fStoreDistributions;};
+  // profile:
+  void SetDistributionsFlags(TProfile* const distributionsFlags) {this->fDistributionsFlags = distributionsFlags;};
+  TProfile* GetDistributionsFlags() const {return this->fDistributionsFlags;};  
+  // histograms:
+  void SetDistributions(TH1D* const distributions, Int_t const i) {this->fDistributions[i] = distributions;};
+  TH1D* GetDistributions(Int_t i) const {return this->fDistributions[i];};  
+  // min and max values of correlations (ci is correlations index [0=<2>,1=<4>,2=<6>,3=<8>]):
+  void SetMinValueOfCorrelation(Int_t const ci, Double_t const minValue) {this->fMinValueOfCorrelation[ci] = minValue;};
+  Double_t GetMinValueOfCorrelation(Int_t ci) const {return this->fMinValueOfCorrelation[ci];};
+  void SetMaxValueOfCorrelation(Int_t const ci, Double_t const maxValue) {this->fMaxValueOfCorrelation[ci] = maxValue;};
+  Double_t GetMaxValueOfCorrelation(Int_t ci) const {return this->fMaxValueOfCorrelation[ci];};
+    
   // x.) debugging and cross-checking:
   void SetNestedLoopsList(TList* const nllist) {this->fNestedLoopsList = nllist;};
   TList* GetNestedLoopsList() const {return this->fNestedLoopsList;}; 
@@ -351,7 +376,7 @@ class AliFlowAnalysisWithQCumulants{
   //  3c.) event-by-event quantities:
   TMatrixD *fReQ; // fReQ[m][k] = sum_{i=1}^{M} w_{i}^{k} cos(m*phi_{i})
   TMatrixD *fImQ; // fImQ[m][k] = sum_{i=1}^{M} w_{i}^{k} sin(m*phi_{i})
-  TMatrixD *fSMpk; // fSM[p][k] = (sum_{i=1}^{M} w_{i}^{k})^{p}
+  TMatrixD *fSMpk; // fSM[p][k] = (sum_{i=1}^{M} w_{i}^{k})^{p+1}
   TH1D *fIntFlowCorrelationsEBE; // 1st bin: <2>, 2nd bin: <4>, 3rd bin: <6>, 4th bin: <8>
   TH1D *fIntFlowEventWeightsForCorrelationsEBE; // 1st bin: eW_<2>, 2nd bin: eW_<4>, 3rd bin: eW_<6>, 4th bin: eW_<8>
   TH1D *fIntFlowCorrelationsAllEBE; // to be improved (add comment)
@@ -429,8 +454,12 @@ class AliFlowAnalysisWithQCumulants{
   TProfile2D *fCorrectionTermsPro[2][2][2][2][2]; // [0=RP,1=POI][0=pW not used,1=pW used][0=e eW,1=ne eW][0=sin terms,1=cos terms][corr. terms' index]
         
   // 5.) distributions:
-  TList *fDistributionsList; // list to hold all distributions
-  TH1D *fDistributions[2][2][4]; // [0=pWeights not used,1=pWeights used][0=exact eWeights,1=non-exact eWeights][0=<2>,1=<4>,2=<6>,3=<8>]
+  TList *fDistributionsList; // list to hold all distributions of correlations
+  TProfile *fDistributionsFlags; // profile to hold all flags for distributions of correlations
+  Bool_t fStoreDistributions; // store or not distributions of correlations
+  TH1D *fDistributions[4]; // [0=distribution of <2>,1=distribution of <4>,2=distribution of <6>,3=distribution of <8>]
+  Double_t fMinValueOfCorrelation[4]; // min values of <2>, <4>, <6> and <8>
+  Double_t fMaxValueOfCorrelation[4]; // max values of <2>, <4>, <6> and <8>
     
   // x.) debugging and cross-checking:
   TList *fNestedLoopsList; // list to hold all profiles filled with nested loops
index e8dd694722c23978cd92e382432c892e279948e0..7d55bb4b87a438082324b51cca727ec848cce0b5 100644 (file)
@@ -1,11 +1,12 @@
-// Macro mergeOutput.C is used to merge output files. Its usage relies
-// on the following directory structure and naming conventions:
+// Macro mergeOutput.C is used to merge output files "AnalysisResults.root" locally. 
+// (To merge output files "AnalysisResults.root" on Grid use macro mergeOutputOnGrid.C.)
+// Its usage relies on the following directory structure and naming conventions:
 //  1.) In directory <dir> there are subdirectories <subdir1>, <subdir2>, ...;   
-//  2.) In each subdirectory <subdir*> there is a file named "AnalysisResults.root";
-//  3.) Copy macro mergeOutput.C in directory <dir> and launch it. It will 
-//      automatically access from each subdirectory a file "AnalysisResults.root"
-//      and by making use of TFileMerger utility it will produce a merged, large
-//      statisics file "mergedAnalysisResults.root" and save it in directory <dir>;
+//  2.) In subdirectory <subdir*> there is a file named "AnalysisResults.root";
+//  3.) Copy macro mergeOutput.C in directory <dir> and launch it. It will automatically 
+//      access from each subdirectory a file "AnalysisResults.root" and by making use of 
+//      TFileMerger utility it will produce a merged file "mergedAnalysisResults.root" 
+//      and save it in directory <dir>;
 //  4.) IMPORTANT: Macro mergeOutput.C must be used in a combination with macro redoFinish.C
 //      in order to get the analysis done on merged, large statistics sample. In particular,
 //      after you got the file "mergedAnalysisResults.root", copy macro redoFinish.C in 
 //  5.) REMARK: To see plots for some of the results use macro compareFlowResults.C. This macro
 //      accesses file "AnalysisResults.root" and produces couple of predefined example plots.        
 
-enum libModes {mLocal,mLocalSource};
-
-void mergeOutput(Int_t mode=mLocal)
+void mergeOutput()
 {
- // mode: if mode = mLocal: analyze data on your computer using aliroot
- //       if mode = mLocalSource: analyze data on your computer using root + source files
-
- TString outputFileName = "AnalysisResults.root"; // hardwired name of the output files to be merged
- const Int_t cycle = 10; // merging is done in cycles and this is the cycle period 
-
- if(cycle>100)
+ // Name of the output files to be merged:
+ TString outputFileName = "AnalysisResults.root";
+ // Name of the merged, large statistics file:
+ TString mergedFileName = "mergedAnalysisResults.root";
+ // Load needed flow libraries:
+ gSystem->AddIncludePath("-I$ROOTSYS/include");
+ gSystem->AddIncludePath("-I$ALICE_ROOT/include");
+ gSystem->Load("libPWG2flowCommon");
+ cerr<<"Library \"libPWG2flowCommon\" loaded ...."<<endl;
+ // For a large number of output files merging is done in cycles
+ // and this is the cycle period: 
+ const Int_t cycle = 500;
+ if(cycle>500)
  {
   cout<<"WARNING: Cycle is too big !!!!"<<endl; 
-  cout<<"         Set const Int_t cycle to smaller value in the macro."<<endl;
-  exit(0);
- }
- if(cycle<=0)
- {
-  cout<<"WARNING: Cycle must be a positive integer !!!!"<<endl;
-  cout<<"         Set const Int_t cycle to a positive integer in the macro."<<endl;
+  cout<<"         Set \"const Int_t cycle\" to smaller value in the macro."<<endl;
   exit(0);
  }
-
- // load needed libraries:                       
- LoadLibrariesMO(mode);  
-
- // standard magic:
+ // Standard magic:
  TString *baseDirPath = new TString(gSystem->pwd());
  TSystemDirectory *baseDir = new TSystemDirectory(".",baseDirPath->Data());          
  TList *listOfFilesInBaseDir = baseDir->GetListOfFiles();
+ TStopwatch timer;
+ timer.Start();
  // listOfFilesInBaseDir->Print();
  Int_t nFiles = listOfFilesInBaseDir->GetEntries();
- // loop over all files and add files AnalysisResults.root to TFileMerger:
- gSystem->cd(baseDirPath->Data());
+ // loop over all files and from each subdirectory add file AnalysisResults.root to TFileMerger:
  Int_t fileCounter = 0;
- TFileMerger *fileMerger = new TFileMerger();
+ TFileMerger *fileMerger = new TFileMerger(); 
  for(Int_t iFile=0;iFile<nFiles;iFile++)
  {
-  TSystemFile *presentFile = (TSystemFile*)listOfFilesInBaseDir->At(iFile);
-  // consider only subdirectories: 
-  if(!presentFile || !presentFile->IsDirectory() || strcmp(presentFile->GetName(), ".") == 0 || 
-     strcmp(presentFile->GetName(), "..") == 0) continue; 
-  
-  TString presentDirName = baseDirPath->Data();
-  (presentDirName+="/")+=presentFile->GetName();
-  presentDirName+="/";
-  // accessing the output AnalysisResults.root file in current subdirectory:
-  TString fileName = presentDirName; 
+  TSystemFile *currentFile = (TSystemFile*)listOfFilesInBaseDir->At(iFile);
+  // Consider only subdirectories: 
+  if(!currentFile || 
+     !currentFile->IsDirectory() || 
+     strcmp(currentFile->GetName(), ".") == 0 || 
+     strcmp(currentFile->GetName(), "..") == 0) continue; 
+  // Accessing the output file "AnalysisResults.root" in current subdirectory: 
+  TString currentSubDirName = baseDirPath->Data();
+  (currentSubDirName+="/")+=currentFile->GetName();
+  currentSubDirName+="/";
+  TString fileName = currentSubDirName; 
   fileName+=outputFileName.Data();
-  
   if(!(gSystem->AccessPathName(fileName.Data(),kFileExists)))
   {
    fileCounter++;
    fileMerger->AddFile(fileName.Data());
-   // merging in cycles:
+   // Merging in cycles:
    if(fileCounter % cycle == 0)
    {
-    TString *mergedFileForPreviousCycle = new TString("mergedAnalysisResults"); 
+    // If the merged output from previous cycle exists add it to TFileMerger:
+    TString *mergedFileForPreviousCycle = new TString("mergedCycle"); 
     (*mergedFileForPreviousCycle)+=(fileCounter/cycle - 1);
     (*mergedFileForPreviousCycle)+=".root";
-    
-    // if the merged output from previous cycle exists add it to TFileMerger:
     if(!(gSystem->AccessPathName(mergedFileForPreviousCycle->Data(),kFileExists)))
     {
      fileMerger->AddFile(mergedFileForPreviousCycle->Data());
-     // delete merged output from previous cycle:
-     TSystemFile *file = new TSystemFile(mergedFileForPreviousCycle->Data(),gSystem->pwd());
+     // Delete merged output from previous cycle:
+     TSystemFile *file = new TSystemFile(mergedFileForPreviousCycle->Data(),baseDirPath->Data());
      file->Delete();
      delete file;
-    }
-    
-    // create merged output for current cycle:
-    TString *mergedFileForCurrentCycle = new TString("mergedAnalysisResults"); 
+    }    
+    // Create merged output for current cycle:
+    TString *mergedFileForCurrentCycle = new TString("mergedCycle"); 
     (*mergedFileForCurrentCycle)+=(fileCounter/cycle);
-    (*mergedFileForCurrentCycle)+=".root";
-    
+    (*mergedFileForCurrentCycle)+=".root";    
     fileMerger->OutputFile(mergedFileForCurrentCycle->Data());
     fileMerger->Merge();
-    fileMerger->Reset();
-    
+    fileMerger->Reset();    
     delete mergedFileForPreviousCycle;
     delete mergedFileForCurrentCycle;
    } // end of if(fileCounter % cycle == 0) 
   } // end of if(!(gSystem->AccessPathName(fileName.Data(),kFileExists))) 
-  
  } // end of for(Int_t iFile=0;iFile<nFiles;iFile++)
  
  
  //=================================================================================================
  
  
- // final merging at the end of the day:
+ // Final merging at the end of the day (3 distinct cases):
+ if(fileCounter==0)
+ {
+  cout<<endl;
+  cout<<"Merger wasn't lucky: Couldn't find a single file "<<outputFileName.Data()<<" to merge"<<endl;
+  cout<<"in subdirectories of directory "<<baseDirPath->Data()<<endl;
+  cout<<endl;
+  exit(0);
+ }
  gSystem->cd(baseDirPath->Data());
-
  if(fileCounter < cycle)
  {
-  TString *mergedFileFinal = new TString("mergedAnalysisResults"); 
-  (*mergedFileFinal)+=".root";
-  fileMerger->OutputFile(mergedFileFinal->Data());
+  fileMerger->OutputFile(mergedFileName.Data());
   fileMerger->Merge();
-  delete mergedFileFinal;
- } else if (fileCounter % cycle == 0)
+ } else if(fileCounter % cycle == 0)
    {
-    TString *mergedFileForPreviousCycle = new TString("mergedAnalysisResults"); 
+    TString *mergedFileForPreviousCycle = new TString("mergedCycle"); 
     (*mergedFileForPreviousCycle)+=(fileCounter/cycle);
     (*mergedFileForPreviousCycle)+=".root";    
     if(!(gSystem->AccessPathName(mergedFileForPreviousCycle->Data(),kFileExists)))
     {
-     TString *mergedFileFinal = new TString("mergedAnalysisResults"); 
-     (*mergedFileFinal)+=".root";
-     gSystem->Rename(mergedFileForPreviousCycle->Data(),mergedFileFinal->Data());
-    }    
+     gSystem->Rename(mergedFileForPreviousCycle->Data(),mergedFileName.Data());
+    }     
    } else
      {
-      TString *mergedFileForPreviousCycle = new TString("mergedAnalysisResults"); 
+      TString *mergedFileForPreviousCycle = new TString("mergedCycle"); 
       (*mergedFileForPreviousCycle)+=((Int_t)fileCounter/cycle);
-      (*mergedFileForPreviousCycle)+=".root";
-      
+      (*mergedFileForPreviousCycle)+=".root";      
       fileMerger->AddFile(mergedFileForPreviousCycle->Data());
-      
-      // delete merged output from previous cycle:
-      TSystemFile *file = new TSystemFile(mergedFileForPreviousCycle->Data(),gSystem->pwd());
+      // Delete merged output from previous cycle:
+      TSystemFile *file = new TSystemFile(mergedFileForPreviousCycle->Data(),baseDirPath->Data());
       file->Delete();
       delete file;
-      
-      TString *mergedFileFinal = new TString("mergedAnalysisResults"); 
-      (*mergedFileFinal)+=".root";
-      fileMerger->OutputFile(mergedFileFinal->Data());
+      fileMerger->OutputFile(mergedFileName.Data());
       fileMerger->Merge();
-           
       delete mergedFileForPreviousCycle;
-      delete mergedFileFinal;
      }
  delete fileMerger;
  delete baseDirPath;
  delete baseDir;
-
-} // end of void mergeOutput(Int_t mode=mLocal)
-
-void LoadLibrariesMO(const libModes mode) {
-  
-  //--------------------------------------
-  // Load the needed libraries most of them already loaded by aliroot
-  //--------------------------------------
-  //gSystem->Load("libTree");
-  gSystem->Load("libGeom");
-  gSystem->Load("libVMC");
-  gSystem->Load("libXMLIO");
-  gSystem->Load("libPhysics");
-  
-  //----------------------------------------------------------
-  // >>>>>>>>>>> Local mode <<<<<<<<<<<<<< 
-  //----------------------------------------------------------
-  if (mode==mLocal) {
-    //--------------------------------------------------------
-    // If you want to use already compiled libraries 
-    // in the aliroot distribution
-    //--------------------------------------------------------
-
-  //==================================================================================  
-  //load needed libraries:
-  gSystem->AddIncludePath("-I$ROOTSYS/include");
-  //gSystem->Load("libTree");
-
-  // for AliRoot
-  gSystem->AddIncludePath("-I$ALICE_ROOT/include");
-  gSystem->Load("libANALYSIS");
-  gSystem->Load("libPWG2flowCommon");
-  //cerr<<"libPWG2flowCommon loaded ..."<<endl;
-  
-  }
-  
-  else if (mode==mLocalSource) {
  
-    // In root inline compile
-  
-    // Constants  
-    gROOT->LoadMacro("AliFlowCommon/AliFlowCommonConstants.cxx+");
-    gROOT->LoadMacro("AliFlowCommon/AliFlowLYZConstants.cxx+");
-    gROOT->LoadMacro("AliFlowCommon/AliFlowCumuConstants.cxx+");
-    
-    // Flow event
-    gROOT->LoadMacro("AliFlowCommon/AliFlowVector.cxx+"); 
-    gROOT->LoadMacro("AliFlowCommon/AliFlowTrackSimple.cxx+");    
-    gROOT->LoadMacro("AliFlowCommon/AliFlowEventSimple.cxx+");
-    
-    // Cuts
-    gROOT->LoadMacro("AliFlowCommon/AliFlowTrackSimpleCuts.cxx+");    
-    
-    // Output histosgrams
-    gROOT->LoadMacro("AliFlowCommon/AliFlowCommonHist.cxx+");
-    gROOT->LoadMacro("AliFlowCommon/AliFlowCommonHistResults.cxx+");
-    gROOT->LoadMacro("AliFlowCommon/AliFlowLYZHist1.cxx+");
-    gROOT->LoadMacro("AliFlowCommon/AliFlowLYZHist2.cxx+");
-       
-    cout << "finished loading macros!" << endl;  
-    
-  } // end of else if (mode==mLocalSource) 
-  
-} // end of void LoadLibrariesMO(const libModes mode)
+ cout<<endl;
+ timer.Stop();
+ timer.Print(); 
+ cout<<endl;
+ if(!(gSystem->AccessPathName(mergedFileName.Data(),kFileExists)))
+ {
+  cout<<"Merging went successfully: "<<fileCounter<<" files "<<outputFileName.Data()<<" were"<<endl;
+  cout<<"merged into the newly created file "<<mergedFileName.Data()<<"."<<endl;
+  cout<<endl;
+  cout<<"Launch now macro redoFinish.C to get the correct final results."<<endl;
+ } else
+   {
+    cout<<"WARNING: Merging failed !!!!"<<endl;
+   } 
+ cout<<endl;
+} // End of void mergeOutput()
diff --git a/PWG2/FLOW/macros/mergeOutputOnGrid.C b/PWG2/FLOW/macros/mergeOutputOnGrid.C
new file mode 100644 (file)
index 0000000..0632515
--- /dev/null
@@ -0,0 +1,108 @@
+// Macro mergeOutputOnGrid.C is used to merge output files "AnalysisResults.root" 
+// from the AliEn file catalog. It produces the merged, larged statistics file
+// "mergedAnalysisResults.root". To get the final flow estimates for the large
+// statistics file use macro redoFinish.C (see comments in macro mergeOutput.C
+// for more details). 
+
+void mergeOutputOnGrid(const char* gridPath = "/alice/cern.ch/user/a/abilandz/flowAnalysisOnGrid/output*") 
+{
+ // Name of the output files to be merged:
+ TString outputFileName = "AnalysisResults.root";
+ // Name of the merged, large statistics file:
+ TString mergedFileName = "mergedAnalysisResults.root";
+ // Load needed flow libraries:
+ gSystem->AddIncludePath("-I$ROOTSYS/include");
+ gSystem->AddIncludePath("-I$ALICE_ROOT/include");
+ gSystem->Load("libPWG2flowCommon");
+ cerr<<"Library \"libPWG2flowCommon\" loaded ...."<<endl;
+ // Connect to the GRID:
+ TGrid::Connect("alien://");  
+ // Query the file catalog and get a TGridResult:
+ TString queryPattern = outputFileName;
+ TGridResult *result = gGrid->Query(gridPath,queryPattern.Data());
+ Int_t nEntries = result->GetEntries();
+ Printf("\nFound %d files %s in %s ....\n",nEntries,outputFileName.Data(),gridPath);   
+ // Create a TFileMerger:
+ TFileMerger *fileMerger = new TFileMerger(); 
+ // For a large number of output files merging is done in cycles
+ // and this is the cycle period: 
+ const Int_t cycle = 500;
+ if(cycle>500)
+ {
+  cout<<"WARNING: Cycle is too big !!!!"<<endl; 
+  cout<<"         Set \"const Int_t cycle\" to smaller value in the macro."<<endl;
+  exit(0);
+ }
+ Int_t fileCounter = 0;
+ TString *baseDirPath = new TString(gSystem->pwd());
+ // Loop over the TGridResult's entries and add the files to the TFileMerger:
+ TString alienUrl;
+ for(Int_t i=0;i<nEntries;i++) 
+ {
+  alienUrl = result->GetKey(i,"turl");
+  //Printf("%s",alienUrl.Data());
+  fileMerger->AddFile(alienUrl.Data());
+  fileCounter++;
+  // Merging in cycles:
+  if(fileCounter % cycle == 0)
+  {
+   // If the merged output from previous cycle exists add it to TFileMerger:
+   TString *mergedFileForPreviousCycle = new TString("mergedCycle"); 
+   (*mergedFileForPreviousCycle)+=(fileCounter/cycle - 1);
+   (*mergedFileForPreviousCycle)+=".root";
+   if(!(gSystem->AccessPathName(mergedFileForPreviousCycle->Data(),kFileExists)))
+   {
+    fileMerger->AddFile(mergedFileForPreviousCycle->Data());
+    // Delete merged output from previous cycle:
+    TSystemFile *file = new TSystemFile(mergedFileForPreviousCycle->Data(),baseDirPath->Data());
+    file->Delete();
+    delete file;
+   }    
+   // Create merged output for current cycle:
+   TString *mergedFileForCurrentCycle = new TString("mergedCycle"); 
+   (*mergedFileForCurrentCycle)+=(fileCounter/cycle);
+   (*mergedFileForCurrentCycle)+=".root";    
+   fileMerger->OutputFile(mergedFileForCurrentCycle->Data());
+   fileMerger->Merge();
+   fileMerger->Reset();    
+   delete mergedFileForPreviousCycle;
+   delete mergedFileForCurrentCycle;
+  } // end of if(fileCounter % cycle == 0) 
+ } // end of for(Int_t i=0;i<nEntries;i++) 
+
+ //=================================================================================================
+ // Final merging at the end of the day (3 distinct cases):
+ gSystem->cd(baseDirPath->Data());
+ if(fileCounter < cycle)
+ {
+  fileMerger->OutputFile(mergedFileName.Data());
+  fileMerger->Merge();
+ } else if(fileCounter % cycle == 0)
+   {
+    TString *mergedFileForPreviousCycle = new TString("mergedCycle"); 
+    (*mergedFileForPreviousCycle)+=(fileCounter/cycle);
+    (*mergedFileForPreviousCycle)+=".root";    
+    if(!(gSystem->AccessPathName(mergedFileForPreviousCycle->Data(),kFileExists)))
+    {
+     gSystem->Rename(mergedFileForPreviousCycle->Data(),mergedFileName.Data());
+    }     
+   } else
+     {
+      TString *mergedFileForPreviousCycle = new TString("mergedCycle"); 
+      (*mergedFileForPreviousCycle)+=((Int_t)fileCounter/cycle);
+      (*mergedFileForPreviousCycle)+=".root";      
+      fileMerger->AddFile(mergedFileForPreviousCycle->Data());
+      // Delete merged output from previous cycle:
+      TSystemFile *file = new TSystemFile(mergedFileForPreviousCycle->Data(),baseDirPath->Data());
+      file->Delete();
+      delete file;
+      fileMerger->OutputFile(mergedFileName.Data());
+      fileMerger->Merge();
+      delete mergedFileForPreviousCycle;
+     }
+ delete fileMerger;
+ delete baseDirPath;
+}
index ce4cb70763cf956a409dd3919d6df2a4b42ec9e0..71a634654b65c85e6d1dd9b9e14e4a8a2184ba55 100644 (file)
@@ -3,23 +3,27 @@
 
 enum libModes {mLocal,mLocalSource};
 
-void redoFinish(TString type="", Int_t mode=mLocal)
+void redoFinish(TString type="ESD", Int_t mode=mLocal)
 {
  // type: type of analysis can be ESD, AOD, MC, ESDMC0, ESDMC1
  //       (if type="" output files are from MC simulation (default))
  // mode: if mode = mLocal: analyze data on your computer using aliroot
  //       if mode = mLocalSource: analyze data on your computer using root + source files
 
- TString mergedFileName = "mergedAnalysisResults.root"; // hardwired name of merged, large statistics file obtained with macro mergeOutput.C 
- TString outputFileName = "AnalysisResults.root"; // final output file name holding final results for large statistics sample
+ // Name of merged, large statistics file obtained with macro mergeOutput.C: 
+ TString mergedFileName = "mergedAnalysisResults.root";
+ // Final output file name holding final results for large statistics sample:
+ TString outputFileName = "AnalysisResults.root"; 
  
  const Int_t nMethods = 10;
  TString method[nMethods] = {"MCEP","SP","GFC","QC","FQD","LYZ1SUM","LYZ1PROD","LYZ2SUM","LYZ2PROD","LYZEP"};
  
- // load needed libraries:                       
+ // Load needed libraries:                       
  LoadLibrariesRF(mode);  
-  
- // access the merged, large statistics file obtained with macro mergeOutput.C:
+   
+ // Accessing the merged, large statistics file obtained with macro mergeOutput.C.
+ // On this file the flow analysis will be redone, its content modified to store
+ // the correct final results and eventually renamed into TString outputFileName.
  TString pwd(gSystem->pwd());
  pwd+="/";
  pwd+=mergedFileName.Data();
@@ -28,51 +32,48 @@ void redoFinish(TString type="", Int_t mode=mLocal)
  {
   cout<<"WARNING: You do not have a merged output file:"<<endl;
   cout<<"         "<<pwd.Data()<<endl;
+  cout<<endl;
+  cout<<"In order to get that file use macro mergeOutput.C first."<<endl;
+  cout<<endl;
   exit(0);
  } else 
    {
-    mergedFile = TFile::Open(pwd.Data(),"READ");
+    // Create temporarily copy of "mergedAnalysisResults.root":
+    TSystemFile *fileTemp = new TSystemFile(mergedFileName.Data(),".");
+    fileTemp->Copy("mergedAnalysisResultsTemp.root");
+    delete fileTemp;
+    // Access merged file:
+    mergedFile = TFile::Open(pwd.Data(),"UPDATE");
    }
-
- // access from mergedFile the merged files for each method and from them the lists holding histograms:
+   
+ // Access from mergedFile the merged files for each method and from them the lists holding histograms:
  TString fileName[nMethods]; 
  TDirectoryFile *dirFile[nMethods] = {NULL}; 
  TString listName[nMethods]; 
  TList *list[nMethods] = {NULL};
  for(Int_t i=0;i<nMethods;i++)
  {
-  // form a file name for each method:
+  // Form a file name for each method:
   fileName[i]+="output";
   fileName[i]+=method[i].Data();
   fileName[i]+="analysis";
   fileName[i]+=type.Data();
-  // access this file:
+  // Access this file:
   dirFile[i] = (TDirectoryFile*)mergedFile->FindObjectAny(fileName[i].Data());
-  // form a list name for each method:
+  // Form a list name for each method:
   listName[i]+="cobj";
   listName[i]+=method[i].Data();
-  // access this list and close the file:
+  // Access this list:
   if(dirFile[i])
   {
    dirFile[i]->GetObject(listName[i].Data(),list[i]);
-   dirFile[i]->Close();
-  }
- } 
- // close the mergedFile:
- mergedFile->Close();
- // create a new file which will hold the final results of all methods:
- TFile *outputFile = new TFile(outputFileName.Data(),"RECREATE");
+  } else 
+    {
+     cout<<"WARNING: Couldn't find a file "<<fileName[i].Data()<<".root !!!!"<<endl;
+    }
+ } // End of for(Int_t i=0;i<nMethods;i++)
 
- // create a new file for each method wich will hold list with final results:
- TDirectoryFile *dirFileFinal[nMethods] = {NULL};
- for(Int_t i=0;i<nMethods;i++)
- {
-  if(dirFile[i]) dirFileFinal[i] = new TDirectoryFile(fileName[i].Data(),fileName[i].Data());
- } 
-
- // redo finish for each method (REMARK: this implementation can be dramatically improved!):
+ // Redo finish for each method (REMARK: this implementation can be dramatically improved!):
  // MCEP:
  for(Int_t i=0;i<nMethods;i++)
  {
@@ -81,8 +82,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    AliFlowAnalysisWithMCEventPlane* mcep = new AliFlowAnalysisWithMCEventPlane();
    mcep->GetOutputHistograms(list[i]);
    mcep->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   } 
   // SP:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjSP")==0)
@@ -90,8 +91,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    AliFlowAnalysisWithScalarProduct* sp = new AliFlowAnalysisWithScalarProduct();
    sp->GetOutputHistograms(list[i]);
    sp->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   } 
   // GFC:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjGFC")==0)
@@ -99,8 +100,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    AliFlowAnalysisWithCumulants* gfc = new AliFlowAnalysisWithCumulants();
    gfc->GetOutputHistograms(list[i]);
    gfc->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   } 
   // QC:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjQC")==0)
@@ -108,8 +109,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    AliFlowAnalysisWithQCumulants* qc = new AliFlowAnalysisWithQCumulants();
    qc->GetOutputHistograms(list[i]);
    qc->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   } 
   // FQD:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjFQD")==0)
@@ -117,8 +118,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    AliFlowAnalysisWithFittingQDistribution* fqd = new AliFlowAnalysisWithFittingQDistribution();
    fqd->GetOutputHistograms(list[i]);
    fqd->Finish(kTRUE);
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   }
   // LYZ1SUM:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjLYZ1SUM")==0)
@@ -128,8 +129,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    lyz1sum->SetUseSum(kTRUE);       
    lyz1sum->GetOutputHistograms(list[i]);
    lyz1sum->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   } 
   // LYZ2SUM:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjLYZ2SUM")==0)
@@ -139,8 +140,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    lyz2sum->SetUseSum(kTRUE);       
    lyz2sum->GetOutputHistograms(list[i]);
    lyz2sum->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   }
   // LYZ1PROD:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjLYZ1PROD")==0)
@@ -150,8 +151,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    lyz1prod->SetUseSum(kFALSE);       
    lyz1prod->GetOutputHistograms(list[i]);
    lyz1prod->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   }    
   // LYZ2PROD:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjLYZ2PROD")==0)
@@ -161,8 +162,8 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    lyz2prod->SetUseSum(kFALSE);       
    lyz2prod->GetOutputHistograms(list[i]);
    lyz2prod->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   }
   // LYZEP:
   else if(list[i] && strcmp(list[i]->GetName(),"cobjLYZEP")==0)
@@ -170,16 +171,24 @@ void redoFinish(TString type="", Int_t mode=mLocal)
    AliFlowAnalysisWithLYZEventPlane* lyzep = new AliFlowAnalysisWithLYZEventPlane();
    lyzep->GetOutputHistograms(list[i]);
    lyzep->Finish();
-   dirFileFinal[i]->Add(list[i]);
-   dirFileFinal[i]->Write(dirFileFinal[i]->GetName(),TObject::kSingleKey);
+   dirFile[i]->Add(list[i],kTRUE);
+   dirFile[i]->Write(dirFile[i]->GetName(),TObject::kSingleKey+TObject::kOverwrite);
   }                
- } // end of for(Int_t i=0;i<nMethods;i++)
+ } // End of for(Int_t i=0;i<nMethods;i++)
 
- // close the final output file:
- outputFile->Close();
- delete outputFile;
+ // Close the final output file:
+ mergedFile->Close();
+ delete mergedFile;
+ // Giving the final names:
+ TSystemFile *outputFileFinal = new TSystemFile(mergedFileName.Data(),".");
+ outputFileFinal->Rename(outputFileName.Data());
+ delete outputFileFinal; 
+ TSystemFile *mergedFileFinal = new TSystemFile("mergedAnalysisResultsTemp.root",".");
+ mergedFileFinal->Rename(mergedFileName.Data());
+ delete mergedFileFinal;
      
-} // end of void reCallFinish(Int_t mode=mLocal)
+} // End of void redoFinish(Int_t mode=mLocal)
  
 void LoadLibrariesRF(const libModes mode) {