Merged
181 changes: 94 additions & 87 deletions PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx
@@ -30,7 +30,7 @@
#include <Math/GenVector/Boost.h>
#include <Math/Vector3D.h>
#include <Math/Vector4D.h>
#include <TLorentzVector.h>

Check failure on line 33 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [root/lorentz-vector]: Do not use the TLorentzVector legacy class. Use std::array with RecoDecay methods or the ROOT::Math::LorentzVector template instead.
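For reference, a minimal sketch of the replacement the linter asks for; the kinematic inputs are hypothetical placeholders and only the Λ mass value is the PDG number, so this is an illustration rather than part of the change:

#include <Math/Vector4D.h>

// hypothetical kinematics, for illustration only
const double pt = 1.2, eta = 0.3, phi = 1.0;
constexpr double massLambda = 1.115683; // GeV/c^2 (PDG)
ROOT::Math::PtEtaPhiMVector lambdaVec(pt, eta, phi, massLambda); // replaces TLorentzVector::SetPtEtaPhiM
const double m = lambdaVec.M();        // invariant mass
const double y = lambdaVec.Rapidity(); // rapidity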
#include <TMath.h>
#include <TRandom3.h>

@@ -40,7 +40,7 @@
#include <cmath> // for std::fabs
#include <cstdint>
#include <deque>
#include <iostream>

Check failure on line 43 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [include-iostream]: Do not include iostream. Use O2 logging instead.
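A minimal sketch of the logging style the linter expects, assuming the O2 Framework LOGF/LOG macros that this task already uses elsewhere; the message and variable are illustrative only:

// instead of: #include <iostream>  and  std::cout << "candidates: " << nRepl << std::endl;
const int nRepl = 3; // illustrative value
LOGF(info, "replacement candidates found: %d", nRepl);  // printf-style O2 logging
LOG(info) << "replacement candidates found: " << nRepl; // stream-style equivalent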
#include <iterator>
#include <limits>
#include <random>
@@ -531,7 +531,6 @@
const ROOT::Math::PtEtaPhiMVector& daughpart1, const ROOT::Math::PtEtaPhiMVector& daughpart2,
int datatype, float mixpairweight, int mixedLeg)
{

auto lambda1Mass = 0.0;
auto lambda2Mass = 0.0;
if (!usePDGM) {
@@ -622,35 +621,29 @@
double epsWeightMixedLeg = 1.0;
if (useweight && datatype == 1) { // only for ME
if (mixedLeg == 1) {
// Only leg 1 is from the mixing pool
double w1 = 1.0;
if (tag1 == 0) { // Λ
if (hweight1) {
w1 = hweight1->GetBinContent(hweight1->FindBin(dphi1, deta1, pt1));
}
} else { // Λbar
if (hweight4) {
w1 = hweight4->GetBinContent(hweight4->FindBin(dphi1, deta1, pt1));
}
}
if (w1 > 0.0 && std::isfinite(w1)) {
epsWeightMixedLeg = w1;
if (tag1 == 0 && tag2 == 0) {
w1 = hweight1->GetBinContent(hweight1->FindBin(dphi1, deta1, pt1));
} else if (tag1 == 0 && tag2 == 1) {
w1 = hweight2->GetBinContent(hweight2->FindBin(dphi1, deta1, pt1));
} else if (tag1 == 1 && tag2 == 0) {
w1 = hweight3->GetBinContent(hweight3->FindBin(dphi1, deta1, pt1));
} else if (tag1 == 1 && tag2 == 1) {
w1 = hweight4->GetBinContent(hweight4->FindBin(dphi1, deta1, pt1));
}
epsWeightMixedLeg = w1;
} else if (mixedLeg == 2) {
// Only leg 2 is from the mixing pool
double w2 = 1.0;
if (tag2 == 0) { // Λ
if (hweight12) {
w2 = hweight12->GetBinContent(hweight12->FindBin(dphi2, deta2, pt2));
}
} else { // Λbar
if (hweight42) {
w2 = hweight42->GetBinContent(hweight42->FindBin(dphi2, deta2, pt2));
}
}
if (w2 > 0.0 && std::isfinite(w2)) {
epsWeightMixedLeg = w2;
if (tag1 == 0 && tag2 == 0) {
w2 = hweight12->GetBinContent(hweight12->FindBin(dphi2, deta2, pt2));
} else if (tag1 == 0 && tag2 == 1) {
w2 = hweight22->GetBinContent(hweight22->FindBin(dphi2, deta2, pt2));
} else if (tag1 == 1 && tag2 == 0) {
w2 = hweight32->GetBinContent(hweight32->FindBin(dphi2, deta2, pt2));
} else if (tag1 == 1 && tag2 == 1) {
w2 = hweight42->GetBinContent(hweight42->FindBin(dphi2, deta2, pt2));
}
epsWeightMixedLeg = w2;
}
}

@@ -697,46 +690,55 @@
} else if (datatype == 1) {
double weight = mixpairweight;
if (useweight) {
if (usebothweight) {
weight = mixpairweight / (epsWeightMixedLeg);
} else {
weight = mixpairweight / (epsWeightMixedLeg);
}
weight = mixpairweight / (epsWeightMixedLeg);
}
if (weight <= 0.0) {
weight = 1.0;
}
// LOGF(info, Form("Getting alignment offsets from the CCDB...%2.2f",epsWeightMixedLeg));
histos.fill(HIST("hPtYMix"), particle1.Pt(), particle1.Rapidity(), weight);
if (tag1 == 0 && tag2 == 0) {
histos.fill(HIST("ME_LL"), dphi1, deta1, pt1, mixpairweight);
histos.fill(HIST("ME_LL2"), dphi2, deta2, pt2, mixpairweight);
if (mixedLeg == 1) {
histos.fill(HIST("ME_LL"), dphi1, deta1, pt1, weight);
} else if (mixedLeg == 2) {
histos.fill(HIST("ME_LL2"), dphi2, deta2, pt2, weight);
}
histos.fill(HIST("hSparseLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaR, weight);
if (useAdditionalHisto) {
histos.fill(HIST("hSparseRapLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaRap, weight);
histos.fill(HIST("hSparsePhiLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, dphi_pair, weight);
histos.fill(HIST("hSparsePairMassLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, pairDummy.M(), weight);
}
} else if (tag1 == 0 && tag2 == 1) {
histos.fill(HIST("ME_LAL"), dphi1, deta1, pt1, mixpairweight);
histos.fill(HIST("ME_LAL2"), dphi2, deta2, pt2, mixpairweight);
if (mixedLeg == 1) {
histos.fill(HIST("ME_LAL"), dphi1, deta1, pt1, weight);
} else if (mixedLeg == 2) {
histos.fill(HIST("ME_LAL2"), dphi2, deta2, pt2, weight);
}
histos.fill(HIST("hSparseLambdaAntiLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaR, weight);
if (useAdditionalHisto) {
histos.fill(HIST("hSparseRapLambdaAntiLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaRap, weight);
histos.fill(HIST("hSparsePhiLambdaAntiLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, dphi_pair, weight);
histos.fill(HIST("hSparsePairMassLambdaAntiLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, pairDummy.M(), weight);
}
} else if (tag1 == 1 && tag2 == 0) {
histos.fill(HIST("ME_ALL"), dphi1, deta1, pt1, mixpairweight);
histos.fill(HIST("ME_ALL2"), dphi2, deta2, pt2, mixpairweight);
if (mixedLeg == 1) {
histos.fill(HIST("ME_ALL"), dphi1, deta1, pt1, weight);
} else if (mixedLeg == 2) {
histos.fill(HIST("ME_ALL2"), dphi2, deta2, pt2, weight);
}
histos.fill(HIST("hSparseAntiLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaR, weight);
if (useAdditionalHisto) {
histos.fill(HIST("hSparseRapAntiLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaRap, weight);
histos.fill(HIST("hSparsePhiAntiLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, dphi_pair, weight);
histos.fill(HIST("hSparsePairMassAntiLambdaLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, pairDummy.M(), weight);
}
} else if (tag1 == 1 && tag2 == 1) {
histos.fill(HIST("ME_ALAL"), dphi1, deta1, pt1, mixpairweight);
histos.fill(HIST("ME_ALAL2"), dphi2, deta2, pt2, mixpairweight);
if (mixedLeg == 1) {
histos.fill(HIST("ME_ALAL"), dphi1, deta1, pt1, weight);
} else if (mixedLeg == 2) {
histos.fill(HIST("ME_ALAL2"), dphi2, deta2, pt2, weight);
}
histos.fill(HIST("hSparseAntiLambdaAntiLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaR, weight);
if (useAdditionalHisto) {
histos.fill(HIST("hSparseRapAntiLambdaAntiLambdaMixed"), particle1.M(), particle2.M(), cosThetaDiff, deltaRap, weight);
Expand Down Expand Up @@ -816,7 +818,7 @@
{
auto collOldIndex = -999;
std::vector<bool> t1Used;
for (auto& [collision1, collision2] : selfCombinations(colBinning, nEvtMixing, -1, collisions, collisions)) {

Check failure on line 821 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
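The fix the linter suggests is a one-word change on the loop above, shown here as an illustration (same structured bindings, just bound as const references since the iterators are not modified in the body):

for (auto const& [collision1, collision2] : selfCombinations(colBinning, nEvtMixing, -1, collisions, collisions)) {
  // ... loop body unchanged ...
}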
// LOGF(info, "Mixed event collisions: (%d, %d)", collision1.index(), collision2.index());
// auto centrality = collision1.cent();
auto groupV01 = V0s.sliceBy(tracksPerCollisionV0, collision1.index());
@@ -830,7 +832,7 @@
// std::vector<bool> t1Used(groupV01.size(), false); // <-- reset here
collOldIndex = collNewIndex;
}
for (auto& [t1, t3] : soa::combinations(o2::soa::CombinationsFullIndexPolicy(groupV01, groupV03))) {

Check failure on line 835 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
if (t1Used[t1.index()]) {
continue;
}
@@ -885,7 +887,7 @@
auto nBins = colBinning.getAllBinsCount();
std::vector<std::deque<std::pair<int, AllTrackCandidates>>> eventPools(nBins);

for (auto& collision1 : collisions) {

Check failure on line 890 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
int bin = colBinning.getBin(std::make_tuple(collision1.posz(), collision1.cent()));
auto poolA = V0s.sliceBy(tracksPerCollisionV0, collision1.index());
// float centrality = collision1.cent();
@@ -893,7 +895,7 @@
// <<< CHANGED: map old collision index → set of (t2.idx, t3.idx) we've already filled
std::unordered_map<int, std::set<std::pair<int, int>>> seenMap;

for (auto& [t1, t2] : soa::combinations(o2::soa::CombinationsFullIndexPolicy(poolA, poolA))) {

Check failure on line 898 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
if (!selectionV0(t1) || !selectionV0(t2))
continue;
if (t2.index() <= t1.index())
@@ -909,7 +911,7 @@
AllTrackCandidates& poolB = it->second;

int nRepl = 0;
for (auto& t3 : poolB) {

Check failure on line 914 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
if (selectionV0(t3) && checkKinematics(t1, t3)) {
++nRepl;
}
@@ -918,7 +920,7 @@
continue;
float invN = 1.0f / static_cast<float>(nRepl);

for (auto& t3 : poolB) {

Check failure on line 923 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
if (!(selectionV0(t3) && checkKinematics(t1, t3))) {
continue;
}
@@ -972,7 +974,7 @@
auto nBins = colBinning.getAllBinsCount();
std::vector<std::deque<std::pair<int, AllTrackCandidates>>> eventPools(nBins);

for (auto& collision1 : collisions) {

Check failure on line 977 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
const int bin = colBinning.getBin(std::make_tuple(collision1.posz(), collision1.cent()));

// if pool empty, push and continue
@@ -988,7 +990,7 @@
auto poolA = V0s.sliceBy(tracksPerCollisionV0, collision1.index());

// loop over SE unordered pairs (t1,t2)
for (auto& [t1, t2] : soa::combinations(o2::soa::CombinationsFullIndexPolicy(poolA, poolA))) {

Check failure on line 993 in PWGLF/Tasks/Strangeness/lambdaspincorrderived.cxx (GitHub Actions / O2 linter) [const-ref-in-for-loop]: Use constant references for non-modified iterators in range-based for loops.
if (!selectionV0(t1) || !selectionV0(t2))
continue;
if (t2.index() <= t1.index())
@@ -1064,17 +1066,17 @@
}
PROCESS_SWITCH(lambdaspincorrderived, processMEV3, "Process data ME (first-leg, pair-3D maps)", false);

// ---------------------- minimal helpers you already use ----------------------
static constexpr int N_STATUS = 2; // v0Status ∈ {0,1}

struct MixBinner {
// constructed from the task's configurables; φ is assumed already constrained into [0, 2π)
float ptMin, ptMax, ptStep;
float etaMin, etaMax, etaStep;
float phiMin, phiMax, phiStep;

// Mass binning: [1.09, 1.14) with 50 bins (1e-3 GeV/c^2)
// if you want 1 mass bin (effectively “on/off”), keep nM_=1
static constexpr float mMin = 1.09f;
static constexpr float mMax = 1.14f; // exclusive
static constexpr float mMax = 1.14f;
static constexpr int nM_ = 1;
static constexpr float mStep = (mMax - mMin) / nM_;

@@ -1088,7 +1090,6 @@
ptStep = (ptStep > 0.f ? ptStep : 0.1f);
etaStep = (etaStep > 0.f ? etaStep : 0.1f);
phiStep = (phiStep > 0.f ? phiStep : 0.1f);

nPt_ = std::max(1, static_cast<int>(std::floor((ptMax - ptMin) / ptStep + 0.5f)));
nEta_ = std::max(1, static_cast<int>(std::floor((etaMax - etaMin) / etaStep + 0.5f)));
nPhi_ = std::max(1, static_cast<int>(std::ceil((phiMax - phiMin) / phiStep)));
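// Illustration only, with hypothetical settings: ptMin = 0.5, ptMax = 4.0, ptStep = 0.4
// gives nPt_ = floor(3.5 / 0.4 + 0.5) = floor(9.25) = 9 bins, while phiMin = 0, phiMax = 2*pi,
// phiStep = 0.1 gives nPhi_ = ceil(6.2832 / 0.1) = 63 bins (the last phi bin is narrower).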
@@ -1108,19 +1109,18 @@
if (b < 0)
return -1;
if (b >= nBins)
b = nBins - 1; // clamp exact-top edge
b = nBins - 1;
return b;
}

inline int ptBin(float pt) const { return binFromValue(pt, ptMin, ptStep, nPt_); }
inline int etaBin(float eta) const { return binFromValue(eta, etaMin, etaStep, nEta_); }
inline int phiBin(float phi) const { return binFromValue(phi, phiMin, phiStep, nPhi_); } // φ already constrained upstream
inline int etaBin(float e) const { return binFromValue(e, etaMin, etaStep, nEta_); }
inline int phiBin(float ph) const { return binFromValue(ph, phiMin, phiStep, nPhi_); }
inline int massBin(float m) const { return binFromValue(m, mMin, mStep, nM_); }
};

struct BufferCand {
int64_t collisionIdx; // from col.index()
int64_t rowIndex; // global row id in V0s
int64_t collisionIdx;
int64_t rowIndex;
uint8_t v0Status;
uint16_t ptBin, etaBin, phiBin, mBin;
};
@@ -1130,16 +1130,16 @@
int64_t rowIndex;
};

// 6D key: (colBin, status, pt, eta, phi, mass)
static inline size_t linearKey(int colBin, int statBin,
int ptBin, int etaBin, int phiBin, int mBin,
int nStatus, int nPt, int nEta, int nPhi, int nM)
{
return ((((((static_cast<size_t>(colBin) * nStatus + statBin) * nPt + ptBin) * nEta + etaBin) * nPhi + phiBin) * nM + mBin));
}
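// Illustration only (not part of this change): linearKey is a plain row-major flattening,
// e.g. with nStatus = 2, nPt = 3, nEta = 4, nPhi = 5, nM = 1 the tuple
// (colBin = 1, statBin = 0, ptBin = 2, etaBin = 3, phiBin = 4, mBin = 0) maps to
//   ((((1*2 + 0)*3 + 2)*4 + 3)*5 + 4)*1 + 0 = 179,
// and distinct tuples cannot collide as long as every index stays below its bin count.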

// ================================ processMEV4 ================================
void processMEV4(EventCandidates const& collisions, AllTrackCandidates const& V0s)
{
// Build binner from your existing configurables
MixBinner mb{
ptMin.value, ptMax.value, ptMix.value, // pT range & step
v0eta.value, etaMix.value, // |eta| max & step
@@ -1156,9 +1156,7 @@
const size_t nKeys = static_cast<size_t>(nCol) * nStat * nPt * nEta * nPhi * nM;
std::vector<std::vector<BufferCand>> buffer(nKeys);

// =========================
// PASS 1: fill 6D buffer
// =========================
// ---------------- PASS 1: fill the 6D buffer ----------------
for (auto const& col : collisions) {
const int colBin = colBinning.getBin(std::make_tuple(col.posz(), col.cent()));
auto slice = V0s.sliceBy(tracksPerCollisionV0, col.index());
@@ -1178,9 +1176,7 @@
if (ptB < 0 || etaB < 0 || phiB < 0 || mB < 0)
continue;

const size_t key = linearKey(colBin, status, ptB, etaB, phiB, mB,
nStat, nPt, nEta, nPhi, nM);

const size_t key = linearKey(colBin, status, ptB, etaB, phiB, mB, nStat, nPt, nEta, nPhi, nM);
buffer[key].push_back(BufferCand{
.collisionIdx = static_cast<int64_t>(col.index()),
.rowIndex = static_cast<int64_t>(t.globalIndex()),
@@ -1192,7 +1188,7 @@
}
}

// Helper: get matches for a given candidate (for mixing one leg)
// small helper (kept local) to fetch matches for a given candidate from the same 6D bin
auto makeMatchesFor = [&](auto const& cand,
int colBinLocal,
int64_t curColIdx) -> std::vector<MatchRef> {
@@ -1209,33 +1205,21 @@
if (ptB < 0 || etaB < 0 || phiB < 0 || mB < 0)
return matches;

const size_t key = linearKey(colBinLocal, status, ptB, etaB, phiB, mB,
nStat, nPt, nEta, nPhi, nM);
const size_t key = linearKey(colBinLocal, status, ptB, etaB, phiB, mB, nStat, nPt, nEta, nPhi, nM);
auto const& binVec = buffer[key];
if (binVec.empty())
return matches;

matches.reserve(binVec.size());
for (const auto& bc : binVec) {
if (bc.collisionIdx == curColIdx)
continue; // ensure different event
continue; // different event
matches.push_back(MatchRef{bc.collisionIdx, bc.rowIndex});
}

// Optionally cap number of matches by nEvtMixing
const int cap = nEvtMixing.value;
const int n = static_cast<int>(matches.size());
if (cap > 0 && n > cap) {
std::shuffle(matches.begin(), matches.end(), rng);
matches.resize(cap);
}

return matches;
};

// =========================
// PASS 2: mixing over SE pairs (mix both legs)
// =========================
// ---------------- PASS 2: loop over SE pairs and mix both legs ----------------
for (auto const& collision1 : collisions) {
const int colBin = colBinning.getBin(std::make_tuple(collision1.posz(), collision1.cent()));
auto poolA = V0s.sliceBy(tracksPerCollisionV0, collision1.index());
@@ -1259,20 +1243,38 @@
if (t1.pionIndex() == t2.protonIndex())
continue;

// matches for replacing leg 1 and leg 2
auto matches1 = makeMatchesFor(t1, colBin, curColId); // t1 -> tX
auto matches2 = makeMatchesFor(t2, colBin, curColId); // t2 -> tY
// gather matches for both legs from the same 6D bin
auto matches1 = makeMatchesFor(t1, colBin, curColId); // candidates to replace leg-1
auto matches2 = makeMatchesFor(t2, colBin, curColId); // candidates to replace leg-2

const int n1 = static_cast<int>(matches1.size());
const int n2 = static_cast<int>(matches2.size());
if (n1 == 0 && n2 == 0)
const int n1Eff = static_cast<int>(matches1.size());
const int n2Eff = static_cast<int>(matches2.size());
if (n1Eff == 0 && n2Eff == 0)
continue;

// Split "unit" weight between the two modes
const float w1 = (n1 > 0 ? 0.5f / static_cast<float>(n1) : 0.f);
const float w2 = (n2 > 0 ? 0.5f / static_cast<float>(n2) : 0.f);
// per-leg depth (configurable)
int depth = nEvtMixing.value;
if (depth <= 0)
depth = 1;

const int n1Take = std::min(depth, n1Eff);
const int n2Take = std::min(depth, n2Eff);

// --- Type A: replace leg 1 → (tX, t2) ---
// split unit weight between legs if both have mixes; else single leg gets full
const float w1 = (n1Take > 0 ? ((n2Take > 0 ? 0.5f : 1.0f) / static_cast<float>(n1Take)) : 0.0f);
const float w2 = (n2Take > 0 ? ((n1Take > 0 ? 0.5f : 1.0f) / static_cast<float>(n2Take)) : 0.0f);
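// Illustration only: with depth = 5, n1Eff = 3, n2Eff = 8 this gives n1Take = 3, n2Take = 5,
// so w1 = 0.5/3 and w2 = 0.5/5; summed over all mixed entries, each SE pair still contributes
// a total weight of ~1.0. If only one leg has matches, that leg carries the full unit weight.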

// randomize & truncate per-leg matches to requested depth
if (n1Take > 0) {
std::shuffle(matches1.begin(), matches1.end(), rng);
matches1.resize(n1Take);
}
if (n2Take > 0) {
std::shuffle(matches2.begin(), matches2.end(), rng);
matches2.resize(n2Take);
}

// --- Type A: replace leg 1 (t1 -> tX), keep t2
for (const auto& m : matches1) {
auto tX = V0s.iteratorAt(m.rowIndex);
if (!selectionV0(tX))
@@ -1289,10 +1291,13 @@
const float dPhi = std::fabs(RecoDecay::constrainAngle(lambda1.Phi() - lambda2.Phi(), 0.0F, harmonicDphi));
histos.fill(HIST("deltaPhiMix"), dPhi, w1);

fillHistograms2(tX.v0Status(), t2.v0Status(), lambda1, lambda2, proton1, proton2, 1, w1, 1);
// datatype=1 (ME), mixedLeg=1
fillHistograms2(tX.v0Status(), t2.v0Status(),
lambda1, lambda2, proton1, proton2,
/*datatype=*/1, /*weight=*/w1, /*mixedLeg=*/1);
}

// --- Type B: replace leg 2 → (t1, tY) ---
// --- Type B: replace leg 2 (t2 -> tY), keep t1
for (const auto& m : matches2) {
auto tY = V0s.iteratorAt(m.rowIndex);
if (!selectionV0(tY))
@@ -1309,13 +1314,15 @@
const float dPhi = std::fabs(RecoDecay::constrainAngle(lambda1.Phi() - lambda2.Phi(), 0.0F, harmonicDphi));
histos.fill(HIST("deltaPhiMix"), dPhi, w2);

fillHistograms2(t1.v0Status(), tY.v0Status(), lambda1, lambda2, proton1, proton2, 1, w2, 2);
// datatype=1 (ME), mixedLeg=2
fillHistograms2(t1.v0Status(), tY.v0Status(),
lambda1, lambda2, proton1, proton2,
/*datatype=*/1, /*weight=*/w2, /*mixedLeg=*/2);
}
}
}
}

PROCESS_SWITCH(lambdaspincorrderived, processMEV4, "Process data ME (5d buffer)", false);
PROCESS_SWITCH(lambdaspincorrderived, processMEV4, "Process data ME (6D buffer, both legs mixed, depth-capped)", false);
};
WorkflowSpec defineDataProcessing(ConfigContext const& cfgc)
{