diff --git a/.github/workflows/key4hep-build.yaml b/.github/workflows/key4hep-build.yaml index 3f9007f0..b5749f82 100644 --- a/.github/workflows/key4hep-build.yaml +++ b/.github/workflows/key4hep-build.yaml @@ -13,6 +13,9 @@ jobs: matrix: build_type: ["release", "nightly"] image: ["alma9", "ubuntu22", "centos7"] + exclude: + - build_type: "nightly" + image: "centos7" fail-fast: false runs-on: ubuntu-latest steps: diff --git a/RecCalorimeter/src/components/CreateCaloCells.cpp b/RecCalorimeter/src/components/CreateCaloCells.cpp index 59139bc5..4b254956 100644 --- a/RecCalorimeter/src/components/CreateCaloCells.cpp +++ b/RecCalorimeter/src/components/CreateCaloCells.cpp @@ -21,6 +21,7 @@ GaudiAlgorithm(name, svcLoc), m_geoSvc("GeoSvc", name) { declareProperty("hits", m_hits, "Hits from which to create cells (input)"); declareProperty("cells", m_cells, "The created calorimeter cells (output)"); + declareProperty("crosstalksTool", m_crosstalksTool, "Handle for the cell crosstalk tool"); declareProperty("calibTool", m_calibTool, "Handle for tool to calibrate Geant4 energy to EM scale tool"); declareProperty("noiseTool", m_noiseTool, "Handle for the calorimeter cells noise tool"); declareProperty("geometryTool", m_geoTool, "Handle for the geometry tool"); @@ -35,8 +36,16 @@ StatusCode CreateCaloCells::initialize() { info() << "add cell noise : " << m_addCellNoise << endmsg; info() << "remove cells below threshold : " << m_filterCellNoise << endmsg; info() << "add position information to the cell : " << m_addPosition << endmsg; + info() << "emulate crosstalk : " << m_addCrosstalk << endmsg; // Initialization of tools + // Cell crosstalk tool + if (m_addCrosstalk) { + if (!m_crosstalksTool.retrieve()) { + error() << "Unable to retrieve the cell crosstalk tool!!!" << endmsg; + return StatusCode::FAILURE; + } + } // Calibrate Geant4 energy to EM scale tool if (m_doCellCalibration) { if (!m_calibTool.retrieve()) { @@ -67,7 +76,12 @@ StatusCode CreateCaloCells::initialize() { } // Copy over the CellIDEncoding string from the input collection to the output collection - m_cellsCellIDEncoding.put(m_hitsCellIDEncoding.get()); + auto hitsEncoding = m_hitsCellIDEncoding.get_optional(); + if (!hitsEncoding.has_value()) { + error () << "Missing cellID encoding for input collection" << endmsg; + return StatusCode::FAILURE; + } + m_cellsCellIDEncoding.put(hitsEncoding.value()); return StatusCode::SUCCESS; } @@ -84,6 +98,7 @@ StatusCode CreateCaloCells::execute() { m_cellsMap.clear(); } + // 1. Merge energy deposits into cells // If running with noise map already was prepared. Otherwise it is being // created below @@ -93,22 +108,50 @@ StatusCode CreateCaloCells::execute() { } debug() << "Number of calorimeter cells after merging of hits: " << m_cellsMap.size() << endmsg; - // 2. Calibrate simulation energy to EM scale + // 2. 
Emulate cross-talk (if asked)
+  if (m_addCrosstalk) {
+    // Derive the cross-talk contributions without affecting yet the nominal energy
+    // (one has to emulate crosstalk based on cells free from any cross-talk contributions)
+    m_CrosstalkCellsMap.clear(); // temporary map holding the energy exchange due to cross-talk, without affecting yet the nominal energy
+    // loop over cells with nominal energies
+    for (const auto& this_cell : m_cellsMap) {
+      uint64_t this_cellId = this_cell.first;
+      auto vec_neighbours = m_crosstalksTool->getNeighbours(this_cellId); // a vector of neighbour IDs
+      auto vec_crosstalks = m_crosstalksTool->getCrosstalks(this_cellId); // a vector of crosstalk coefficients
+      // loop over crosstalk neighbours of the cell under study
+      for (unsigned int i_cell = 0; i_cell < vec_neighbours.size(); i_cell++) {
+        // energy transferred to this neighbour = nominal energy of the cell under study * crosstalk coefficient
+        double signal_transfer = this_cell.second * vec_crosstalks[i_cell];
+        // subtract the transferred signal from the cell under study and add it to the neighbour
+        m_CrosstalkCellsMap[this_cellId] -= signal_transfer;
+        m_CrosstalkCellsMap[vec_neighbours[i_cell]] += signal_transfer;
+      }
+    }
+    // apply the cross-talk contributions to the nominal cell-energy map
+    for (const auto& transfer : m_CrosstalkCellsMap) {
+      m_cellsMap[transfer.first] += transfer.second;
+    }
+  }
+
+  // 3. Calibrate simulation energy to EM scale
   if (m_doCellCalibration) {
     m_calibTool->calibrate(m_cellsMap);
   }
-  // 3. Add noise to all cells
+  // 4. Add noise to all cells
   if (m_addCellNoise) {
     m_noiseTool->addRandomCellNoise(m_cellsMap);
   }
-  // 4. Filter cells
+  // 5. Filter cells
   if (m_filterCellNoise) {
     m_noiseTool->filterCellNoise(m_cellsMap);
   }
-  // 5. Copy information to CaloHitCollection
+  // 6. Copy information to CaloHitCollection
   edm4hep::CalorimeterHitCollection* edmCellsCollection = new edm4hep::CalorimeterHitCollection();
   for (const auto& cell : m_cellsMap) {
     if (m_addCellNoise || (!m_addCellNoise && cell.second != 0)) {
diff --git a/RecCalorimeter/src/components/CreateCaloCells.h b/RecCalorimeter/src/components/CreateCaloCells.h
index 2606bc1c..5c3a2724 100644
--- a/RecCalorimeter/src/components/CreateCaloCells.h
+++ b/RecCalorimeter/src/components/CreateCaloCells.h
@@ -7,6 +7,7 @@
 #include "k4Interface/ICalibrateCaloHitsTool.h"
 #include "k4Interface/ICalorimeterTool.h"
 #include "k4Interface/INoiseCaloCellsTool.h"
+#include "k4Interface/ICaloReadCrosstalkMap.h"

 // Gaudi
 #include "GaudiAlg/GaudiAlgorithm.h"
@@ -31,14 +32,17 @@ class IGeoSvc;
  *
  * Flow of the program:
  * 1/ Merge Geant4 energy deposits with same cellID
- * 2/ Calibrate to electromagnetic scale (if calibration switched on)
- * 3/ Add random noise to each cell (if noise switched on)
- * 4/ Filter cells and remove those with energy below threshold (if noise
+ * 2/ Emulate cross-talk (if switched on)
+ * 3/ Calibrate to electromagnetic scale (if calibration switched on)
+ * 4/ Add random noise to each cell (if noise switched on)
+ * 5/ Filter cells and remove those with energy below threshold (if noise
  * filtering switched on)
  *
  * Tools called:
  * - CalibrateCaloHitsTool
  * - NoiseCaloCellsTool
+ * - CaloReadCrosstalkMap
+ * - CalorimeterTool (for geometry)
  *
  * @author Jana Faltova
  * @author Anna Zaborowska
@@ -58,6 +62,9 @@ class CreateCaloCells : public GaudiAlgorithm {
   StatusCode finalize();

 private:
+
+  /// Handle for the calorimeter cells crosstalk tool
+  ToolHandle<ICaloReadCrosstalkMap> m_crosstalksTool{"ReadCaloCrosstalkMap", this};
   /// Handle for tool to calibrate Geant4 energy to EM scale tool
   ToolHandle<ICalibrateCaloHitsTool> m_calibTool{"CalibrateCaloHitsTool", this};
   /// Handle for the calorimeter cells noise tool
@@ -65,6 +72,8 @@ class CreateCaloCells : public GaudiAlgorithm {
   /// Handle for the geometry tool
   ToolHandle<ICalorimeterTool> m_geoTool{"TubeLayerPhiEtaCaloTool", this};

+  /// Add crosstalk to cells?
+  Gaudi::Property<bool> m_addCrosstalk{this, "addCrosstalk", false, "Add crosstalk effect?"};
   /// Calibrate to EM scale?
   Gaudi::Property<bool> m_doCellCalibration{this, "doCellCalibration", true, "Calibrate to EM scale?"};
   /// Add noise to cells?
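The cross-talk emulation added to CreateCaloCells::execute() works in two passes: all transfers are first derived from the unmodified cell energies and collected in m_CrosstalkCellsMap, and only afterwards folded back into m_cellsMap. The standalone sketch below is an illustration only, not part of this patch; the cell IDs, coefficients and container names are invented stand-ins for the tool and the cell map.

// Standalone illustration of the two-pass cross-talk emulation (hypothetical cell IDs and coefficients).
#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

int main() {
  // nominal cell energies after hit merging (cellID -> energy), invented values
  std::unordered_map<uint64_t, double> cells{{1, 10.0}, {2, 0.0}, {3, 5.0}};
  // cross-talk map: cellID -> parallel vectors of neighbour IDs and coefficients (invented)
  std::unordered_map<uint64_t, std::vector<uint64_t>> neighbours{{1, {2, 3}}, {3, {2}}};
  std::unordered_map<uint64_t, std::vector<double>> coefficients{{1, {0.01, 0.02}}, {3, {0.01}}};

  // pass 1: compute all transfers from the unmodified energies
  std::unordered_map<uint64_t, double> transfer;
  for (const auto& [id, energy] : cells) {
    auto itN = neighbours.find(id);
    if (itN == neighbours.end()) continue;
    const auto& coef = coefficients[id];
    for (std::size_t i = 0; i < itN->second.size(); ++i) {
      const double moved = energy * coef[i];
      transfer[id] -= moved;              // signal leaving the cell under study
      transfer[itN->second[i]] += moved;  // signal arriving in the neighbour
    }
  }
  // pass 2: apply the accumulated transfers; the summed energy is conserved
  for (const auto& [id, delta] : transfer) cells[id] += delta;

  for (const auto& [id, energy] : cells)
    std::cout << "cell " << id << " -> " << energy << " (arbitrary units)\n";
}

Because every transfer is computed before any energy is moved, the result does not depend on the order in which cells are visited.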
@@ -113,8 +122,10 @@ class CreateCaloCells : public GaudiAlgorithm {
   /// Pointer to the geometry service
   ServiceHandle<IGeoSvc> m_geoSvc;
   dd4hep::VolumeManager m_volman;
-  /// Map of cell IDs (corresponding to DD4hep IDs) and energy
+  /// Map of cell IDs (corresponding to DD4hep IDs) to the final cell energies used for clustering
   std::unordered_map<uint64_t, double> m_cellsMap;
+  /// Map of cell IDs (corresponding to DD4hep IDs) to the energy transferred to/from each cell by crosstalk
+  std::unordered_map<uint64_t, double> m_CrosstalkCellsMap;
 };

 #endif /* RECCALORIMETER_CREATECALOCELLS_H */
diff --git a/RecCalorimeter/src/components/ReadCaloCrosstalkMap.cpp b/RecCalorimeter/src/components/ReadCaloCrosstalkMap.cpp
new file mode 100644
index 00000000..ed29da31
--- /dev/null
+++ b/RecCalorimeter/src/components/ReadCaloCrosstalkMap.cpp
@@ -0,0 +1,60 @@
+#include "ReadCaloCrosstalkMap.h"
+
+#include "TFile.h"
+#include "TTree.h"
+#include "TBranch.h"
+
+DECLARE_COMPONENT(ReadCaloCrosstalkMap)
+
+ReadCaloCrosstalkMap::ReadCaloCrosstalkMap(const std::string& type, const std::string& name,
+                                           const IInterface* parent)
+    : GaudiTool(type, name, parent) {
+  declareInterface<ICaloReadCrosstalkMap>(this);
+}
+
+StatusCode ReadCaloCrosstalkMap::initialize() {
+  // do not initialize the tool if it is not needed (empty input file path),
+  // otherwise things will crash if m_fileName is not available;
+  // not a perfect solution, but tools do not seem to be meant to be optional
+  if (m_fileName == "") {
+    debug() << "Empty 'fileName' provided, meaning the cross-talk map is not needed; exiting ReadCaloCrosstalkMap initialization" << endmsg;
+    return StatusCode::SUCCESS;
+  }
+
+  StatusCode sc = GaudiTool::initialize();
+  info() << "Loading crosstalk map..." << endmsg;
+  if (sc.isFailure()) return sc;
+  std::unique_ptr<TFile> file(TFile::Open(m_fileName.value().c_str(), "READ"));
+  TTree* tree = nullptr;
+  file->GetObject("crosstalk_neighbours", tree);
+  ULong64_t read_cellId;
+  std::vector<uint64_t>* read_neighbours = nullptr;
+  std::vector<double>* read_crosstalks = nullptr;
+
+  tree->SetBranchAddress("cellId", &read_cellId);
+  tree->SetBranchAddress("list_crosstalk_neighbours", &read_neighbours);
+  tree->SetBranchAddress("list_crosstalks", &read_crosstalks);
+  for (uint i = 0; i < tree->GetEntries(); i++) {
+    tree->GetEntry(i);
+    m_mapNeighbours.insert(std::pair<uint64_t, std::vector<uint64_t>>(read_cellId, *read_neighbours));
+    m_mapCrosstalks.insert(std::pair<uint64_t, std::vector<double>>(read_cellId, *read_crosstalks));
+  }
+
+  info() << "Crosstalk input: " << m_fileName.value().c_str() << endmsg;
+  info() << "Total number of cells = " << tree->GetEntries() << ", Size of crosstalk neighbours = " << m_mapNeighbours.size() << ", Size of coefficients = " << m_mapCrosstalks.size() << endmsg;
+  delete tree;
+  delete read_neighbours;
+  delete read_crosstalks;
+  file->Close();
+  return sc;
+}
+
+StatusCode ReadCaloCrosstalkMap::finalize() { return GaudiTool::finalize(); }
+
+std::vector<uint64_t>& ReadCaloCrosstalkMap::getNeighbours(uint64_t aCellId) {
+  return m_mapNeighbours[aCellId];
+}
+
+std::vector<double>& ReadCaloCrosstalkMap::getCrosstalks(uint64_t aCellId) {
+  return m_mapCrosstalks[aCellId];
+}
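ReadCaloCrosstalkMap above expects a ROOT file holding a TTree named "crosstalk_neighbours" with branches "cellId", "list_crosstalk_neighbours" and "list_crosstalks", one entry per cell with parallel neighbour-ID and coefficient vectors. The sketch below shows how a file in that layout could be written; it assumes only what the reader code implies, and the output file name, cell IDs and coefficients are invented for illustration.

// Minimal sketch of writing a cross-talk map in the layout read by ReadCaloCrosstalkMap
// (tree and branch names taken from the reader above; IDs and coefficients are invented).
#include <cstdint>
#include <vector>
#include "TFile.h"
#include "TTree.h"

int main() {
  TFile file("xtalk_map_example.root", "RECREATE");  // hypothetical output name
  TTree tree("crosstalk_neighbours", "cross-talk neighbours and coefficients");

  ULong64_t cellId = 0;
  std::vector<uint64_t> neighbours;
  std::vector<double> crosstalks;
  tree.Branch("cellId", &cellId, "cellId/l");
  tree.Branch("list_crosstalk_neighbours", &neighbours);
  tree.Branch("list_crosstalks", &crosstalks);

  // one entry per cell: parallel vectors of neighbour IDs and crosstalk coefficients
  cellId = 100;
  neighbours = {101, 102};
  crosstalks = {0.01, 0.005};
  tree.Fill();

  tree.Write();
  file.Close();
  return 0;
}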
diff --git a/RecCalorimeter/src/components/ReadCaloCrosstalkMap.h b/RecCalorimeter/src/components/ReadCaloCrosstalkMap.h
new file mode 100644
index 00000000..ed65fbe3
--- /dev/null
+++ b/RecCalorimeter/src/components/ReadCaloCrosstalkMap.h
@@ -0,0 +1,49 @@
+#ifndef RECCALORIMETER_READCALOXTALKMAP_H
+#define RECCALORIMETER_READCALOXTALKMAP_H
+
+// from Gaudi
+#include "GaudiAlg/GaudiTool.h"
+
+// k4FWCore
+#include "k4Interface/ICaloReadCrosstalkMap.h"
+
+class IGeoSvc;
+
+/** @class ReadCaloCrosstalkMap Reconstruction/RecCalorimeter/src/components/ReadCaloCrosstalkMap.h
+ *
+ *  Tool that reads a ROOT file containing a TTree with branches "cellId", "list_crosstalk_neighbours" and "list_crosstalks".
+ *  This tool reads the tree, creates two maps, and allows a lookup of all crosstalk neighbours as well as the corresponding crosstalk coefficients for a given cell.
+ *
+ *  @author Zhibo Wu
+ */
+
+class ReadCaloCrosstalkMap : public GaudiTool, virtual public ICaloReadCrosstalkMap {
+public:
+  ReadCaloCrosstalkMap(const std::string& type, const std::string& name, const IInterface* parent);
+  virtual ~ReadCaloCrosstalkMap() = default;
+
+  virtual StatusCode initialize() final;
+  virtual StatusCode finalize() final;
+
+  /** Function to be called for the crosstalk neighbours of a cell.
+   *  @param[in] aCellId, cellID of the cell of interest.
+   *  @return vector of cellIDs, corresponding to the crosstalk neighbours.
+   */
+  virtual std::vector<uint64_t>& getNeighbours(uint64_t aCellId) final;
+
+  /** Function to be called for the crosstalk coefficients between the input cell and its neighbouring cells.
+   *  @param[in] aCellId, cellID of the cell of interest.
+   *  @return vector of crosstalk coefficients.
+   */
+  virtual std::vector<double>& getCrosstalks(uint64_t aCellId) final;
+
+private:
+  /// Name of the input ROOT file that contains the TTree with the maps cellID -> vector<uint64_t> (neighbours) and cellID -> vector<double> (crosstalk coefficients)
+  Gaudi::Property<std::string> m_fileName{this, "fileName", "", "Name of the file that contains the crosstalk map. Leave the default empty to avoid crashes when cross-talk is not needed."};
+  /// Output maps to be used for the fast lookup in the calo-cell creation algorithm
+  std::unordered_map<uint64_t, std::vector<uint64_t>> m_mapNeighbours;
+  std::unordered_map<uint64_t, std::vector<double>> m_mapCrosstalks;
+};
+
+#endif /* RECCALORIMETER_READCALOXTALKMAP_H */
diff --git a/RecCalorimeter/tests/options/runEcalBarrel_ReconstructionTopoClusters_crosstalk.py b/RecCalorimeter/tests/options/runEcalBarrel_ReconstructionTopoClusters_crosstalk.py
new file mode 100644
index 00000000..47e55b41
--- /dev/null
+++ b/RecCalorimeter/tests/options/runEcalBarrel_ReconstructionTopoClusters_crosstalk.py
@@ -0,0 +1,423 @@
+#
+# IMPORTS
+#
+from Configurables import ApplicationMgr
+from Configurables import EventCounter
+from Configurables import AuditorSvc, ChronoAuditor
+# Input/output
+from Configurables import k4DataSvc, PodioInput
+from Configurables import PodioOutput
+# Geometry
+from Configurables import GeoSvc
+# Create cells
+from Configurables import CreateCaloCells
+from Configurables import CreateEmptyCaloCellsCollection
+# Cell positioning tools
+from Configurables import CreateCaloCellPositionsFCCee
+from Configurables import CellPositionsECalBarrelModuleThetaSegTool
+# Redo segmentation for ECAL
+from Configurables import RedoSegmentation
+# Change HCAL segmentation
+from Configurables import RewriteBitfield
+# Apply sampling fraction corrections
+from Configurables import CalibrateCaloHitsTool
+from Configurables import CalibrateInLayersTool
+# Up/down stream correction
+from Configurables import CorrectCaloClusters
+# SW clustering
+from Configurables import CaloTowerToolFCCee
+from Configurables import CreateCaloClustersSlidingWindowFCCee
+# Topo clustering
+from Configurables import CaloTopoClusterInputTool
+from Configurables import TopoCaloNeighbours
+from Configurables import TopoCaloNoisyCells
+from Configurables import CaloTopoClusterFCCee
+# Decorate clusters with shower shape parameters
+from Configurables import AugmentClustersFCCee
+# Read crosstalk map
+from Configurables import ReadCaloCrosstalkMap
+# Logger
+from Gaudi.Configuration import INFO, VERBOSE, DEBUG
+# units and physical constants
+from GaudiKernel.SystemOfUnits import GeV, tesla, mm
+from GaudiKernel.PhysicalConstants import pi, halfpi, twopi
+# python libraries
+import os
+from math import cos, sin, tan
+
+#
+# SETTINGS
+#
+
+# - general settings
+#
+inputfile = "https://fccsw.web.cern.ch/fccsw/filesForSimDigiReco/ALLEGRO/ALLEGRO_o1_v03/ALLEGRO_sim.root"
+Nevts = 50  # -1 means all events
+dumpGDML = False
+
+# - what to save in output file
+#
+# for big productions, save significant space by removing hits and cells
+# however, hits and cluster cells might be wanted for small productions for detailed event displays
+# also, cluster cells are needed for the MVA training
+# saveHits = False
+# saveCells = False
+saveHits = True
+saveCells = True
+saveClusterCells = True
+doCrosstalk = True  # switch on/off the crosstalk
+
+# ECAL barrel parameters for digitisation
+samplingFraction = [0.37586625991994105] * 1 + [0.13459486704309379] * 1 + [0.142660085165352] * 1 + [0.14768106642302886] * 1 + [0.15205230356024715] * 1 + [0.15593671843591686] * 1 + [0.15969313426201745] * 1 + [0.16334257010426537] * 1 + [0.16546584993953908] * 1 + [0.16930439771304764] * 1 + [0.1725913708958098] * 1
+upstreamParameters = [[0.025582045561310333, -0.9524128168665387, -53.10089405478649, 1.283851527438571, -295.30650178662637, -284.8945817377308]]
+downstreamParameters = [[0.0018280333929494054, 0.004932212590963076, 0.8409676097173655, -1.2676690014715288, 0.005347798049886769, 4.161741293789687]]
+
+ecalBarrelLayers = len(samplingFraction)
+resegmentECalBarrel = False
+
+# - parameters for clustering
+#
+doSWClustering = True
+doTopoClustering = True
+
+# calculate cluster energy and barycenter per layer and save them as extra parameters
+addShapeParameters = True
+ecalBarrelThetaWeights = [-1, 3.0, 3.0, 3.0, 4.25, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0]  # to be recalculated for V03, separately for topo and calo clusters...
+ +# +# ALGORITHMS AND SERVICES SETUP +# + +# Input: load the output of the SIM step +evtsvc = k4DataSvc('EventDataSvc') +evtsvc.input = inputfile +input_reader = PodioInput('InputReader') +podioevent = k4DataSvc("EventDataSvc") + +# Detector geometry +# prefix all xmls with path_to_detector +# if K4GEO is empty, this should use relative path to working directory +geoservice = GeoSvc("GeoSvc") +path_to_detector = os.environ.get("K4GEO", "") +detectors_to_use = [ + 'FCCee/ALLEGRO/compact/ALLEGRO_o1_v03/ALLEGRO_o1_v03.xml' +] +geoservice.detectors = [ + os.path.join(path_to_detector, _det) for _det in detectors_to_use +] +geoservice.OutputLevel = INFO + +# GDML dump of detector model +if dumpGDML: + from Configurables import GeoToGdmlDumpSvc + gdmldumpservice = GeoToGdmlDumpSvc("GeoToGdmlDumpSvc") + +# Digitisation (merging hits into cells, EM scale calibration via sampling fractions) + +# - ECAL readouts +ecalBarrelReadoutName = "ECalBarrelModuleThetaMerged" +ecalBarrelReadoutName2 = "ECalBarrelModuleThetaMerged2" +ecalEndcapReadoutName = "ECalEndcapPhiEta" + +hcalBarrelReadoutName = "" +hcalBarrelReadoutName2 = "" +hcalEndcapReadoutName = "" + +# - EM scale calibration (sampling fraction) +# * ECAL barrel +calibEcalBarrel = CalibrateInLayersTool("CalibrateECalBarrel", + samplingFraction=samplingFraction, + readoutName=ecalBarrelReadoutName, + layerFieldName="layer") +# * ECAL endcap +calibEcalEndcap = CalibrateCaloHitsTool( + "CalibrateECalEndcap", invSamplingFraction="4.27") # FIXME: to be updated for ddsim + +# Create cells in ECal barrel (needed if one wants to apply cell calibration, +# which is not performed by ddsim) + +# read the crosstalk map +readCrosstalkMap = ReadCaloCrosstalkMap("ReadCrosstalkMap", + fileName="https://fccsw.web.cern.ch/fccsw/filesForSimDigiReco/ALLEGRO/ALLEGRO_o1_v03/xtalk_neighbours_map_ecalB_thetamodulemerged.root", + OutputLevel=INFO) + +# - merge hits into cells according to initial segmentation +ecalBarrelCellsName = "ECalBarrelCells" +createEcalBarrelCells = CreateCaloCells("CreateECalBarrelCells", + doCellCalibration=True, + calibTool=calibEcalBarrel, + crosstalksTool=readCrosstalkMap, + addCrosstalk=doCrosstalk, + addCellNoise=False, + filterCellNoise=False, + addPosition=True, + OutputLevel=INFO, + hits=ecalBarrelReadoutName, + cells=ecalBarrelCellsName) + +# - add to Ecal barrel cells the position information +# (good for physics, all coordinates set properly) +cellPositionEcalBarrelTool = CellPositionsECalBarrelModuleThetaSegTool( + "CellPositionsECalBarrel", + readoutName=ecalBarrelReadoutName, + OutputLevel=INFO +) +ecalBarrelPositionedCellsName = ecalBarrelReadoutName + "Positioned" +createEcalBarrelPositionedCells = CreateCaloCellPositionsFCCee( + "CreateECalBarrelPositionedCells", + OutputLevel=INFO +) +createEcalBarrelPositionedCells.positionsTool = cellPositionEcalBarrelTool +createEcalBarrelPositionedCells.hits.Path = ecalBarrelCellsName +createEcalBarrelPositionedCells.positionedHits.Path = ecalBarrelPositionedCellsName + +# Create cells in ECal endcap (needed if one wants to apply cell calibration, +# which is not performed by ddsim) +createEcalEndcapCells = CreateCaloCells("CreateEcalEndcapCaloCells", + doCellCalibration=True, + calibTool=calibEcalEndcap, + addCellNoise=False, + filterCellNoise=False, + OutputLevel=INFO) +createEcalEndcapCells.hits.Path = ecalEndcapReadoutName +createEcalEndcapCells.cells.Path = "ECalEndcapCells" + +hcalBarrelCellsName = "emptyCaloCells" +hcalBarrelPositionedCellsName = "emptyCaloCells" 
+hcalBarrelCellsName2 = "emptyCaloCells" +hcalBarrelPositionedCellsName2 = "emptyCaloCells" +cellPositionHcalBarrelTool = None +cellPositionHcalBarrelTool2 = None + +# Empty cells for parts of calorimeter not implemented yet +createemptycells = CreateEmptyCaloCellsCollection("CreateEmptyCaloCells") +createemptycells.cells.Path = "emptyCaloCells" + +# Produce sliding window clusters +if doSWClustering: + towers = CaloTowerToolFCCee("towers", + deltaThetaTower=4 * 0.009817477 / 4, deltaPhiTower=2 * 2 * pi / 1536., + ecalBarrelReadoutName=ecalBarrelReadoutName, + ecalEndcapReadoutName=ecalEndcapReadoutName, + ecalFwdReadoutName="", + hcalBarrelReadoutName=hcalBarrelReadoutName2, + hcalExtBarrelReadoutName="", + hcalEndcapReadoutName="", + hcalFwdReadoutName="", + OutputLevel=INFO) + towers.ecalBarrelCells.Path = ecalBarrelPositionedCellsName + towers.ecalEndcapCells.Path = "ECalEndcapCells" + towers.ecalFwdCells.Path = "emptyCaloCells" + towers.hcalBarrelCells.Path = hcalBarrelPositionedCellsName2 + towers.hcalExtBarrelCells.Path = "emptyCaloCells" + towers.hcalEndcapCells.Path = "emptyCaloCells" + towers.hcalFwdCells.Path = "emptyCaloCells" + + # Cluster variables + windT = 9 + windP = 17 + posT = 5 + posP = 11 + dupT = 7 + dupP = 13 + finT = 9 + finP = 17 + # Minimal energy to create a cluster in GeV (FCC-ee detectors have to reconstruct low energy particles) + threshold = 0.040 + + createClusters = CreateCaloClustersSlidingWindowFCCee("CreateClusters", + towerTool=towers, + nThetaWindow=windT, nPhiWindow=windP, + nThetaPosition=posT, nPhiPosition=posP, + nThetaDuplicates=dupT, nPhiDuplicates=dupP, + nThetaFinal=finT, nPhiFinal=finP, + energyThreshold=threshold, + energySharingCorrection=False, + attachCells=True, + OutputLevel=INFO + ) + createClusters.clusters.Path = "CaloClusters" + createClusters.clusterCells.Path = "CaloClusterCells" + + if addShapeParameters: + augmentCaloClusters = AugmentClustersFCCee("augmentCaloClusters", + inClusters=createClusters.clusters.Path, + outClusters="Augmented" + createClusters.clusters.Path, + systemIDs=[4], + systemNames=["EMB"], + numLayers=[ecalBarrelLayers], + readoutNames=[ecalBarrelReadoutName], + layerFieldNames=["layer"], + thetaRecalcWeights=[ecalBarrelThetaWeights], + OutputLevel=INFO + ) + +if doTopoClustering: + # Produce topoclusters (ECAL only or ECAL+HCAL) + createTopoInput = CaloTopoClusterInputTool("CreateTopoInput", + ecalBarrelReadoutName=ecalBarrelReadoutName, + ecalEndcapReadoutName="", + ecalFwdReadoutName="", + hcalBarrelReadoutName=hcalBarrelReadoutName2, + hcalExtBarrelReadoutName="", + hcalEndcapReadoutName="", + hcalFwdReadoutName="", + OutputLevel=INFO) + + createTopoInput.ecalBarrelCells.Path = ecalBarrelPositionedCellsName + createTopoInput.ecalEndcapCells.Path = "emptyCaloCells" + createTopoInput.ecalFwdCells.Path = "emptyCaloCells" + createTopoInput.hcalBarrelCells.Path = hcalBarrelPositionedCellsName2 + createTopoInput.hcalExtBarrelCells.Path = "emptyCaloCells" + createTopoInput.hcalEndcapCells.Path = "emptyCaloCells" + createTopoInput.hcalFwdCells.Path = "emptyCaloCells" + cellPositionHcalBarrelNoSegTool = None + cellPositionHcalExtBarrelTool = None + + neighboursMap = "https://fccsw.web.cern.ch/fccsw/filesForSimDigiReco/ALLEGRO/ALLEGRO_o1_v03/neighbours_map_ecalB_thetamodulemerged.root" + noiseMap = "https://fccsw.web.cern.ch/fccsw/filesForSimDigiReco/ALLEGRO/ALLEGRO_o1_v03/cellNoise_map_electronicsNoiseLevel_ecalB_thetamodulemerged.root" + + readNeighboursMap = TopoCaloNeighbours("ReadNeighboursMap", + 
fileName=neighboursMap, + OutputLevel=INFO) + + # Noise levels per cell + readNoisyCellsMap = TopoCaloNoisyCells("ReadNoisyCellsMap", + fileName=noiseMap, + OutputLevel=INFO) + + createTopoClusters = CaloTopoClusterFCCee("CreateTopoClusters", + TopoClusterInput=createTopoInput, + # expects neighbours map from cellid->vec < neighbourIds > + neigboursTool=readNeighboursMap, + # tool to get noise level per cellid + noiseTool=readNoisyCellsMap, + # cell positions tools for all sub - systems + positionsECalBarrelTool=cellPositionEcalBarrelTool, + positionsHCalBarrelTool=cellPositionHcalBarrelTool2, + # positionsHCalBarrelNoSegTool=cellPositionHcalBarrelNoSegTool, + # positionsHCalExtBarrelTool=cellPositionHcalExtBarrelTool, + # positionsHCalExtBarrelTool = HCalExtBcells, + # positionsEMECTool = EMECcells, + # positionsHECTool = HECcells, + # positionsEMFwdTool = ECalFwdcells, + # positionsHFwdTool = HCalFwdcells, + noSegmentationHCal=False, + seedSigma=4, + neighbourSigma=2, + lastNeighbourSigma=0, + OutputLevel=INFO) + createTopoClusters.clusters.Path = "CaloTopoClusters" + createTopoClusters.clusterCells.Path = "CaloTopoClusterCells" + + + # Correction below is for EM-only clusters + # Need something different for EM+HCAL + if addShapeParameters: + augmentCaloTopoClusters = AugmentClustersFCCee("augmentCaloTopoClusters", + inClusters=createTopoClusters.clusters.Path, + outClusters="Augmented" + createTopoClusters.clusters.Path, + systemIDs=[4], + systemNames=["EMB"], + numLayers=[ecalBarrelLayers], + readoutNames=[ecalBarrelReadoutName], + layerFieldNames=["layer"], + thetaRecalcWeights=[ecalBarrelThetaWeights], + OutputLevel=INFO) +# Output +out = PodioOutput("out", + OutputLevel=INFO) +out.filename = "ALLEGRO_sim_digi_reco.root" + +# drop the unpositioned ECal barrel cells +out.outputCommands = ["keep *", "drop HCal*", "drop emptyCaloCells", "drop ECalBarrelCells*"] +out.outputCommands.append("drop %s" % ecalBarrelReadoutName) +out.outputCommands.append("drop %s" % ecalBarrelReadoutName2) +out.outputCommands.append("drop ECalBarrelCellsMerged") + +# drop lumi, vertex, DCH, Muons (unless want to keep for event display) +out.outputCommands.append("drop Lumi*") +# out.outputCommands.append("drop Vertex*") +# out.outputCommands.append("drop DriftChamber_simHits*") +out.outputCommands.append("drop MuonTagger*") + +# drop hits/positioned cells/cluster cells if desired +if not saveHits: + out.outputCommands.append("drop %s_contributions" % ecalBarrelReadoutName) + out.outputCommands.append("drop %s_contributions" % ecalBarrelReadoutName2) +if not saveCells: + out.outputCommands.append("drop %s" % ecalBarrelPositionedCellsName) + out.outputCommands.append("drop %s" % ecalBarrelPositionedCellsName2) + +if not saveClusterCells: + out.outputCommands.append("drop Calo*ClusterCells*") + +# CPU information +chra = ChronoAuditor() +audsvc = AuditorSvc() +audsvc.Auditors = [chra] +out.AuditExecute = True + +# Event counter +event_counter = EventCounter('event_counter') +event_counter.Frequency = 10 + +# Configure list of external services +ExtSvc = [geoservice, podioevent, audsvc] +if dumpGDML: + ExtSvc += [gdmldumpservice] + +# Setup alg sequence +TopAlg = [ + event_counter, + input_reader, + createEcalBarrelCells, + createEcalBarrelPositionedCells, + createEcalEndcapCells +] +createEcalBarrelCells.AuditExecute = True +createEcalBarrelPositionedCells.AuditExecute = True +createEcalEndcapCells.AuditExecute = True + +if resegmentECalBarrel: + TopAlg += [ + resegmentEcalBarrelTool, + createEcalBarrelCells2, 
+ createEcalBarrelPositionedCells2, + ] + resegmentEcalBarrelTool.AuditExecute = True + createEcalBarrelCells2.AuditExecute = True + createEcalBarrelPositionedCells2.AuditExecute = True + +if doSWClustering or doTopoClustering: + TopAlg += [createemptycells] + createemptycells.AuditExecute = True + + if doSWClustering: + TopAlg += [createClusters] + createClusters.AuditExecute = True + + if addShapeParameters: + TopAlg += [augmentCaloClusters] + augmentCaloClusters.AuditExecute = True + + if doTopoClustering: + TopAlg += [createTopoClusters] + createTopoClusters.AuditExecute = True + + if addShapeParameters: + TopAlg += [augmentCaloTopoClusters] + augmentCaloTopoClusters.AuditExecute = True + +TopAlg += [ + out +] + +ApplicationMgr( + TopAlg=TopAlg, + EvtSel='NONE', + EvtMax=Nevts, + ExtSvc=ExtSvc, + StopOnSignal=True, +) + diff --git a/RecFCCeeCalorimeter/CMakeLists.txt b/RecFCCeeCalorimeter/CMakeLists.txt index c6d0beeb..d7672479 100644 --- a/RecFCCeeCalorimeter/CMakeLists.txt +++ b/RecFCCeeCalorimeter/CMakeLists.txt @@ -27,7 +27,8 @@ gaudi_add_module(k4RecFCCeeCalorimeterPlugins DD4hep::DDG4 ROOT::Core ROOT::Hist - ${ONNXRUNTIME_LIBRARY} + ${ONNXRUNTIME_LIBRARY} + nlohmann_json::nlohmann_json ) install(TARGETS k4RecFCCeeCalorimeterPlugins EXPORT k4RecCalorimeterTargets @@ -38,12 +39,6 @@ install(TARGETS k4RecFCCeeCalorimeterPlugins install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/tests DESTINATION ${CMAKE_INSTALL_DATADIR}/${CMAKE_PROJECT_NAME}/RecFCCeeCalorimeter) -add_test(NAME FCCeeLAr_reproduceSegfault - COMMAND k4run RecFCCeeCalorimeter/tests/options/reproduceSegfault.py - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} -) -set_test_env(FCCeeLAr_reproduceSegfault) - add_test(NAME FCCeeLAr_simulateForReco COMMAND k4run RecFCCeeCalorimeter/tests/options/runCaloSim.py WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} @@ -80,3 +75,9 @@ add_test(NAME FCCeeLAr_benchmarkCorrection WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} ) set_test_env(FCCeeLAr_benchmarkCorrection) + +add_test(NAME FCCeeLAr_runxtalk + COMMAND k4run RecCalorimeter/tests/options/runEcalBarrel_ReconstructionTopoClusters_crosstalk.py + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} +) +set_test_env(FCCeeLAr_runxtalk) diff --git a/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.cpp b/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.cpp index f648b968..c30b0f0f 100644 --- a/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.cpp +++ b/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.cpp @@ -9,6 +9,10 @@ // DD4hep #include "DD4hep/Detector.h" +// k4geo +#include "detectorSegmentations/FCCSWGridPhiTheta_k4geo.h" +#include "detectorSegmentations/FCCSWGridModuleThetaMerged_k4geo.h" + // ROOT #include "TLorentzVector.h" #include "TString.h" @@ -60,15 +64,63 @@ StatusCode AugmentClustersFCCee::initialize() error() << "Sizes of systemIDs vector and layerFieldNames vector do not match, exiting!" << endmsg; return StatusCode::FAILURE; } + if (m_systemIDs.size() != m_thetaFieldNames.size()) + { + error() << "Sizes of systemIDs vector and thetaFieldNames vector do not match, exiting!" << endmsg; + return StatusCode::FAILURE; + } + if (m_systemIDs.size() != m_moduleFieldNames.size()) + { + error() << "Sizes of systemIDs vector and moduleFieldNames vector do not match, exiting!" << endmsg; + return StatusCode::FAILURE; + } if (m_systemIDs.size() != m_detectorNames.size()) { error() << "Sizes of systemIDs vector and detectorNames vector do not match, exiting!" 
<< endmsg; return StatusCode::FAILURE; } + // retrieve systemID of EMB from "DetID_ECAL_Barrel" + systemID_EMB = m_geoSvc->getDetector()->constantAsDouble("DetID_ECAL_Barrel"); + + // retrieve some information from the segmentation for later use + // - number of modules/phi bins => needed to account for module/phi periodicity + // - number of merged modules and theta cells per layer => needed to check if two cells are neighbours + for (size_t k = 0; k < m_readoutNames.size(); k++) + { + std::string readoutName = m_readoutNames[k]; + dd4hep::DDSegmentation::Segmentation *aSegmentation = m_geoSvc->getDetector()->readout(readoutName).segmentation().segmentation(); + std::string segmentationType = aSegmentation->type(); + if (segmentationType == "FCCSWGridModuleThetaMerged_k4geo") + { + dd4hep::DDSegmentation::FCCSWGridModuleThetaMerged_k4geo *moduleThetaSegmentation = dynamic_cast(aSegmentation); + nModules.push_back(moduleThetaSegmentation->nModules()); + for (size_t iLayer = 0; iLayer < m_numLayers[k]; ++iLayer) + { + nMergedThetaCells.push_back(moduleThetaSegmentation->mergedThetaCells(iLayer)); + nMergedModules.push_back(moduleThetaSegmentation->mergedModules(iLayer)); + } + } + else if (segmentationType == "FCCSWGridPhiTheta_k4geo") + { + dd4hep::DDSegmentation::FCCSWGridPhiTheta_k4geo *phiThetaSegmentation = dynamic_cast(aSegmentation); + nModules.push_back(phiThetaSegmentation->phiBins()); + for (size_t iLayer = 0; iLayer < m_numLayers[k]; ++iLayer) + { + nMergedThetaCells.push_back(1); + nMergedModules.push_back(1); + } + } + else + { + error() << "Segmentation type not handled, aborting..." << endmsg; + return StatusCode::FAILURE; + } + } + // initialise the list of metadata for the clusters - // append to the metadata of the input clusters - std::vector showerShapeDecorations = m_inShapeParameterHandle.get(); + // append to the metadata of the input clusters (if any) + std::vector showerShapeDecorations = m_inShapeParameterHandle.get({}); for (size_t k = 0; k < m_detectorNames.size(); k++) { const char *detector = m_detectorNames[k].c_str(); @@ -77,13 +129,67 @@ StatusCode AugmentClustersFCCee::initialize() showerShapeDecorations.push_back(Form("energy_fraction_%s_layer_%d", detector, layer)); showerShapeDecorations.push_back(Form("theta_%s_layer_%d", detector, layer)); showerShapeDecorations.push_back(Form("phi_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("maxcell_E_%s_layer_%d", detector, layer)); + // pi0/photon shape var only calculated in EMB + if (m_do_photon_shapeVar && m_systemIDs[k] == systemID_EMB) { + showerShapeDecorations.push_back(Form("width_theta_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("width_module_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("Ratio_E_max_2ndmax_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("Delta_E_2ndmax_min_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("Ratio_E_max_2ndmax_vs_phi_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("Delta_E_2ndmax_min_vs_phi_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("width_theta_3Bin_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("width_theta_5Bin_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("width_theta_7Bin_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("width_theta_9Bin_%s_layer_%d", detector, layer)); + 
showerShapeDecorations.push_back(Form("E_fr_side_pm2_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("E_fr_side_pm3_%s_layer_%d", detector, layer)); + showerShapeDecorations.push_back(Form("E_fr_side_pm4_%s_layer_%d", detector, layer)); + } } } + showerShapeDecorations.push_back("mass"); // cluster invariant mass assuming massless constituents + showerShapeDecorations.push_back("ncells"); // number of cells in cluster with E>0 + m_showerShapeHandle.put(showerShapeDecorations); return StatusCode::SUCCESS; } +// for all cells in a certain layer: vec A is theta_id, vec B is E_cell +// make a 1D projection to theta: sum up the E_cell over modules with the same theta_id +// then sort the theta vec (for finding theta neighbors), update the E vec simultaneously +// return a pair of vectors: theta_id (in ascending order) and E (summed over modules) +std::pair, std::vector> MergeSumAndSort(const std::vector& A, const std::vector& B) { + std::unordered_map elementSum; + // merge the same elements in vec A and sum up the corresponding elements in vec B + for (size_t i = 0; i < A.size(); i ++) { + elementSum[A[i]] += B[i]; + } + std::vector A_new; + std::vector B_new; + // re-build vec A and vec B from elementSum + for (const auto& entry : elementSum) { + A_new.push_back(entry.first); + B_new.push_back(entry.second); + } + std::vector vec_1(A_new.size()); + std::vector vec_2(B_new.size()); + std::vector indices(A_new.size()); + for (size_t i = 0; i < indices.size(); ++ i) { + indices[i] = i; + } + // sort the theta vec, update the E vec simultaneously + std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { + return A_new[a] < A_new[b]; + }); + for (size_t i = 0; i < indices.size(); ++ i) { + vec_1[i] = A_new[indices[i]]; + vec_2[i] = B_new[indices[i]]; + } + return std::make_pair(vec_1, vec_2); +} + StatusCode AugmentClustersFCCee::finalize() { return Gaudi::Algorithm::finalize(); @@ -91,11 +197,8 @@ StatusCode AugmentClustersFCCee::finalize() StatusCode AugmentClustersFCCee::execute([[maybe_unused]] const EventContext &evtCtx) const { - - // get the input collection with clusters const edm4hep::ClusterCollection *inClusters = m_inClusters.get(); - // create the new output collection edm4hep::ClusterCollection *outClusters = m_outClusters.createAndPut(); @@ -113,54 +216,83 @@ StatusCode AugmentClustersFCCee::execute([[maybe_unused]] const EventContext &ev auto newCluster = cluster.clone(); outClusters->push_back(newCluster); - // calculate the energy deposited in each layer - // also, find out if cluster is around -pi..pi transition + // loop over all cells to: + // - calculate the cluster invariant mass + // - calculate the energy deposited in each layer + // - find out the energy of the cells with largest energy in each layer + // - find out if cluster is around -pi..pi transition and/or max module .. 
0 transition double E(0.0); - std::vector sumEnLayer; - sumEnLayer.assign(numLayersTotal, 0.); + TLorentzVector p4cl(0.0, 0.0, 0.0, 0.0); + unsigned int nCells(0); + std::vector sumEnLayer(numLayersTotal, 0.); + std::vector maxCellEnergyInLayer(numLayersTotal, 0.); double phiMin = 9999.; double phiMax = -9999.; + int module_id_Min = 9999; + int module_id_Max = -9999; // loop over each system/readout int startPositionToFill = 0; for (size_t k = 0; k < m_readoutNames.size(); k++) { if (k > 0) - { startPositionToFill += m_numLayers[k - 1]; - } int systemID = m_systemIDs[k]; std::string layerField = m_layerFieldNames[k]; + std::string moduleField = m_moduleFieldNames[k]; std::string readoutName = m_readoutNames[k]; dd4hep::DDSegmentation::BitFieldCoder *decoder = m_geoSvc->getDetector()->readout(readoutName).idSpec().decoder(); - // loop over the cells + // loop over the cells to get E_layer, E_cluster, max and min phi of cluster cells for (auto cell = newCluster.hits_begin(); cell != newCluster.hits_end(); cell++) { dd4hep::DDSegmentation::CellID cID = cell->getCellID(); + + // skip cells with wrong system ID int sysId = decoder->get(cID, "system"); if (sysId != systemID) continue; + + // retrieve layer and module ID uint layer = decoder->get(cID, layerField); - sumEnLayer[layer+startPositionToFill] += cell->getEnergy(); - E += cell->getEnergy(); + int module_id = decoder->get(cID, moduleField); + + // retrieve cell energy, update sum of cell energies per layer, total energy, and max cell energy + double eCell = cell->getEnergy(); + sumEnLayer[layer+startPositionToFill] += eCell; + E += eCell; + if (maxCellEnergyInLayer[layer+startPositionToFill]getPosition().x, cell->getPosition().y, cell->getPosition().z); double phi = v.Phi(); - if (phi < phiMin) - phiMin = phi; - if (phi > phiMax) - phiMax = phi; - } - } - + if (phi < phiMin) phiMin = phi; + if (phi > phiMax) phiMax = phi; + if (module_id > module_id_Max) module_id_Max = module_id; + if (module_id < module_id_Min) module_id_Min = module_id; + + // add cell 4-momentum to cluster 4-momentum + TVector3 pCell = v * (eCell / v.Mag()); + TLorentzVector p4cell(pCell.X(), pCell.Y(), pCell.Z(), eCell); + p4cl += p4cell; + nCells++; + } // end of loop over cells + } // end of loop over system / readout + //std::cout << "Ecl = " << E << std::endl; + // any number close to two pi should do, because if a cluster contains // the -pi<->pi transition, phiMin should be close to -pi and phiMax close to pi bool isClusterPhiNearPi = false; - if (phiMax - phiMin > 6.) - isClusterPhiNearPi = true; + if (phiMax - phiMin > 6.) 
isClusterPhiNearPi = true; + debug() << "phiMin, phiMax : " << phiMin << " " << phiMax << endmsg; debug() << "Cluster is near phi=pi : " << isClusterPhiNearPi << endmsg; + bool isResetModuleID = false; + // near the 1535..0 transition, reset module ID + if (module_id_Max - module_id_Min > nModules[0] * .9) isResetModuleID = true; + // calculate the theta positions with log(E) weighting in each layer // for phi use standard E weighting std::vector sumThetaLayer; @@ -169,6 +301,56 @@ StatusCode AugmentClustersFCCee::execute([[maybe_unused]] const EventContext &ev sumThetaLayer.assign(numLayersTotal, 0); sumPhiLayer.assign(numLayersTotal, 0); sumWeightLayer.assign(numLayersTotal, 0); + + // vectors for photon/pi0 discrimination + // vectors to calculate theta/module width vs layer - using all cells + std::vector theta2_E_layer(numLayersTotal, 0.); + std::vector theta_E_layer(numLayersTotal, 0.); + std::vector module2_E_layer(numLayersTotal, 0.); + std::vector module_E_layer(numLayersTotal, 0.); + + // E, theta and module ID of cells in each layer + std::vector> vec_E_cell_layer(numLayersTotal, std::vector()); + std::vector> vec_theta_cell_layer(numLayersTotal, std::vector()); + std::vector> vec_module_cell_layer(numLayersTotal, std::vector()); + + // E and theta of 1st and 2nd local max in 1D theta profile, and E of minimum between them + std::vector E_cell_Max(numLayersTotal, 0.); + std::vector E_cell_secMax(numLayersTotal, 0.); + std::vector E_cell_Max_theta(numLayersTotal, 0.); + std::vector E_cell_secMax_theta(numLayersTotal, 0.); + std::vector E_cell_Min(numLayersTotal, std::numeric_limits::max()); + + // E and theta of 1st and 2nd local max in 1D module profile, and E of minimum between them + std::vector E_cell_vs_phi_Max(numLayersTotal, 0.); + std::vector E_cell_vs_phi_secMax(numLayersTotal, 0.); + std::vector E_cell_vs_phi_Max_module(numLayersTotal, 0.); + std::vector E_cell_vs_phi_secMax_module(numLayersTotal, 0.); + std::vector E_cell_vs_phi_Min(numLayersTotal, std::numeric_limits::max()); + + // theta/module width using all cells + std::vector width_theta(numLayersTotal, 0.); + std::vector width_module(numLayersTotal, 0.); + // theta width using only cells within deltaThetaBin = +-1, +-2, +-3, +-4 + std::vector width_theta_3Bin(numLayersTotal, 0.); + std::vector width_theta_5Bin(numLayersTotal, 0.); + std::vector width_theta_7Bin(numLayersTotal, 0.); + std::vector width_theta_9Bin(numLayersTotal, 0.); + + // E_fr_side_N, (E around maxE +- N bins) / (E around maxE +- 1 bins) - 1. 
+ std::vector E_fr_side_pm2(numLayersTotal, 0.); + std::vector E_fr_side_pm3(numLayersTotal, 0.); + std::vector E_fr_side_pm4(numLayersTotal, 0.); + + // (Emax - E2ndmax)/(Emax + E2ndmax) where 2nd max must be a local maximum + std::vector Ratio_E_max_2ndmax(numLayersTotal, 0.); + // (E2ndmax - Emin) where Emin is the energy with minimum energy in the theta range defined by 1st and 2nd (local) max + std::vector Delta_E_2ndmax_min(numLayersTotal, 0.); + // (Emax - E2ndmax)/(Emax + E2ndmax) where 2nd max must be a local maximum - in phi profile + std::vector Ratio_E_max_2ndmax_vs_phi(numLayersTotal, 0.); + // (E2ndmax - Emin) where Emin is the energy with minimum energy in the module range defined by 1st and 2nd (local) max + std::vector Delta_E_2ndmax_min_vs_phi(numLayersTotal, 0.); + // loop over each system/readout startPositionToFill = 0; // rather than a loop over the systems, could first determine systemID @@ -177,14 +359,15 @@ StatusCode AugmentClustersFCCee::execute([[maybe_unused]] const EventContext &ev for (size_t k = 0; k < m_readoutNames.size(); k++) { if (k > 0) - { startPositionToFill += m_numLayers[k - 1]; - } int systemID = m_systemIDs[k]; std::string layerField = m_layerFieldNames[k]; + std::string thetaField = m_thetaFieldNames[k]; + std::string moduleField = m_moduleFieldNames[k]; std::string readoutName = m_readoutNames[k]; dd4hep::DDSegmentation::BitFieldCoder *decoder = m_geoSvc->getDetector()->readout(readoutName).idSpec().decoder(); + // loop over the cells for (auto cell = newCluster.hits_begin(); cell != newCluster.hits_end(); cell++) { dd4hep::DDSegmentation::CellID cID = cell->getCellID(); @@ -192,23 +375,220 @@ StatusCode AugmentClustersFCCee::execute([[maybe_unused]] const EventContext &ev if (sysId != systemID) continue; uint layer = decoder->get(cID, layerField); + int theta_id = decoder->get(cID, thetaField); + int module_id = decoder->get(cID, moduleField); + double eCell = cell->getEnergy(); - double weightLog = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(eCell / sumEnLayer[layer])); + double weightLog = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(eCell / sumEnLayer[layer+startPositionToFill])); TVector3 v = TVector3(cell->getPosition().x, cell->getPosition().y, cell->getPosition().z); double theta = v.Theta(); double phi = v.Phi(); + // for clusters that are around the -pi<->pi transition, we want to avoid averaging // over phi values that might differ by 2pi. in that case, for cells with negative // phi we add two pi, so that we average phi values all close to pi - if (isClusterPhiNearPi && phi < 0.) + if (isClusterPhiNearPi && phi < 0.) 
{ phi += TMath::TwoPi(); + } + if (systemID == systemID_EMB && isResetModuleID && module_id > nModules[k]/2) { + module_id -= nModules[k]; // transition near 1535..0, reset the module ID + } + if (m_thetaRecalcLayerWeights[k][layer]<0) sumThetaLayer[layer+startPositionToFill] += (eCell * theta); else sumThetaLayer[layer+startPositionToFill] += (weightLog * theta); sumWeightLayer[layer+startPositionToFill] += weightLog; sumPhiLayer[layer+startPositionToFill] += (eCell * phi); - } + + // do pi0/photon shape var only for EMB + if (m_do_photon_shapeVar && systemID == systemID_EMB) { + // E, theta_id, and module_id of cells in layer + vec_E_cell_layer[layer+startPositionToFill].push_back(eCell); + vec_theta_cell_layer[layer+startPositionToFill].push_back(theta_id); + vec_module_cell_layer[layer+startPositionToFill].push_back(module_id); + // sum them for width in theta/module calculation + if (m_do_widthTheta_logE_weights) { + theta2_E_layer[layer+startPositionToFill] += theta_id * theta_id * weightLog; + theta_E_layer[layer+startPositionToFill] += theta_id * weightLog; + } else { + theta2_E_layer[layer+startPositionToFill] += theta_id * theta_id * eCell; + theta_E_layer[layer+startPositionToFill] += theta_id * eCell; + } + module2_E_layer[layer+startPositionToFill] += module_id * module_id * eCell; + module_E_layer[layer+startPositionToFill] += module_id * eCell; + } + } // end of loop over cells + } // end of loop over each system / readout + + // local maxima (could be more than one) and the corresponding theta + std::vector, std::vector>> theta_E_pair; + std::vector> local_E_Max(numLayersTotal, std::vector()); + std::vector> local_E_Max_theta(numLayersTotal, std::vector()); + + startPositionToFill = 0; + for (size_t k = 0; k < m_readoutNames.size(); k++) { + if (!m_do_photon_shapeVar) break; + if (m_systemIDs[k] != systemID_EMB) continue; // do pi0/photon shape var only for EMB + if (k > 0) startPositionToFill += m_numLayers[k - 1]; + // loop over layers + for (unsigned layer = 0; layer < m_numLayers[k]; layer++) { + // in case there's no cell in this layer (sometimes in layer 0) + if (vec_E_cell_layer[layer+startPositionToFill].empty()) { + vec_E_cell_layer[layer+startPositionToFill].push_back(0); + vec_theta_cell_layer[layer+startPositionToFill].push_back(0); + vec_module_cell_layer[layer+startPositionToFill].push_back(0); + } + auto result = MergeSumAndSort(vec_theta_cell_layer[layer+startPositionToFill], vec_E_cell_layer[layer+startPositionToFill]); + + // fill the zero energy cells in 1D theta-E profile + for (int i = result.first.front(); i <= result.first.back(); i += nMergedThetaCells[layer+startPositionToFill]) { + if (std::find(result.first.begin(), result.first.end(), i) == result.first.end()) { + auto it = std::lower_bound(result.first.begin(), result.first.end(), i); + int idx = it - result.first.begin(); + result.first.insert(it, i); + result.second.insert(result.second.begin() + idx, 0); + } + } + theta_E_pair.push_back(result); + + // loop over theta IDs to find the local E maxima + for (size_t i = 0; i < theta_E_pair[layer+startPositionToFill].second.size(); i ++) { + //std::cout << i << " " << theta_E_pair[layer+startPositionToFill].first[i] << " " << theta_E_pair[layer+startPositionToFill].second[i] << std::endl; + if ( + (i == 0 && theta_E_pair[layer+startPositionToFill].second[i] > theta_E_pair[layer+startPositionToFill].second[i+1]) || + (i == theta_E_pair[layer+startPositionToFill].second.size()-1 && theta_E_pair[layer+startPositionToFill].second[i] > 
theta_E_pair[layer+startPositionToFill].second[i-1]) || + (i != 0 && i != (theta_E_pair[layer+startPositionToFill].second.size()-1) && + theta_E_pair[layer+startPositionToFill].second[i] > theta_E_pair[layer+startPositionToFill].second[i-1] && + theta_E_pair[layer+startPositionToFill].second[i] > theta_E_pair[layer+startPositionToFill].second[i+1]) + ) { + local_E_Max[layer+startPositionToFill].push_back(theta_E_pair[layer+startPositionToFill].second[i]); + local_E_Max_theta[layer+startPositionToFill].push_back(theta_E_pair[layer+startPositionToFill].first[i]); + } + } // end of loop over theta IDs + + if (local_E_Max[layer+startPositionToFill].empty()) { + E_cell_Max[layer+startPositionToFill] = 0.; + E_cell_secMax[layer+startPositionToFill] = 0.; + E_cell_Max_theta[layer+startPositionToFill] = 0.; + E_cell_secMax_theta[layer+startPositionToFill] = 0.; + E_cell_Min[layer+startPositionToFill] = 0.; + } else if (local_E_Max[layer+startPositionToFill].size() < 2) { + E_cell_Max[layer+startPositionToFill] = local_E_Max[layer+startPositionToFill][0]; + E_cell_secMax[layer+startPositionToFill] = 0.; + E_cell_Max_theta[layer+startPositionToFill] = local_E_Max_theta[layer+startPositionToFill][0]; + E_cell_secMax_theta[layer+startPositionToFill] = local_E_Max_theta[layer+startPositionToFill][0]; + E_cell_Min[layer+startPositionToFill] = 0.; + } else { + std::vector sortedVec = local_E_Max[layer+startPositionToFill]; + // move the top 2 max to the beginning + std::partial_sort(sortedVec.begin(), sortedVec.begin()+2, sortedVec.end(), std::greater()); + E_cell_Max[layer+startPositionToFill] = sortedVec[0]; + E_cell_secMax[layer+startPositionToFill] = sortedVec[1]; + // get the corresponding theta IDs + auto it_Max = std::find(local_E_Max[layer+startPositionToFill].begin(), local_E_Max[layer+startPositionToFill].end(), sortedVec[0]); + int index_Max = std::distance(local_E_Max[layer+startPositionToFill].begin(), it_Max); + auto it_secMax = std::find(local_E_Max[layer+startPositionToFill].begin(), local_E_Max[layer+startPositionToFill].end(), sortedVec[1]); + int index_secMax = std::distance(local_E_Max[layer+startPositionToFill].begin(), it_secMax); + E_cell_Max_theta[layer+startPositionToFill] = local_E_Max_theta[layer+startPositionToFill][index_Max]; + E_cell_secMax_theta[layer+startPositionToFill] = local_E_Max_theta[layer+startPositionToFill][index_secMax]; + // find the E_min inside the theta range of E_cell_Max and E_cell_secMax + // the theta_E_pair are sorted in ascending order of thetaID + for (size_t i = 0; i < theta_E_pair[layer+startPositionToFill].second.size(); i ++) { + if ( + (theta_E_pair[layer+startPositionToFill].first[i] > std::min(E_cell_Max_theta[layer+startPositionToFill], E_cell_secMax_theta[layer+startPositionToFill])) && + (theta_E_pair[layer+startPositionToFill].first[i] < std::max(E_cell_Max_theta[layer+startPositionToFill], E_cell_secMax_theta[layer+startPositionToFill])) && + (theta_E_pair[layer+startPositionToFill].second[i] < E_cell_Min[layer+startPositionToFill]) + ) + { + E_cell_Min[layer+startPositionToFill] = theta_E_pair[layer+startPositionToFill].second[i]; + } + } + } + if (E_cell_Min[layer+startPositionToFill] > 1e12) E_cell_Min[layer+startPositionToFill] = 0.; // check E_cell_Min + } // end of loop over layers + } + + // local maxima of E vs phi (could be more than one) and the corresponding module + std::vector, std::vector>> module_E_pair; + std::vector> local_E_Max_vs_phi(numLayersTotal, std::vector()); + std::vector> 
local_E_Max_vs_phi_module(numLayersTotal, std::vector()); + + startPositionToFill = 0; + for (size_t k = 0; k < m_readoutNames.size(); k++) { + if (!m_do_photon_shapeVar) break; + if (m_systemIDs[k] != systemID_EMB) continue; // do pi0/photon shape var only for EMB + if (k > 0) startPositionToFill += m_numLayers[k - 1]; + // loop over layers + for (unsigned layer = 0; layer < m_numLayers[k]; layer++) { + auto result_2 = MergeSumAndSort(vec_module_cell_layer[layer+startPositionToFill], vec_E_cell_layer[layer+startPositionToFill]); + // fill the zero energy cells in 1D module-E profile + for (int i = result_2.first.front(); i <= result_2.first.back(); i += nMergedModules[layer+startPositionToFill]) { + if (std::find(result_2.first.begin(), result_2.first.end(), i) == result_2.first.end()) { + auto it = std::lower_bound(result_2.first.begin(), result_2.first.end(), i); + int idx = it - result_2.first.begin(); + result_2.first.insert(it, i); + result_2.second.insert(result_2.second.begin() + idx, 0); + } + } + module_E_pair.push_back(result_2); + + // loop over module IDs to find the local E maxima + for (size_t i = 0; i < module_E_pair[layer+startPositionToFill].second.size(); i ++) { + //std::cout << i << " " << module_E_pair[layer+startPositionToFill].first[i] << " " << module_E_pair[layer+startPositionToFill].second[i] << std::endl; + if ( + (i == 0 && module_E_pair[layer+startPositionToFill].second[i] > module_E_pair[layer+startPositionToFill].second[i+1]) || + (i == module_E_pair[layer+startPositionToFill].second.size()-1 && module_E_pair[layer+startPositionToFill].second[i] > module_E_pair[layer+startPositionToFill].second[i-1]) || + (i != 0 && i != (module_E_pair[layer+startPositionToFill].second.size()-1) && + module_E_pair[layer+startPositionToFill].second[i] > module_E_pair[layer+startPositionToFill].second[i-1] && + module_E_pair[layer+startPositionToFill].second[i] > module_E_pair[layer+startPositionToFill].second[i+1]) + ) { + local_E_Max_vs_phi[layer+startPositionToFill].push_back(module_E_pair[layer+startPositionToFill].second[i]); + local_E_Max_vs_phi_module[layer+startPositionToFill].push_back(module_E_pair[layer+startPositionToFill].first[i]); + } + } // end of loop over module IDs + + if (local_E_Max_vs_phi[layer+startPositionToFill].empty()) { + E_cell_vs_phi_Max[layer+startPositionToFill] = 0.; + E_cell_vs_phi_secMax[layer+startPositionToFill] = 0.; + E_cell_vs_phi_Max_module[layer+startPositionToFill] = 0; + E_cell_vs_phi_secMax_module[layer+startPositionToFill] = 0; + E_cell_vs_phi_Min[layer+startPositionToFill] = 0.; + } + else if (local_E_Max_vs_phi[layer+startPositionToFill].size() < 2) { + E_cell_vs_phi_Max[layer+startPositionToFill] = local_E_Max_vs_phi[layer+startPositionToFill][0]; + E_cell_vs_phi_secMax[layer+startPositionToFill] = 0.; + E_cell_vs_phi_Max_module[layer+startPositionToFill] = local_E_Max_vs_phi_module[layer+startPositionToFill][0]; + E_cell_vs_phi_secMax_module[layer+startPositionToFill] = 0; + E_cell_vs_phi_Min[layer+startPositionToFill] = 0.; + } + else { + std::vector sortedVec = local_E_Max_vs_phi[layer+startPositionToFill]; + // move the top 2 max to the beginning + std::partial_sort(sortedVec.begin(), sortedVec.begin()+2, sortedVec.end(), std::greater()); + E_cell_vs_phi_Max[layer+startPositionToFill] = sortedVec[0]; + E_cell_vs_phi_secMax[layer+startPositionToFill] = sortedVec[1]; + // get the corresponding module IDs + auto it_Max = std::find(local_E_Max_vs_phi[layer+startPositionToFill].begin(), 
local_E_Max_vs_phi[layer+startPositionToFill].end(), sortedVec[0]); + int index_Max = std::distance(local_E_Max_vs_phi[layer+startPositionToFill].begin(), it_Max); + auto it_secMax = std::find(local_E_Max_vs_phi[layer+startPositionToFill].begin(), local_E_Max_vs_phi[layer+startPositionToFill].end(), sortedVec[1]); + int index_secMax = std::distance(local_E_Max_vs_phi[layer+startPositionToFill].begin(), it_secMax); + E_cell_vs_phi_Max_module[layer+startPositionToFill] = local_E_Max_vs_phi_module[layer+startPositionToFill][index_Max]; + E_cell_vs_phi_secMax_module[layer+startPositionToFill] = local_E_Max_vs_phi_module[layer+startPositionToFill][index_secMax]; + // find the E_min inside the module range of E_cell_Max and E_cell_secMax + for (size_t i = 0; i < module_E_pair[layer+startPositionToFill].second.size(); i ++) { + if ( + (module_E_pair[layer+startPositionToFill].first[i] > std::min(E_cell_vs_phi_Max_module[layer+startPositionToFill], E_cell_vs_phi_secMax_module[layer+startPositionToFill])) && + (module_E_pair[layer+startPositionToFill].first[i] < std::max(E_cell_vs_phi_Max_module[layer+startPositionToFill], E_cell_vs_phi_secMax_module[layer+startPositionToFill])) && + (module_E_pair[layer+startPositionToFill].second[i] < E_cell_vs_phi_Min[layer+startPositionToFill]) + ) + { + E_cell_vs_phi_Min[layer+startPositionToFill] = module_E_pair[layer+startPositionToFill].second[i]; + } + } + } + if (E_cell_vs_phi_Min[layer+startPositionToFill] > 1e12) E_cell_vs_phi_Min[layer+startPositionToFill] = 0.; // check E_cell_Min + } // end of loop over layers } // save energy and theta/phi positions per layer in shape parameters @@ -216,9 +596,10 @@ StatusCode AugmentClustersFCCee::execute([[maybe_unused]] const EventContext &ev for (size_t k = 0; k < m_readoutNames.size(); k++) { if (k > 0) - { startPositionToFill += m_numLayers[k - 1]; - } + + int systemID = m_systemIDs[k]; + // loop over layers for (unsigned layer = 0; layer < m_numLayers[k]; layer++) { // theta @@ -245,11 +626,207 @@ StatusCode AugmentClustersFCCee::execute([[maybe_unused]] const EventContext &ev // make sure phi is in range -pi..pi if (sumPhiLayer[layer+startPositionToFill] > TMath::Pi()) sumPhiLayer[layer+startPositionToFill] -= TMath::TwoPi(); - newCluster.addToShapeParameters(sumEnLayer[layer+startPositionToFill] / E); + + newCluster.addToShapeParameters(sumEnLayer[layer+startPositionToFill] / E); // E fraction of layer newCluster.addToShapeParameters(sumThetaLayer[layer+startPositionToFill]); newCluster.addToShapeParameters(sumPhiLayer[layer+startPositionToFill]); - } - } - } + + // do pi0/photon shape var only for EMB + if (m_do_photon_shapeVar && systemID == systemID_EMB) { + double w_theta; + if (m_do_widthTheta_logE_weights) { + w_theta = (sumWeightLayer[layer+startPositionToFill] != 0.) ? sqrt(theta2_E_layer[layer+startPositionToFill] / sumWeightLayer[layer+startPositionToFill] - std::pow(theta_E_layer[layer+startPositionToFill] / sumWeightLayer[layer+startPositionToFill], 2)) : 0. ; + } else { + w_theta = (sumEnLayer[layer+startPositionToFill] > 0.) ? sqrt(theta2_E_layer[layer+startPositionToFill] / sumEnLayer[layer+startPositionToFill] - std::pow(theta_E_layer[layer+startPositionToFill] / sumEnLayer[layer+startPositionToFill], 2)) : 0. ; + } + width_theta[layer+startPositionToFill] = w_theta; + + double w_module = (sumEnLayer[layer+startPositionToFill] > 0.) ? 
sqrt(module2_E_layer[layer+startPositionToFill] / sumEnLayer[layer+startPositionToFill] - std::pow(module_E_layer[layer+startPositionToFill] / sumEnLayer[layer+startPositionToFill], 2)) : 0. ; + width_module[layer+startPositionToFill] = w_module; + + double Ratio_E = (E_cell_Max[layer+startPositionToFill] - E_cell_secMax[layer+startPositionToFill]) / + (E_cell_Max[layer+startPositionToFill] + E_cell_secMax[layer+startPositionToFill]); + if (E_cell_Max[layer+startPositionToFill] + E_cell_secMax[layer+startPositionToFill] == 0) Ratio_E = 1.; + Ratio_E_max_2ndmax[layer+startPositionToFill] = Ratio_E; + Delta_E_2ndmax_min[layer+startPositionToFill] = E_cell_secMax[layer+startPositionToFill] - E_cell_Min[layer+startPositionToFill]; + + double Ratio_E_vs_phi = (E_cell_vs_phi_Max[layer+startPositionToFill] - E_cell_vs_phi_secMax[layer+startPositionToFill]) / + (E_cell_vs_phi_Max[layer+startPositionToFill] + E_cell_vs_phi_secMax[layer+startPositionToFill]); + if (E_cell_vs_phi_Max[layer+startPositionToFill] + E_cell_vs_phi_secMax[layer+startPositionToFill] == 0.) Ratio_E_vs_phi = 1.; + Ratio_E_max_2ndmax_vs_phi[layer+startPositionToFill] = Ratio_E_vs_phi; + Delta_E_2ndmax_min_vs_phi[layer+startPositionToFill] = E_cell_vs_phi_secMax[layer+startPositionToFill] - E_cell_vs_phi_Min[layer+startPositionToFill]; + + if (local_E_Max[layer+startPositionToFill].size() > 0) { + double E_m1 = 0.; + double E_p1 = 0.; + int theta_m1 = 0; + int theta_p1 = 0; + double E_m2 = 0.; + double E_p2 = 0.; + int theta_m2 = 0; + int theta_p2 = 0; + double E_m3 = 0.; + double E_p3 = 0.; + int theta_m3 = 0; + int theta_p3 = 0; + double E_m4 = 0.; + double E_p4 = 0.; + int theta_m4 = 0; + int theta_p4 = 0; + auto it_1 = std::find(theta_E_pair[layer+startPositionToFill].second.begin(), theta_E_pair[layer+startPositionToFill].second.end(), E_cell_Max[layer+startPositionToFill]); + int ind_1 = std::distance(theta_E_pair[layer+startPositionToFill].second.begin(), it_1); + + if (ind_1-1 >= 0) { + E_m1 = theta_E_pair[layer].second[ind_1-1]; + theta_m1 = theta_E_pair[layer].first[ind_1-1]; + } else { + E_m1 = 0; + theta_m1 = 0; + } + if (static_cast<size_t>(ind_1+1) < theta_E_pair[layer].second.size()) { + E_p1 = theta_E_pair[layer].second[ind_1+1]; + theta_p1 = theta_E_pair[layer].first[ind_1+1]; + } else { + E_p1 = 0; + theta_p1 = 0; + } + + if (ind_1-2 >= 0) { + E_m2 = theta_E_pair[layer].second[ind_1-2]; + theta_m2 = theta_E_pair[layer].first[ind_1-2]; + } else { + E_m2 = 0; + theta_m2 = 0; + } + if (static_cast<size_t>(ind_1+2) < theta_E_pair[layer].second.size()) { + E_p2 = theta_E_pair[layer].second[ind_1+2]; + theta_p2 = theta_E_pair[layer].first[ind_1+2]; + } else { + E_p2 = 0; + theta_p2 = 0; + } + + if (ind_1-3 >= 0) { + E_m3 = theta_E_pair[layer].second[ind_1-3]; + theta_m3 = theta_E_pair[layer].first[ind_1-3]; + } else { + E_m3 = 0; + theta_m3 = 0; + } + if (static_cast<size_t>(ind_1+3) < theta_E_pair[layer].second.size()) { + E_p3 = theta_E_pair[layer].second[ind_1+3]; + theta_p3 = theta_E_pair[layer].first[ind_1+3]; + } else { + E_p3 = 0; + theta_p3 = 0; + } + + if (ind_1-4 >= 0) { + E_m4 = theta_E_pair[layer].second[ind_1-4]; + theta_m4 = theta_E_pair[layer].first[ind_1-4]; + } else { + E_m4 = 0; + theta_m4 = 0; + } + if (static_cast<size_t>(ind_1+4) < theta_E_pair[layer].second.size()) { + E_p4 = theta_E_pair[layer].second[ind_1+4]; + theta_p4 = theta_E_pair[layer].first[ind_1+4]; + } else { + E_p4 = 0; + theta_p4 = 0; + } + // sum_E_nBin is also used in E fraction side calculation so put that outside the if block + double sum_E_3Bin = E_m1 +
E_cell_Max[layer+startPositionToFill] + E_p1; + double sum_E_5Bin = sum_E_3Bin + E_m2 + E_p2; + double sum_E_7Bin = sum_E_5Bin + E_m3 + E_p3; + double sum_E_9Bin = sum_E_7Bin + E_m4 + E_p4; + + if (m_do_widthTheta_logE_weights) { + double weightLog_E_max = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_cell_Max[layer+startPositionToFill] / sumEnLayer[layer+startPositionToFill])); + double weightLog_m1 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_m1 / sumEnLayer[layer+startPositionToFill])); + double weightLog_m2 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_m2 / sumEnLayer[layer+startPositionToFill])); + double weightLog_m3 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_m3 / sumEnLayer[layer+startPositionToFill])); + double weightLog_m4 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_m4 / sumEnLayer[layer+startPositionToFill])); + double weightLog_p1 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_p1 / sumEnLayer[layer+startPositionToFill])); + double weightLog_p2 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_p2 / sumEnLayer[layer+startPositionToFill])); + double weightLog_p3 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_p3 / sumEnLayer[layer+startPositionToFill])); + double weightLog_p4 = std::max(0., m_thetaRecalcLayerWeights[k][layer] + log(E_p4 / sumEnLayer[layer+startPositionToFill])); + + double sum_weightLog_3Bin = weightLog_E_max + weightLog_m1 + weightLog_p1; + double sum_weightLog_5Bin = sum_weightLog_3Bin + weightLog_m2 + weightLog_p2; + double sum_weightLog_7Bin = sum_weightLog_5Bin + weightLog_m3 + weightLog_p3; + double sum_weightLog_9Bin = sum_weightLog_7Bin + weightLog_m4 + weightLog_p4; + + double theta2_E_3Bin = theta_m1 * theta_m1 * weightLog_m1 + E_cell_Max_theta[layer+startPositionToFill] * E_cell_Max_theta[layer+startPositionToFill] * weightLog_E_max + theta_p1 * theta_p1 * weightLog_p1; + double theta_E_3Bin = theta_m1 * weightLog_m1 + E_cell_Max_theta[layer+startPositionToFill] * weightLog_E_max + theta_p1 * weightLog_p1; + double theta2_E_5Bin = theta2_E_3Bin + theta_m2 * theta_m2 * weightLog_m2 + theta_p2 * theta_p2 * weightLog_p2; + double theta_E_5Bin = theta_E_3Bin + theta_m2 * weightLog_m2 + theta_p2 * weightLog_p2; + double theta2_E_7Bin = theta2_E_5Bin + theta_m3 * theta_m3 * weightLog_m3 + theta_p3 * theta_p3 * weightLog_p3; + double theta_E_7Bin = theta_E_5Bin + theta_m3 * weightLog_m3 + theta_p3 * weightLog_p3; + double theta2_E_9Bin = theta2_E_7Bin + theta_m4 * theta_m4 * weightLog_m4 + theta_p4 * theta_p4 * weightLog_p4; + double theta_E_9Bin = theta_E_7Bin + theta_m4 * weightLog_m4 + theta_p4 * weightLog_p4; + + double _w_theta_3Bin = sqrt(theta2_E_3Bin / sum_weightLog_3Bin - std::pow(theta_E_3Bin / sum_weightLog_3Bin, 2)); + double _w_theta_5Bin = sqrt(theta2_E_5Bin / sum_weightLog_5Bin - std::pow(theta_E_5Bin / sum_weightLog_5Bin, 2)); + double _w_theta_7Bin = sqrt(theta2_E_7Bin / sum_weightLog_7Bin - std::pow(theta_E_7Bin / sum_weightLog_7Bin, 2)); + double _w_theta_9Bin = sqrt(theta2_E_9Bin / sum_weightLog_9Bin - std::pow(theta_E_9Bin / sum_weightLog_9Bin, 2)); + + width_theta_3Bin[layer+startPositionToFill] = (sum_weightLog_3Bin > 0.) ? _w_theta_3Bin : 0. ; + width_theta_5Bin[layer+startPositionToFill] = (sum_weightLog_5Bin > 0.) ? _w_theta_5Bin : 0. ; + width_theta_7Bin[layer+startPositionToFill] = (sum_weightLog_7Bin > 0.) ? _w_theta_7Bin : 0. ; + width_theta_9Bin[layer+startPositionToFill] = (sum_weightLog_9Bin > 0.) ? _w_theta_9Bin : 0. 
; + } else { + double theta2_E_3Bin = theta_m1 * theta_m1 * E_m1 + E_cell_Max_theta[layer+startPositionToFill] * E_cell_Max_theta[layer+startPositionToFill] * E_cell_Max[layer+startPositionToFill] + theta_p1 * theta_p1 * E_p1; + double theta_E_3Bin = theta_m1 * E_m1 + E_cell_Max_theta[layer+startPositionToFill] * E_cell_Max[layer+startPositionToFill] + theta_p1 * E_p1; + double theta2_E_5Bin = theta2_E_3Bin + theta_m2 * theta_m2 * E_m2 + theta_p2 * theta_p2 * E_p2; + double theta_E_5Bin = theta_E_3Bin + theta_m2 * E_m2 + theta_p2 * E_p2; + double theta2_E_7Bin = theta2_E_5Bin + theta_m3 * theta_m3 * E_m3 + theta_p3 * theta_p3 * E_p3; + double theta_E_7Bin = theta_E_5Bin + theta_m3 * E_m3 + theta_p3 * E_p3; + double theta2_E_9Bin = theta2_E_7Bin + theta_m4 * theta_m4 * E_m4 + theta_p4 * theta_p4 * E_p4; + double theta_E_9Bin = theta_E_7Bin + theta_m4 * E_m4 + theta_p4 * E_p4; + + double _w_theta_3Bin = sqrt(theta2_E_3Bin / sum_E_3Bin - std::pow(theta_E_3Bin / sum_E_3Bin, 2)); + double _w_theta_5Bin = sqrt(theta2_E_5Bin / sum_E_5Bin - std::pow(theta_E_5Bin / sum_E_5Bin, 2)); + double _w_theta_7Bin = sqrt(theta2_E_7Bin / sum_E_7Bin - std::pow(theta_E_7Bin / sum_E_7Bin, 2)); + double _w_theta_9Bin = sqrt(theta2_E_9Bin / sum_E_9Bin - std::pow(theta_E_9Bin / sum_E_9Bin, 2)); + + width_theta_3Bin[layer+startPositionToFill] = (sum_E_3Bin > 0.) ? _w_theta_3Bin : 0. ; + width_theta_5Bin[layer+startPositionToFill] = (sum_E_5Bin > 0.) ? _w_theta_5Bin : 0. ; + width_theta_7Bin[layer+startPositionToFill] = (sum_E_7Bin > 0.) ? _w_theta_7Bin : 0. ; + width_theta_9Bin[layer+startPositionToFill] = (sum_E_9Bin > 0.) ? _w_theta_9Bin : 0. ; + } + E_fr_side_pm2[layer+startPositionToFill] = (sum_E_3Bin > 0.) ? (sum_E_5Bin / sum_E_3Bin - 1.) : 0. ; + E_fr_side_pm3[layer+startPositionToFill] = (sum_E_3Bin > 0.) ? (sum_E_7Bin / sum_E_3Bin - 1.) : 0. ; + E_fr_side_pm4[layer+startPositionToFill] = (sum_E_3Bin > 0.) ? (sum_E_9Bin / sum_E_3Bin - 1.) : 0. 
; + } else { + width_theta_3Bin[layer+startPositionToFill] = 0.; + width_theta_5Bin[layer+startPositionToFill] = 0.; + width_theta_7Bin[layer+startPositionToFill] = 0.; + width_theta_9Bin[layer+startPositionToFill] = 0.; + E_fr_side_pm2[layer+startPositionToFill] = 0.; + E_fr_side_pm3[layer+startPositionToFill] = 0.; + E_fr_side_pm4[layer+startPositionToFill] = 0.; + } + newCluster.addToShapeParameters(maxCellEnergyInLayer[layer+startPositionToFill]); + newCluster.addToShapeParameters(width_theta[layer+startPositionToFill]); + newCluster.addToShapeParameters(width_module[layer+startPositionToFill]); + newCluster.addToShapeParameters(Ratio_E_max_2ndmax[layer+startPositionToFill]); + newCluster.addToShapeParameters(Delta_E_2ndmax_min[layer+startPositionToFill]); + newCluster.addToShapeParameters(Ratio_E_max_2ndmax_vs_phi[layer+startPositionToFill]); + newCluster.addToShapeParameters(Delta_E_2ndmax_min_vs_phi[layer+startPositionToFill]); + newCluster.addToShapeParameters(width_theta_3Bin[layer+startPositionToFill]); + newCluster.addToShapeParameters(width_theta_5Bin[layer+startPositionToFill]); + newCluster.addToShapeParameters(width_theta_7Bin[layer+startPositionToFill]); + newCluster.addToShapeParameters(width_theta_9Bin[layer+startPositionToFill]); + newCluster.addToShapeParameters(E_fr_side_pm2[layer+startPositionToFill]); + newCluster.addToShapeParameters(E_fr_side_pm3[layer+startPositionToFill]); + newCluster.addToShapeParameters(E_fr_side_pm4[layer+startPositionToFill]); + } + } // end of loop over layers + } // end of loop over system/readout + newCluster.addToShapeParameters(p4cl.M()); + newCluster.addToShapeParameters(nCells); + } // end of loop over clusters + return StatusCode::SUCCESS; } diff --git a/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.h b/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.h index c96f0eb4..e702fe42 100644 --- a/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.h +++ b/RecFCCeeCalorimeter/src/components/AugmentClustersFCCee.h @@ -10,17 +10,22 @@ class IGeoSvc; #include "GaudiKernel/Algorithm.h" #include "GaudiKernel/ToolHandle.h" +// DD4HEP +//#include "DDSegmentation/Segmentation.h" + // EDM4HEP namespace edm4hep { class ClusterCollection; } +// DD4HEP namespace dd4hep { namespace DDSegmentation { class BitFieldCoder; + class Segmentation; } } @@ -31,6 +36,7 @@ namespace dd4hep * * @author Alexis Maloizel * @author Giovanni Marchiori + * @author Tong Li */ class AugmentClustersFCCee : public Gaudi::Algorithm @@ -50,11 +56,11 @@ class AugmentClustersFCCee : public Gaudi::Algorithm /// Handle for the cluster shape metadata to read and to write MetaDataHandle<std::vector<std::string>> m_inShapeParameterHandle{ m_inClusters, - edm4hep::shapeParameterNames, + edm4hep::labels::ShapeParameterNames, Gaudi::DataHandle::Reader}; MetaDataHandle<std::vector<std::string>> m_showerShapeHandle{ m_outClusters, - edm4hep::shapeParameterNames, + edm4hep::labels::ShapeParameterNames, Gaudi::DataHandle::Writer}; /// Pointer to the geometry service @@ -65,14 +71,16 @@ class AugmentClustersFCCee : public Gaudi::Algorithm /// Name of the detectors (for the metadata) Gaudi::Property<std::vector<std::string>> m_detectorNames{ this, "systemNames", {"EMB"}, "Names of the detectors, corresponding to systemIDs"}; + /// systemID of EMB, should be retrieved from mydetector.constantAsDouble("DetID_ECAL_Barrel") + int systemID_EMB; /// Numbers of layers of the detectors Gaudi::Property<std::vector<size_t>> m_numLayers{ - this, "numLayers", {12}, "Numbers of layers of the systems"}; + this, "numLayers", {11}, "Numbers of layers of the systems"}; /// Weights
for each detector layer for theta position log-weighting Gaudi::Property<std::vector<std::vector<double>>> m_thetaRecalcLayerWeights{ this, "thetaRecalcWeights", - {{-1, 3.0, 3.0, 3.0, 4.25, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}}, + {{-1, 3.0, 3.0, 3.0, 4.25, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}}, "Weights for each detector layer for theta position log-weighting. If negative use linear weight."}; /// Name of the detector readouts, corresponding to system IDs in m_systemIDs Gaudi::Property<std::vector<std::string>> m_readoutNames{ @@ -80,6 +88,20 @@ /// Name of the layer/cell field Gaudi::Property<std::vector<std::string>> m_layerFieldNames{ this, "layerFieldNames", {"layer"}, "Identifiers of layers, corresponding to systemIDs"}; + Gaudi::Property<std::vector<std::string>> m_thetaFieldNames{ + this, "thetaFieldNames", {"theta"}, "Identifiers of theta, corresponding to systemIDs"}; + Gaudi::Property<std::vector<std::string>> m_moduleFieldNames{ + this, "moduleFieldNames", {"module"}, "Identifiers of module, corresponding to systemIDs"}; + Gaudi::Property<bool> m_do_photon_shapeVar{ + this, "do_photon_shapeVar", false, "Calculate shape variables for pi0/photon separation: E_ratio, Delta_E etc."}; + Gaudi::Property<bool> m_do_widthTheta_logE_weights{ + this, "do_widthTheta_logE_weights", false, "Calculate width in theta using logE weights in shape variables"}; + + // the number of grouped theta and phi cells + std::vector<int> nMergedThetaCells; + std::vector<int> nMergedModules; + // the number of modules / phi cells + std::vector<int> nModules; }; #endif /* RECFCCEECALORIMETER_AUGMENTCLUSTERSFCCEE_H */ diff --git a/RecFCCeeCalorimeter/src/components/CalibrateCaloClusters.cpp b/RecFCCeeCalorimeter/src/components/CalibrateCaloClusters.cpp index e0043aee..2963d1cf 100644 --- a/RecFCCeeCalorimeter/src/components/CalibrateCaloClusters.cpp +++ b/RecFCCeeCalorimeter/src/components/CalibrateCaloClusters.cpp @@ -97,7 +97,7 @@ StatusCode CalibrateCaloClusters::initialize() } // read from the metadata the names of the shape parameters in the input clusters and append the total raw energy to the output - std::vector<std::string> shapeParameters = m_inShapeParameterHandle.get(); + std::vector<std::string> shapeParameters = m_inShapeParameterHandle.get({}); shapeParameters.push_back("rawE"); m_outShapeParameterHandle.put(shapeParameters); @@ -241,7 +241,10 @@ StatusCode CalibrateCaloClusters::readCalibrationFile(const std::string &calibra debug() << "Input Node Name/Shape (" << m_input_names.size() << "):" << endmsg; for (std::size_t i = 0; i < m_ortSession->GetInputCount(); i++) { - m_input_names.insert(m_input_names.end(), m_ortSession->GetInputNames().begin(), m_ortSession->GetInputNames().end()); + // for old ONNX runtime version + // m_input_names.emplace_back(m_ortSession->GetInputName(i, allocator)); + // for new runtime version + m_input_names.emplace_back(m_ortSession->GetInputNameAllocated(i, allocator).get()); m_input_shapes = m_ortSession->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); debug() << "\t" << m_input_names.at(i) << " : "; for (std::size_t k = 0; k < m_input_shapes.size() - 1; k++) @@ -263,7 +266,10 @@ StatusCode CalibrateCaloClusters::readCalibrationFile(const std::string &calibra debug() << "Output Node Name/Shape (" << m_output_names.size() << "):" << endmsg; for (std::size_t i = 0; i < m_ortSession->GetOutputCount(); i++) { - m_output_names.insert(m_output_names.end(), m_ortSession->GetOutputNames().begin(), m_ortSession->GetOutputNames().end()); + // for old ONNX runtime version + // m_output_names.emplace_back(m_ortSession->GetOutputName(i, allocator)); + // for new runtime version +
m_output_names.emplace_back(m_ortSession->GetOutputNameAllocated(i, allocator).get()); m_output_shapes = m_ortSession->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); debug() << "\t" << m_output_names.at(i) << " : "; for (std::size_t k = 0; k < m_output_shapes.size() - 1; k++) diff --git a/RecFCCeeCalorimeter/src/components/CreateCaloCellPositionsFCCee.cpp b/RecFCCeeCalorimeter/src/components/CreateCaloCellPositionsFCCee.cpp index 4f19b705..a29793ca 100644 --- a/RecFCCeeCalorimeter/src/components/CreateCaloCellPositionsFCCee.cpp +++ b/RecFCCeeCalorimeter/src/components/CreateCaloCellPositionsFCCee.cpp @@ -35,7 +35,11 @@ StatusCode CreateCaloCellPositionsFCCee::initialize() { } // Copy over the CellIDEncoding string from the input collection to the output collection - m_positionedHitsCellIDEncoding.put(m_hitsCellIDEncoding.get()); + std::string hitsEncoding = m_hitsCellIDEncoding.get(""); + if (hitsEncoding == "") { + warning() << "Missing cellID encoding for input collection" << endmsg; + } + m_positionedHitsCellIDEncoding.put(hitsEncoding); return StatusCode::SUCCESS; } diff --git a/RecFCCeeCalorimeter/src/components/PhotonIDTool.cpp b/RecFCCeeCalorimeter/src/components/PhotonIDTool.cpp new file mode 100644 index 00000000..1e07dbd8 --- /dev/null +++ b/RecFCCeeCalorimeter/src/components/PhotonIDTool.cpp @@ -0,0 +1,419 @@ +#include "PhotonIDTool.h" + +// our EDM +#include "edm4hep/Cluster.h" +#include "edm4hep/ClusterCollection.h" + +#include <fstream> + +#include "nlohmann/json.hpp" + +using json = nlohmann::json; + + +DECLARE_COMPONENT(PhotonIDTool) + +// convert vector data with given shape into ONNX runtime tensor +template <typename T> +Ort::Value vec_to_tensor(std::vector<T> &data, const std::vector<std::int64_t> &shape) +{ + Ort::MemoryInfo mem_info = + Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault); + auto tensor = Ort::Value::CreateTensor<T>(mem_info, data.data(), data.size(), shape.data(), shape.size()); + return tensor; +} + +PhotonIDTool::PhotonIDTool(const std::string &name, + ISvcLocator *svcLoc) + : Gaudi::Algorithm(name, svcLoc) +{ + declareProperty("inClusters", m_inClusters, "Input cluster collection"); + declareProperty("outClusters", m_outClusters, "Output cluster collection"); +} + +StatusCode PhotonIDTool::initialize() +{ + // Initialize base class + { + StatusCode sc = Gaudi::Algorithm::initialize(); + if (sc.isFailure()) + { + return sc; + } + } + + // read the files defining the model + StatusCode sc = readMVAFiles(m_mvaInputsFile, m_mvaModelFile); + if (sc.isFailure()) + { + error() << "Initialization of photon ID tool config files not successful!"
<< endmsg; + return sc; + } + + // read from the metadata the names of the shape parameters in the input clusters + std::vector<std::string> shapeParameters = m_inShapeParameterHandle.get({}); + debug() << "Variables in shapeParameters of input clusters:" << endmsg; + for (const auto &str : shapeParameters) { + debug() << str << endmsg; + } + + // check if the shape parameters contain the inputs needed for the inference + m_inputPositionsInShapeParameters.clear(); + for (const auto &feature : m_internal_input_names) { + + if (feature == "ecl") { + // for the cluster energy, check if we have rawE in decorations + // this is for clusters that have been passed through the MVA calibration + // otherwise, we will use the energy of the cluster object + auto it = std::find(shapeParameters.begin(), shapeParameters.end(), "rawE"); + if (it != shapeParameters.end()) + { + int position = std::distance(shapeParameters.begin(), it); + m_inputPositionsInShapeParameters.push_back(position); + info() << "Feature " << feature << " found in position " << position << " of shapeParameters" << endmsg; + } + else { + m_inputPositionsInShapeParameters.push_back(-1); + } + } + else { + // for the other features, check if they are in the shape parameters + auto it = std::find(shapeParameters.begin(), shapeParameters.end(), feature); + if (it != shapeParameters.end()) + { + int position = std::distance(shapeParameters.begin(), it); + m_inputPositionsInShapeParameters.push_back(position); + info() << "Feature " << feature << " found in position " << position << " of shapeParameters" << endmsg; + } + else + { + // at least one of the inputs of the MVA was not found in the shapeParameters + // so we can stop checking the others + m_inputPositionsInShapeParameters.clear(); + error() << "Feature " << feature << " not found, aborting..." << endmsg; + return StatusCode::FAILURE; + } + } + } + + // append the MVA score to the output shape parameters + shapeParameters.push_back("photonIDscore"); + m_outShapeParameterHandle.put(shapeParameters); + + info() << "Initialized the photonID MVA tool" << endmsg; + return StatusCode::SUCCESS; +} + +StatusCode PhotonIDTool::execute([[maybe_unused]] const EventContext &evtCtx) const +{ + verbose() << "-------------------------------------------" << endmsg; + + // Get the input collection with clusters + const edm4hep::ClusterCollection *inClusters = m_inClusters.get(); + + // Initialize output clusters + edm4hep::ClusterCollection *outClusters = initializeOutputClusters(inClusters); + if (!outClusters) + { + error() << "Something went wrong in initialization of the output cluster collection, exiting!" << endmsg; + return StatusCode::FAILURE; + } + if (inClusters->size() != outClusters->size()) + { + error() << "Sizes of input and output cluster collections do not match, exiting!"
<< endmsg; + return StatusCode::FAILURE; + } + + // Run inference + { + StatusCode sc = applyMVAtoClusters(inClusters, outClusters); + if (sc.isFailure()) + { + return sc; + } + } + + return StatusCode::SUCCESS; +} + +StatusCode PhotonIDTool::finalize() +{ + if (m_ortSession) + delete m_ortSession; + if (m_ortEnv) + delete m_ortEnv; + + return Gaudi::Algorithm::finalize(); +} + +edm4hep::ClusterCollection *PhotonIDTool::initializeOutputClusters( + const edm4hep::ClusterCollection *inClusters) const
{ + edm4hep::ClusterCollection *outClusters = m_outClusters.createAndPut(); + + for (auto const &inCluster : *inClusters) + { + auto outCluster = inCluster.clone(); + outClusters->push_back(outCluster); + } + + return outClusters; +} + +StatusCode PhotonIDTool::readMVAFiles(const std::string& mvaInputsFileName, + const std::string& mvaModelFileName) +{ + // 1. read the file with the list of input features + // Open the JSON file + std::ifstream file(mvaInputsFileName); + if (!file.is_open()) { + error() << "Error opening file: " << mvaInputsFileName << endmsg; + return StatusCode::FAILURE; + } + + // Parse the JSON file + json j; + try { + file >> j; + } catch (const nlohmann::json::exception& e) { + error() << "Error parsing JSON: " << e.what() << endmsg; + return StatusCode::FAILURE; + } + file.close(); + + // Access the data and print to screen + std::string timeStamp; + if (!j.contains("timeStamp")) { + error() << "Error: timeStamp key not found in JSON" << endmsg; + return StatusCode::FAILURE; + } + else { + timeStamp = j["timeStamp"]; + } + + std::string clusterCollection; + if (!j.contains("clusterCollection")) { + error() << "Error: clusterCollection key not found in JSON" << endmsg; + return StatusCode::FAILURE; + } + else { + clusterCollection = j["clusterCollection"]; + } + + std::string trainingTool; + if (!j.contains("trainingTool")) { + error() << "Error: trainingTool key not found in JSON" << endmsg; + return StatusCode::FAILURE; + } + else { + trainingTool = j["trainingTool"]; + } + + info() << "Using the following photon-ID training:" << endmsg; + info() << " Timestamp: " << timeStamp << endmsg; + info() << " Training tool used: " << trainingTool << endmsg; + info() << " Input cluster collection: " << clusterCollection << endmsg; + if (!j.contains("shapeParameters")) { + error() << "Error: shapeParameters key not found in JSON" << endmsg; + return StatusCode::FAILURE; + } + else { + try { + const auto& shape_params = j["shapeParameters"]; + if (!shape_params.is_array()) { + throw std::runtime_error("shapeParameters is not an array"); + } + for (const auto& param : shape_params) { + if (!param.is_string()) { + throw std::runtime_error("shapeParameters contains non-string values"); + } + m_internal_input_names.push_back(param.get<std::string>()); + } + } catch (const std::exception& e) { + error() << "Error: " << e.what() << endmsg; + return StatusCode::FAILURE; + } + } + info() << " Input shape parameters:" << endmsg; + for (const auto &str : m_internal_input_names) { + info() << " " << str << endmsg; + } + if (!j.contains("trainingParameters")) { + error() << "Error: trainingParameters key not found in JSON" << endmsg; + return StatusCode::FAILURE; + } + else { + info() << " Training parameters:" << endmsg; + for (const auto &param : j["trainingParameters"].items()) { + std::string key = param.key(); + std::string value; + if (param.value().is_string()) { + value = param.value().get<std::string>(); + } + else if (param.value().is_number()) { + value = std::to_string(param.value().get<double>()); + } + else if
(param.value().is_null()) { + value = "null"; + } + else { + value = "invalid"; + } + info() << " " << key << " : " << value << endmsg; + } + } + + + // 2. - read the file with the MVA model and setup the ONNX runtime + // set ONNX logging level based on output level of this alg + OrtLoggingLevel loggingLevel = ORT_LOGGING_LEVEL_WARNING; + MSG::Level outputLevel = this->msgStream().level(); + switch (outputLevel) + { + case MSG::Level::FATAL: // 6 + loggingLevel = ORT_LOGGING_LEVEL_FATAL; // 4 + break; + case MSG::Level::ERROR: // 5 + loggingLevel = ORT_LOGGING_LEVEL_ERROR; // 3 + break; + case MSG::Level::WARNING: // 4 + loggingLevel = ORT_LOGGING_LEVEL_WARNING; // 2 + break; + case MSG::Level::INFO: // 3 + loggingLevel = ORT_LOGGING_LEVEL_WARNING; // 2 (ORT_LOGGING_LEVEL_INFO too verbose..) + break; + case MSG::Level::DEBUG: // 2 + loggingLevel = ORT_LOGGING_LEVEL_INFO; // 1 + break; + case MSG::Level::VERBOSE: // 1 + loggingLevel = ORT_LOGGING_LEVEL_VERBOSE; // 0 + break; + default: + break; + } + try + { + m_ortEnv = new Ort::Env(loggingLevel, "ONNX runtime environment for photonID"); + Ort::SessionOptions session_options; + session_options.SetIntraOpNumThreads(1); + m_ortSession = new Ort::Experimental::Session(*m_ortEnv, const_cast<std::string&>(mvaModelFileName), session_options); + // m_ortSession = new Ort::Session(*m_ortEnv, const_cast<std::string&>(mvaModelFileName), session_options); + } + catch (const Ort::Exception &exception) + { + error() << "ERROR setting up ONNX runtime environment: " << exception.what() << endmsg; + return StatusCode::FAILURE; + } + + // print name/shape of inputs + // use default allocator (CPU) + Ort::AllocatorWithDefaultOptions allocator; + debug() << "Input Node Name/Shape (" << m_ortSession->GetInputCount() << "):" << endmsg; + for (std::size_t i = 0; i < m_ortSession->GetInputCount(); i++) + { + // for old ONNX runtime version + // m_input_names.emplace_back(m_ortSession->GetInputName(i, allocator)); + // for new runtime version + m_input_names.emplace_back(m_ortSession->GetInputNameAllocated(i, allocator).get()); + m_input_shapes = m_ortSession->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); + debug() << "\t" << m_input_names.at(i) << " : "; + for (std::size_t k = 0; k < m_input_shapes.size() - 1; k++) + { + debug() << m_input_shapes[k] << "x"; + } + debug() << m_input_shapes[m_input_shapes.size() - 1] << endmsg; + } + // some models might have negative shape values to indicate dynamic shape, e.g., for variable batch size.
+ for (auto &s : m_input_shapes) + { + if (s < 0) + { + s = 1; + } + } + + // print name/shape of outputs + debug() << "Output Node Name/Shape (" << m_ortSession->GetOutputCount() << "):" << endmsg; + for (std::size_t i = 0; i < m_ortSession->GetOutputCount(); i++) + { + // for old ONNX runtime version + // m_output_names.emplace_back(m_ortSession->GetOutputName(i, allocator)); + // for new runtime version + m_output_names.emplace_back(m_ortSession->GetOutputNameAllocated(i, allocator).get()); + m_output_shapes = m_ortSession->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape(); + debug() << m_output_shapes.size() << endmsg; + debug() << "\t" << m_output_names.at(i) << " : "; + for (std::size_t k = 0; k < m_output_shapes.size() - 1; k++) + { + debug() << m_output_shapes[k] << "x"; + } + debug() << m_output_shapes[m_output_shapes.size() - 1] << endmsg; + } + + debug() << "PhotonID config files read out successfully" << endmsg; + + return StatusCode::SUCCESS; +} + +StatusCode PhotonIDTool::applyMVAtoClusters(const edm4hep::ClusterCollection *inClusters, + edm4hep::ClusterCollection *outClusters) const +{ + size_t numShapeVars = m_internal_input_names.size(); + std::vector<float> mvaInputs(numShapeVars); + + // loop over the input clusters and perform the inference + for (unsigned int j = 0; j < inClusters->size(); ++j) + { + // read the values of the input features + for (unsigned int i = 0; i < m_inputPositionsInShapeParameters.size(); i++) { + int position = m_inputPositionsInShapeParameters[i]; + if (position == -1) + mvaInputs[i] = (inClusters->at(j)).getEnergy(); + else + mvaInputs[i] = (inClusters->at(j)).getShapeParameters(position); + } + + // print the values of the input features + verbose() << "MVA inputs:" << endmsg; + for (unsigned short int k = 0; k < numShapeVars; ++k) + { + verbose() << "var " << k << " : " << mvaInputs[k] << endmsg; + } + + // run the MVA and save the output score in output + float score= -1.0; + // Create a single Ort tensor + std::vector<Ort::Value> input_tensors; + input_tensors.emplace_back(vec_to_tensor<float>(mvaInputs, m_input_shapes)); + + // pass data through model + try + { + std::vector<Ort::Value> output_tensors = m_ortSession->Run(m_input_names, + input_tensors, + m_output_names, + Ort::RunOptions{nullptr}); + + // double-check the dimensions of the output tensors + // NOTE: the number of output tensors is equal to the number of output nodes specified in the Run() call + // assert(output_tensors.size() == output_names.size() && output_tensors[0].IsTensor()); + // the probabilities are in the 2nd entry of the output + debug() << output_tensors.size() << endmsg; + debug() << output_tensors[1].GetTensorTypeAndShapeInfo().GetShape() << endmsg; + float *outputData = output_tensors[1].GetTensorMutableData<float>(); + for (int i=0; i<2; i++) + debug() << i << " " << outputData[i] << endmsg; + score = outputData[1]; + } + catch (const Ort::Exception &exception) + { + error() << "ERROR running model inference: " << exception.what() << endmsg; + return StatusCode::FAILURE; + } + + verbose() << "Photon ID score: " << score << endmsg; + outClusters->at(j).addToShapeParameters(score); + } + + return StatusCode::SUCCESS; +} diff --git a/RecFCCeeCalorimeter/src/components/PhotonIDTool.h b/RecFCCeeCalorimeter/src/components/PhotonIDTool.h new file mode 100644 index 00000000..7edaab3c --- /dev/null +++ b/RecFCCeeCalorimeter/src/components/PhotonIDTool.h @@ -0,0 +1,115 @@ +#ifndef RECFCCEECALORIMETER_PHOTONIDTOOL_H +#define RECFCCEECALORIMETER_PHOTONIDTOOL_H + +// Key4HEP +#include
"k4FWCore/DataHandle.h" +#include "k4FWCore/MetaDataHandle.h" + +// Gaudi +#include "GaudiKernel/Algorithm.h" +#include "GaudiKernel/ToolHandle.h" +#include "GaudiKernel/MsgStream.h" +class IGeoSvc; + +// EDM4HEP +namespace edm4hep { + class Cluster; + class ClusterCollection; +} + +// ONNX +#include "onnxruntime/core/session/experimental_onnxruntime_cxx_api.h" + +/** @class PhotonIDTool + * + * Apply a binary MVA classifier to discriminate between photons and pi0s. + * It takes a cluster collection in inputs, runs the inference using as inputs + * the variables in the shapeParameters of the input clusters, decorates the + * cluster with the photon probability (appended to the shapeParameters vector) + * and saves the cluster in a new output collection. + * + * @author Giovanni Marchiori + */ + +class PhotonIDTool : public Gaudi::Algorithm { + +public: + PhotonIDTool(const std::string& name, ISvcLocator* svcLoc); + + virtual StatusCode initialize(); + + virtual StatusCode execute(const EventContext& evtCtx) const; + + virtual StatusCode finalize(); + +private: + /** + * Initialize output calorimeter cluster collection. + * + * @param[in] inClusters Pointer to the input cluster collection. + * + * @return Pointer to the output cluster collection. + */ + edm4hep::ClusterCollection* initializeOutputClusters(const edm4hep::ClusterCollection* inClusters) const; + + /** + * Load file with MVA model into memory. + * + * @return Status code. + */ + StatusCode readMVAFiles(const std::string& mvaInputsFileName, + const std::string& mvaModelFileName); + + /** + * Calculate the MVA score for the input clusters and adds it + * as a new shapeParameter of the output clusters + * + * @param[in] inClusters Pointer to the input cluster collection. + * @param[out] outClusters Pointer to the output cluster collection. + * + * @return Status code. 
+ */ + StatusCode applyMVAtoClusters(const edm4hep::ClusterCollection* inClusters, + edm4hep::ClusterCollection* outClusters) const; + + /// Handle for input calorimeter clusters collection + mutable DataHandle<edm4hep::ClusterCollection> m_inClusters { + "inClusters", Gaudi::DataHandle::Reader, this + }; + + /// Handle for output calorimeter clusters collection + mutable DataHandle<edm4hep::ClusterCollection> m_outClusters { + "outClusters", Gaudi::DataHandle::Writer, this + }; + + /// Handles for the cluster shower shape metadata to read and to write + MetaDataHandle<std::vector<std::string>> m_inShapeParameterHandle{ + m_inClusters, + edm4hep::labels::ShapeParameterNames, + Gaudi::DataHandle::Reader}; + MetaDataHandle<std::vector<std::string>> m_outShapeParameterHandle{ + m_outClusters, + edm4hep::labels::ShapeParameterNames, + Gaudi::DataHandle::Writer}; + + /// Files with the MVA model and list of inputs + Gaudi::Property<std::string> m_mvaModelFile { + this, "mvaModelFile", {}, "ONNX file with the mva model"}; + Gaudi::Property<std::string> m_mvaInputsFile { + this, "mvaInputsFile", {}, "JSON file with the mva inputs"}; + + // the ONNX runtime session for running the inference, + // the environment, and the input and output shapes and names + Ort::Experimental::Session* m_ortSession = nullptr; + Ort::Env* m_ortEnv = nullptr; + std::vector<std::int64_t> m_input_shapes; + std::vector<std::int64_t> m_output_shapes; + std::vector<std::string> m_input_names; + std::vector<std::string> m_output_names; + std::vector<std::string> m_internal_input_names; + + // the indices of the shapeParameters containing the inputs to the model (-1 if not found) + std::vector<int> m_inputPositionsInShapeParameters; +}; + +#endif /* RECFCCEECALORIMETER_PHOTONIDTOOL_H */ diff --git a/RecFCCeeCalorimeter/tests/options/reproduceSegfault.py b/RecFCCeeCalorimeter/tests/options/reproduceSegfault.py deleted file mode 100644 index 487ee72b..00000000 --- a/RecFCCeeCalorimeter/tests/options/reproduceSegfault.py +++ /dev/null @@ -1,326 +0,0 @@ -# - - -import os - -from GaudiKernel.SystemOfUnits import MeV, GeV, tesla - -use_pythia = False - -# Input for simulations (momentum is expected in GeV!) -# Parameters for the particle gun simulations, dummy if use_pythia is set to True -# theta from 80 to 100 degrees corresponds to -0.17 < eta < 0.17 -momentum = 1 # in GeV -thetaMin = 90.25 # degrees -thetaMax = 90.25 # degrees -#thetaMin = 50 # degrees -#thetaMax = 130 # degrees -pdgCode = 111 # 11 electron, 13 muon, 22 photon, 111 pi0, 211 pi+ -magneticField = False - -from Gaudi.Configuration import * - -from Configurables import k4DataSvc -podioevent = k4DataSvc("EventDataSvc") - -################## Particle gun setup -_pi = 3.14159 - -from Configurables import GenAlg -genAlg = GenAlg() -if use_pythia: - from Configurables import PythiaInterface - pythia8gentool = PythiaInterface() - pythia8gentool.Filename = "MCGeneration/ee_Z_ee.cmd" - genAlg.SignalProvider = pythia8gentool -else: - from Configurables import MomentumRangeParticleGun - pgun = MomentumRangeParticleGun("ParticleGun_Electron") - pgun.PdgCodes = [pdgCode] - pgun.MomentumMin = momentum * GeV - pgun.MomentumMax = momentum * GeV - pgun.PhiMin = 0 - #pgun.PhiMax = 0 - pgun.PhiMax = 2 * _pi - pgun.ThetaMin = thetaMin * _pi / 180. - pgun.ThetaMax = thetaMax * _pi / 180.
- genAlg.SignalProvider = pgun - -genAlg.hepmc.Path = "hepmc" - -from Configurables import HepMCToEDMConverter -hepmc_converter = HepMCToEDMConverter() -hepmc_converter.hepmc.Path="hepmc" -genParticlesOutputName = "genParticles" -hepmc_converter.GenParticles.Path = genParticlesOutputName -hepmc_converter.hepmcStatusList = [] - -################## Simulation setup -# Detector geometry -from Configurables import GeoSvc -geoservice = GeoSvc("GeoSvc") -# if FCC_DETECTORS is empty, this should use relative path to working directory -path_to_detectors = os.environ.get("K4GEO", "") -detectors = [ - 'FCCee/ALLEGRO/compact/ALLEGRO_o1_v01/ALLEGRO_o1_v01.xml' -] -# prefix all xmls with path_to_detectors -for det in detectors: - geoservice.detectors += [os.path.join(path_to_detectors, det)] -geoservice.OutputLevel = INFO - -# Geant4 service -# Configures the Geant simulation: geometry, physics list and user actions - -# Uncomment if history from Geant4 decays is needed (e.g. to get the photons from pi0) and set actions=actions in SimG4Svc -#from Configurables import SimG4FullSimActions, SimG4Alg, SimG4PrimariesFromEdmTool, SimG4SaveParticleHistory -#actions = SimG4FullSimActions() -#actions.enableHistory=True -#actions.energyCut = 0.2 * GeV -#saveHistTool = SimG4SaveParticleHistory("saveHistory") - -from Configurables import SimG4Svc -geantservice = SimG4Svc("SimG4Svc", detector='SimG4DD4hepDetector', physicslist="SimG4FtfpBert", actions="SimG4FullSimActions") - -# Fixed seed to have reproducible results, change it for each job if you split one production into several jobs -# Mind that if you leave Gaudi handle random seed and some job start within the same second (very likely) you will have duplicates -geantservice.randomNumbersFromGaudi = False -geantservice.seedValue = 4242 - -# Range cut -geantservice.g4PreInitCommands += ["/run/setCut 0.1 mm"] - -# Magnetic field -from Configurables import SimG4ConstantMagneticFieldTool -if magneticField == 1: - field = SimG4ConstantMagneticFieldTool("SimG4ConstantMagneticFieldTool", FieldComponentZ=-2*tesla, FieldOn=True,IntegratorStepper="ClassicalRK4") -else: - field = SimG4ConstantMagneticFieldTool("SimG4ConstantMagneticFieldTool",FieldOn=False) - -# Geant4 algorithm -# Translates EDM to G4Event, passes the event to G4, writes out outputs via tools -# and a tool that saves the calorimeter hits - -# Detector readouts -# ECAL -ecalBarrelReadoutName = "ECalBarrelEta" -ecalBarrelReadoutNamePhiEta = "ECalBarrelPhiEta" -# HCAL -hcalReadoutName = "HCalBarrelReadout" -extHcalReadoutName = "HCalExtBarrelReadout" - -# Configure saving of calorimeter hits -ecalBarrelHitsName = "ECalBarrelPositionedHits" -from Configurables import SimG4SaveCalHits -saveECalBarrelTool = SimG4SaveCalHits("saveECalBarrelHits", readoutNames = [ecalBarrelReadoutName]) -saveECalBarrelTool.CaloHits.Path = ecalBarrelHitsName - -saveHCalTool = SimG4SaveCalHits("saveHCalBarrelHits", readoutNames = [hcalReadoutName]) -saveHCalTool.CaloHits.Path = "HCalBarrelPositionedHits" - -# next, create the G4 algorithm, giving the list of names of tools ("XX/YY") -from Configurables import SimG4PrimariesFromEdmTool -particle_converter = SimG4PrimariesFromEdmTool("EdmConverter") -particle_converter.GenParticles.Path = genParticlesOutputName - -from Configurables import SimG4Alg -geantsim = SimG4Alg("SimG4Alg", - outputs= [saveECalBarrelTool, - saveHCalTool, - #saveHistTool - ], - eventProvider=particle_converter, - OutputLevel=INFO) - -############## Digitization (Merging hits into cells, EM scale calibration) 
-# EM scale calibration (sampling fraction) -from Configurables import CalibrateInLayersTool -calibEcalBarrel = CalibrateInLayersTool("CalibrateECalBarrel", - samplingFraction = [0.3015372769511699] * 1 + [0.11149483835280927] * 1 + [0.13606757625633936] * 1 + [0.15166527482797484] * 1 + [0.1632488357891111] * 1 + [0.17266802625003083] * 1 + [0.17979275037206174] * 1 + [0.18684819895019078] * 1 + [0.19186331727529204] * 1 + [0.1974122727924478] * 1 + [0.20215095335650032] * 1 + [0.22557145948990787] * 1, - readoutName = ecalBarrelReadoutName, - layerFieldName = "layer") - -from Configurables import CalibrateCaloHitsTool -calibHcells = CalibrateCaloHitsTool("CalibrateHCal", invSamplingFraction="41.66") - -# Create cells in ECal barrel -# 1. step - merge hits into cells with Eta and module segmentation (phi module is a 'physical' cell i.e. lead + LAr + PCB + LAr +lead) -# 2. step - rewrite the cellId using the Eta-Phi segmentation (merging several modules into one phi readout cell) -from Configurables import CreateCaloCells -createEcalBarrelCellsStep1 = CreateCaloCells("CreateECalBarrelCellsStep1", - doCellCalibration=True, - calibTool = calibEcalBarrel, - addCellNoise=False, filterCellNoise=False, - addPosition=True, - OutputLevel=INFO, - hits=ecalBarrelHitsName, - cells="ECalBarrelCellsStep1") - -## Use Phi-Theta segmentation in ECal barrel -from Configurables import RedoSegmentation -resegmentEcalBarrel = RedoSegmentation("ReSegmentationEcal", - # old bitfield (readout) - oldReadoutName = ecalBarrelReadoutName, - # specify which fields are going to be altered (deleted/rewritten) - oldSegmentationIds = ["module"], - # new bitfield (readout), with new segmentation - newReadoutName = ecalBarrelReadoutNamePhiEta, - OutputLevel = INFO, - inhits = "ECalBarrelCellsStep1", - outhits = "ECalBarrelCellsStep2") - -EcalBarrelCellsName = "ECalBarrelCells" -createEcalBarrelCells = CreateCaloCells("CreateECalBarrelCells", - doCellCalibration=False, - addCellNoise=False, filterCellNoise=False, - OutputLevel=INFO, - hits="ECalBarrelCellsStep2", - cells=EcalBarrelCellsName) - -# Ecal barrel cell positions (good for physics, all coordinates set properly) -from Configurables import CellPositionsECalBarrelTool -cellPositionEcalBarrelTool = CellPositionsECalBarrelTool("CellPositionsECalBarrel", readoutName = ecalBarrelReadoutNamePhiEta, OutputLevel = INFO) - -from Configurables import CreateCaloCellPositionsFCCee -createEcalBarrelPositionedCells = CreateCaloCellPositionsFCCee("ECalBarrelPositionedCells", OutputLevel = INFO) -createEcalBarrelPositionedCells.positionsTool = cellPositionEcalBarrelTool -createEcalBarrelPositionedCells.hits.Path = EcalBarrelCellsName -createEcalBarrelPositionedCells.positionedHits.Path = "ECalBarrelPositionedCells" - -# Create cells in HCal -# 1. 
step - merge hits into cells with the default readout -createHcalBarrelCells = CreateCaloCells("CreateHCaloCells", - doCellCalibration=True, - calibTool=calibHcells, - addCellNoise = False, filterCellNoise = False, - OutputLevel = INFO, - hits="HCalBarrelHits", - cells="HCalBarrelCells") - -# sliding window clustering #FIXME not yet ready for key4hep -#Empty cells for parts of calorimeter not implemented yet -from Configurables import CreateEmptyCaloCellsCollection -createemptycells = CreateEmptyCaloCellsCollection("CreateEmptyCaloCells") -createemptycells.cells.Path = "emptyCaloCells" - -from Configurables import CaloTowerTool -towers = CaloTowerTool("towers", - deltaEtaTower = 0.01, deltaPhiTower = 2*_pi/768., - radiusForPosition = 2160 + 40 / 2.0, - ecalBarrelReadoutName = ecalBarrelReadoutNamePhiEta, - ecalEndcapReadoutName = "", - ecalFwdReadoutName = "", - hcalBarrelReadoutName = "", - hcalExtBarrelReadoutName = "", - hcalEndcapReadoutName = "", - hcalFwdReadoutName = "", - OutputLevel = INFO) -towers.ecalBarrelCells.Path = EcalBarrelCellsName -towers.ecalEndcapCells.Path = "emptyCaloCells" -towers.ecalFwdCells.Path = "emptyCaloCells" -towers.hcalBarrelCells.Path = "emptyCaloCells" -towers.hcalExtBarrelCells.Path = "emptyCaloCells" -towers.hcalEndcapCells.Path = "emptyCaloCells" -towers.hcalFwdCells.Path = "emptyCaloCells" - -# Cluster variables -windE = 9 -windP = 17 -posE = 5 -posP = 11 -dupE = 7 -dupP = 13 -finE = 9 -finP = 17 -# Minimal energy to create a cluster in GeV (FCC-ee detectors have to reconstruct low energy particles) -threshold = 0.1 - -from Configurables import CreateCaloClustersSlidingWindow -createClusters = CreateCaloClustersSlidingWindow("CreateClusters", - towerTool = towers, - nEtaWindow = windE, nPhiWindow = windP, - nEtaPosition = posE, nPhiPosition = posP, - nEtaDuplicates = dupE, nPhiDuplicates = dupP, - nEtaFinal = finE, nPhiFinal = finP, - energyThreshold = threshold, - energySharingCorrection = False, - attachCells = True, - OutputLevel = INFO - ) -createClusters.clusters.Path = "CaloClusters" -createClusters.clusterCells.Path = "CaloClusterCells" - -#from Configurables import CorrectCaloClusters -#correctCaloClusters = CorrectCaloClusters("correctCaloClusters", -# inClusters = createClusters.clusters.Path, -# outClusters = "Corrected"+createClusters.clusters.Path, -# samplingFractions = [[0.3015372769511699] * 1 + [0.11149483835280927] * 1 + [0.13606757625633936] * 1 + [0.15166527482797484] * 1 + [0.1632488357891111] * 1 + [0.17266802625003083] * 1 + [0.17979275037206174] * 1 + [0.18684819895019078] * 1 + [0.19186331727529204] * 1 + [0.1974122727924478] * 1 + [0.20215095335650032] * 1 + [0.22557145948990787] * 1], -# numLayers = [12], -# firstLayerIDs = [0], -# lastLayerIDs = [11], -# readoutNames = [ecalBarrelReadoutNamePhiEta], -# upstreamParameters = [[0.0912344950407701, -11.69061425151683, -178.55685321769735, 1.6808795238147223, -22.684217073808675, -50.206009157875414]], -# upstreamFormulas = [['[0]+[1]/(x-[2])', '[0]+[1]/(x-[2])']], -# downstreamParameters = [[0.0024194025802062973, 0.008158908740253542, 1.644457047375556, -1.841481149132812, 0.027658240849727546, 8.727867137116235]], -# downstreamFormulas = [['[0]+[1]*x', '[0]+[1]/sqrt(x)', '[0]+[1]/x']], -# OutputLevel = INFO -# ) - - - -createEcalBarrelPositionedCaloClusterCells = CreateCaloCellPositionsFCCee("ECalBarrelPositionedCaloClusterCells", OutputLevel = INFO) -createEcalBarrelPositionedCaloClusterCells.positionsTool = cellPositionEcalBarrelTool 
-createEcalBarrelPositionedCaloClusterCells.hits.Path = "CaloClusterCells" -createEcalBarrelPositionedCaloClusterCells.positionedHits.Path = "PositionedCaloClusterCells" - -################ Output -from Configurables import PodioOutput -out = PodioOutput("out", - OutputLevel=INFO) - -out.outputCommands = ["keep *", "drop ECalBarrelHits", "drop HCal*", "drop ECalBarrelCellsStep*", "drop ECalBarrelPositionedHits", "drop emptyCaloCells", "drop CaloClusterCells"] - -import uuid -out.filename = "output_fullCalo_SimAndDigi_withCluster_MagneticField_"+str(magneticField)+"_pMin_"+str(momentum*1000)+"_MeV"+"_ThetaMinMax_"+str(thetaMin)+"_"+str(thetaMax)+"_pdgId_"+str(pdgCode)+"_pythia"+str(use_pythia)+"_noClusterSharing.root" - -#CPU information -from Configurables import AuditorSvc, ChronoAuditor -chra = ChronoAuditor() -audsvc = AuditorSvc() -audsvc.Auditors = [chra] -genAlg.AuditExecute = True -hepmc_converter.AuditExecute = True -geantsim.AuditExecute = True -createEcalBarrelCellsStep1.AuditExecute = True -resegmentEcalBarrel.AuditExecute = True -createEcalBarrelCells.AuditExecute = True -#createHcalBarrelCells.AuditExecute = True -out.AuditExecute = True - -from Configurables import EventCounter -event_counter = EventCounter('event_counter') -event_counter.Frequency = 10 - -from Configurables import ApplicationMgr -ApplicationMgr( - TopAlg = [ - event_counter, - genAlg, - hepmc_converter, - geantsim, - createEcalBarrelCellsStep1, - resegmentEcalBarrel, - createEcalBarrelCells, - createEcalBarrelPositionedCells, - #createHcalBarrelCells, - createemptycells, - createClusters, - #correctCaloClusters, - createEcalBarrelPositionedCaloClusterCells, - out - ], - EvtSel = 'NONE', - EvtMax = 10, - ExtSvc = [geoservice, podioevent, geantservice, audsvc], - StopOnSignal = True, - ) diff --git a/RecFCCeeCalorimeter/tests/options/runCaloXTalkNeighbours.py b/RecFCCeeCalorimeter/tests/options/runCaloXTalkNeighbours.py index 3f9d7afe..6be028ba 100644 --- a/RecFCCeeCalorimeter/tests/options/runCaloXTalkNeighbours.py +++ b/RecFCCeeCalorimeter/tests/options/runCaloXTalkNeighbours.py @@ -10,7 +10,7 @@ path_to_detector = os.environ.get("K4GEO", "") print(path_to_detector) detectors_to_use = [ - 'FCCee/ALLEGRO/compact/ALLEGRO_o1_v02/ALLEGRO_o1_v02.xml' + 'FCCee/ALLEGRO/compact/ALLEGRO_o1_v03/ALLEGRO_o1_v03.xml' ] # prefix all xmls with path_to_detector @@ -24,7 +24,7 @@ systemNames=["system"], systemValues=[4], activeFieldNames=["layer"], - activeVolumesNumbers=[12], + activeVolumesNumbers=[11], OutputLevel=DEBUG) # ApplicationMgr diff --git a/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged.py b/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged.py index 812b61f8..a636f062 100644 --- a/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged.py +++ b/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged.py @@ -26,6 +26,7 @@ from Configurables import GenAlg from Configurables import FCCDataSvc from Configurables import RewriteBitfield +from Configurables import ReadCaloCrosstalkMap from Gaudi.Configuration import INFO import os @@ -310,11 +311,18 @@ # (merging several modules and severla theta readout cells). 
# Add noise at this step if you derived the noise already assuming merged cells +# read the crosstalk map +readCrosstalkMap = ReadCaloCrosstalkMap("ReadCrosstalkMap", + fileName="https://fccsw.web.cern.ch/fccsw/filesForSimDigiReco/ALLEGRO/ALLEGRO_o1_v03/xtalk_neighbours_map_ecalB_thetamodulemerged.root", + OutputLevel=INFO) + # Step 1: merge hits into cells according to initial segmentation ecalBarrelCellsName = "ECalBarrelCells" createEcalBarrelCells = CreateCaloCells("CreateECalBarrelCells", doCellCalibration=True, calibTool=calibEcalBarrel, + crosstalksTool=readCrosstalkMap, + addCrosstalk=False, addCellNoise=False, filterCellNoise=False, addPosition=True, diff --git a/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged_SW_benchmarkCorr.py b/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged_SW_benchmarkCorr.py index 04f08712..691b1ab1 100644 --- a/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged_SW_benchmarkCorr.py +++ b/RecFCCeeCalorimeter/tests/options/run_thetamodulemerged_SW_benchmarkCorr.py @@ -24,6 +24,7 @@ from Configurables import HepMCToEDMConverter from Configurables import GenAlg from Configurables import FCCDataSvc +from Configurables import ReadCaloCrosstalkMap from Gaudi.Configuration import INFO # from Gaudi.Configuration import * @@ -303,11 +304,18 @@ # (merging several modules and severla theta readout cells). # Add noise at this step if you derived the noise already assuming merged cells +# read the crosstalk map +readCrosstalkMap = ReadCaloCrosstalkMap("ReadCrosstalkMap", + fileName="https://fccsw.web.cern.ch/fccsw/filesForSimDigiReco/ALLEGRO/ALLEGRO_o1_v03/xtalk_neighbours_map_ecalB_thetamodulemerged.root", + OutputLevel=INFO) + # Step 1: merge hits into cells according to initial segmentation ecalBarrelCellsName = "ECalBarrelCells" createEcalBarrelCells = CreateCaloCells("CreateECalBarrelCells", doCellCalibration=True, calibTool=calibEcalBarrel, + crosstalksTool=readCrosstalkMap, + addCrosstalk=False, addCellNoise=False, filterCellNoise=False, addPosition=True,
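For reference, below is a minimal k4run steering sketch of how the pieces introduced in this changeset could be wired together: ReadCaloCrosstalkMap feeding CreateCaloCells with addCrosstalk=True, and the new PhotonIDTool decorating a cluster collection with a photonIDscore. Only the property names come from the components above; the input/output collection names and the ONNX/JSON file names are illustrative placeholders, and the calibration and noise tools are omitted for brevity.

from Gaudi.Configuration import INFO
from Configurables import ReadCaloCrosstalkMap, CreateCaloCells, PhotonIDTool

# cross-talk neighbour map for the ECal barrel (same file as in the options above)
readCrosstalkMap = ReadCaloCrosstalkMap("ReadCrosstalkMap",
                                        fileName="https://fccsw.web.cern.ch/fccsw/filesForSimDigiReco/ALLEGRO/ALLEGRO_o1_v03/xtalk_neighbours_map_ecalB_thetamodulemerged.root",
                                        OutputLevel=INFO)

# cell creation with the cross-talk emulation switched on
# ("ECalBarrelPositionedHits" / "ECalBarrelCells" are placeholder collection names)
createEcalBarrelCells = CreateCaloCells("CreateECalBarrelCells",
                                        doCellCalibration=False,
                                        crosstalksTool=readCrosstalkMap,
                                        addCrosstalk=True,
                                        addCellNoise=False,
                                        filterCellNoise=False,
                                        addPosition=True,
                                        hits="ECalBarrelPositionedHits",
                                        cells="ECalBarrelCells",
                                        OutputLevel=INFO)

# photon / pi0 discrimination on clusters already decorated by AugmentClustersFCCee
# (model and input-variable file names are placeholders)
photonID = PhotonIDTool("PhotonIDTool",
                        inClusters="AugmentedCaloClusters",
                        outClusters="AugmentedCaloClustersPhotonID",
                        mvaModelFile="photonID_model.onnx",
                        mvaInputsFile="photonID_inputs.json",
                        OutputLevel=INFO)

Note that PhotonIDTool, despite its name, is implemented as a Gaudi::Algorithm, so it is scheduled in the ApplicationMgr TopAlg sequence rather than attached as a tool, and it expects its input variables among the shape parameters written by AugmentClustersFCCee when do_photon_shapeVar is enabled.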