-Previous volume |
-Back to index |
+Previous volume |
+Back to index |
Next volume
Error Parameterisation : volume {vid}
Generated: {date}
diff --git a/Examples/Scripts/MaterialMapping/CMakeLists.txt b/Examples/Scripts/MaterialMapping/CMakeLists.txt
index f921781cfca..f1a5dae6d1d 100644
--- a/Examples/Scripts/MaterialMapping/CMakeLists.txt
+++ b/Examples/Scripts/MaterialMapping/CMakeLists.txt
@@ -5,4 +5,3 @@ install(
TARGETS
ActsAnalysisMaterialComposition
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
-
diff --git a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C
index 46af22beed4..deeb7c275a3 100644
--- a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C
+++ b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C
@@ -17,7 +17,7 @@
/// Draw and save the histograms.
void plot(std::vector<TH2F*> Map, std::vector<int> detectors, const std::string& name){
-
+
std::string sVol = "Detector volumes :";
for(auto const& det: detectors) {
sVol += " ";
@@ -98,8 +98,8 @@ void Initialise_hist(std::vector<TH2F*>& detector_hist){
/// Fill the histograms for the detector.
void Fill(std::vector<TH2F*>& detector_hist, const std::string& input_file, std::vector<int> detectors, const int& nbprocess){
-
-
+
+
Initialise_hist(detector_hist);
//Get file, tree and set top branch address
@@ -129,7 +129,7 @@ void Fill(std::vector<TH2F*>& detector_hist, const std::string& input_file, std:
tree->SetBranchAddress("sur_type",&sur_type);
tree->SetBranchAddress("vol_id",&vol_id);
-
+
int nentries = tree->GetEntries();
if(nentries > nbprocess && nbprocess != -1) nentries = nbprocess;
// Loop over all the material tracks.
@@ -144,7 +144,7 @@ void Fill(std::vector<TH2F*>& detector_hist, const std::string& input_file, std:
for(int j=0; j<sur_id->size(); j++ ){
Acts::GeometryIdentifier ID;
-
+
if(sur_id->at(j) != 0){
ID = Acts::GeometryIdentifier(sur_id->at(j));
}
diff --git a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C
index 798730fb27c..dfbb12effa9 100644
--- a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C
+++ b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C
@@ -22,7 +22,7 @@ void plot_ratio(std::vector<TH2F*> Map_prop, std::vector<TH2F*> Map_geant, std::
Proj_eta_prop->Divide(Unit_Map_prop->ProjectionX());
TH1D *Proj_eta_geant = (TH1D*) Map_geant[0]->ProjectionX()->Clone();
Proj_eta_geant->Divide(Unit_Map_geant->ProjectionX());
-
+
TH1D *Proj_phi_prop = (TH1D*) Map_prop[0]->ProjectionY()->Clone();
Proj_phi_prop->Divide(Unit_Map_prop->ProjectionY());
TH1D *Proj_phi_geant = (TH1D*) Map_geant[0]->ProjectionY()->Clone();
@@ -77,7 +77,7 @@ void plot_ratio(std::vector<TH2F*> Map_prop, std::vector<TH2F*> Map_geant, std::
delete vol;
delete Unit_Map_prop;
delete Unit_Map_geant;
-}
+}
/// Plot the material ratio between the geantino scan and the map validation for each detector.
diff --git a/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh b/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh
index c6829b2c1b3..aa1945b92f3 100644
--- a/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh
+++ b/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# We first run a single batch of jobs using the geant4 material track as an input.
-# This will allow us to obtain a new material track file with the material associated with their respective surfaces.
+# This will allow us to obtain a new material track file with the material associated with their respective surfaces.
# This file is then moved to the input directory; using it will allow us to speed up the subsequent mapping by 50%
python3 ../Examples/Scripts/Python/material_mapping_optimisation.py --numberOfJobs 40 --topNumberOfEvents 10000 --inputPath "MaterialMappingInputDir" --outputPath "MaterialMappingOutputDir" --doPloting 2>&1 | tee log/opti_log_init.txt
mv MaterialMappingOutputDir/optimised-material-map_tracks.root MaterialMappingInputDir/optimised-material-map_tracks.root
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py
index 290d38e6bf8..caa1d9068c9 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py
@@ -17,7 +17,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame:
"""Read the dataset from the different file, remove the pure duplicate tracks and combine the datasets"""
"""
@param[in] CKS_files: DataFrame contain the data from each track files (1 file per events usually)
- @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each event
+    @return: combined DataFrame containing all the tracks, ordered by event and then by truth particle ID in each event
"""
data = []
for f in CKS_files:
@@ -32,7 +32,7 @@ def prepareInferenceData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]:
"""Prepare the data"""
"""
@param[in] data: input DataFrame to be prepared
- @return: array of the network input and the corresponding truth
+ @return: array of the network input and the corresponding truth
"""
# Remove truth and useless variable
target_column = "good/duplicate/fake"
@@ -68,7 +68,7 @@ def clusterTracks(
@param[in] event: input DataFrame that contains all tracks in one event
@param[in] DBSCAN_eps: minimum radius used by the DBSCAN to cluster tracks together
@param[in] DBSCAN_min_samples: minimum number of tracks needed for DBSCAN to create a cluster
- @return: DataFrame identical to the output with an added column with the cluster
+    @return: DataFrame identical to the input with an added column containing the cluster ID
"""
# Perform the DBSCAN clustering and sort the Db by cluster ID
trackDir = event[["eta", "phi"]].to_numpy()
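For orientation, the `clusterTracks` hunk above describes grouping the tracks of one event by angular proximity. A minimal sketch of that pattern, assuming scikit-learn's `DBSCAN` and the `eta`/`phi` columns named in the hunk (the function name and the default values for `eps` and `min_samples` are illustrative, not taken from the patch):

```python
import pandas as pd
from sklearn.cluster import DBSCAN

def cluster_tracks(event: pd.DataFrame, eps: float = 0.07, min_samples: int = 2) -> pd.DataFrame:
    """Cluster the tracks of one event by direction (eta, phi)."""
    track_dir = event[["eta", "phi"]].to_numpy()
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit(track_dir).labels_
    clustered = event.copy()
    # Attach the cluster label as a new column, as the @return line describes.
    clustered["cluster"] = labels
    return clustered.sort_values("cluster")
```

The seed-filter variant further below applies the same pattern to the four features `[eta, phi, vertexZ, pT]`.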
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py
index 12c5fd1e392..15f596414f7 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py
@@ -11,7 +11,7 @@ def prepareDataSet(data: pd.DataFrame) -> pd.DataFrame:
"""Format the dataset that have been written from the Csv file"""
"""
@param[in] data: input DataFrame containing 1 event
- @return: Formatted DataFrame
+ @return: Formatted DataFrame
"""
data = data
# Remove tracks with less than 7 measurements
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py
index 183ed24e851..ec098334764 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py
@@ -10,7 +10,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame:
"""Read the dataset from the different file, remove the pure duplicate tracks and combine the datasets"""
"""
@param[in] CKS_files: DataFrame contain the data from each track files (1 file per events usually)
- @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each event
+    @return: combined DataFrame containing all the tracks, ordered by event and then by truth particle ID in each event
"""
data = []
for f in CKS_files:
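The `readDataSet` docstrings repeated across these scripts all describe the same pipeline: read one file per event, drop the pure duplicate rows, and concatenate. A minimal pandas sketch of that pipeline (function name and file-reading details are assumptions):

```python
import pandas as pd

def read_data_set(cks_files: list[str]) -> pd.DataFrame:
    """Combine per-event track files into one DataFrame."""
    frames = []
    for f in cks_files:
        datafile = pd.read_csv(f)
        # Drop pure duplicate rows, as the docstring requires.
        frames.append(datafile.drop_duplicates())
    return pd.concat(frames, ignore_index=True)
```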
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py b/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py
index 96ebdcc8eb4..80ad297f30d 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py
@@ -15,7 +15,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame:
"""Read the dataset from the different files, remove the pure duplicate tracks and combine the datasets"""
"""
@param[in] CKS_files: DataFrame containing the data from each track file (usually 1 file per event)
- @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each events
+    @return: combined DataFrame containing all the tracks, ordered by event and then by truth particle ID in each event
"""
data = []
for f in CKS_files:
@@ -29,7 +29,7 @@ def prepareInferenceData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]:
"""Prepare the data"""
"""
@param[in] data: input DataFrame to be prepared
- @return: array of the network input and the corresponding truth
+ @return: array of the network input and the corresponding truth
"""
# Remove truth and useless variable
target_column = "good/duplicate/fake"
@@ -60,7 +60,7 @@ def clusterSeed(
@param[in] event: input DataFrame that contains all tracks in one event
@param[in] DBSCAN_eps: minimum radius used by the DBSCAN to cluster tracks together
@param[in] DBSCAN_min_samples: minimum number of tracks needed for DBSCAN to create a cluster
- @return: DataFrame identical to the output with an added column with the cluster
+    @return: DataFrame identical to the input with an added column containing the cluster ID
"""
# Perform the DBSCAN clustering and sort the Db by cluster ID
trackDir = event[["eta", "phi", "vertexZ", "pT"]].to_numpy()
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py b/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py
index a0b4c74107e..d161a868347 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py
@@ -11,7 +11,7 @@ def prepareDataSet(data: pd.DataFrame) -> pd.DataFrame:
"""Format the dataset that have been written from the Csv file"""
"""
@param[in] data: input DataFrame containing 1 event
- @return: Formatted DataFrame
+ @return: Formatted DataFrame
"""
# Sort by particle ID
data = data.sort_values("particleId")
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py b/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py
index 3a2c69be005..71d86a2dcd9 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py
@@ -22,7 +22,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame:
"""Read the dataset from the different files, remove the pure duplicate tracks and combine the datasets"""
"""
@param[in] CKS_files: DataFrame containing the data from each track file (usually 1 file per event)
- @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each events
+    @return: combined DataFrame containing all the tracks, ordered by event and then by truth particle ID in each event
"""
data = pd.DataFrame()
for f in CKS_files:
@@ -41,7 +41,7 @@ def prepareTrainingData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]:
"""Prepare the data"""
"""
@param[in] data: input DataFrame to be prepared
- @return: array of the network input and the corresponding truth
+ @return: array of the network input and the corresponding truth
"""
# Remove truth and useless variable
target_column = "good/duplicate/fake"
@@ -80,7 +80,7 @@ def batchSplit(data: pd.DataFrame, batch_size: int) -> list[pd.DataFrame]:
"""
@param[in] data: input DataFrame to be cut into batches
@param[in] batch_size: Number of truth particles per batch
- @return: list of DataFrame, each element correspond to a batch
+    @return: list of DataFrames, each element corresponding to a batch
"""
batch = []
pid = data[0][0]
@@ -108,7 +108,7 @@ def computeLoss(
) -> torch.Tensor:
"""Compute one loss for each duplicate track associated with the particle"""
"""
- @param[in] score_good: score return by the model for the good track associated with this particle
+    @param[in] score_good: score returned by the model for the good track associated with this particle
@param[in] score_duplicate: list of the scores of all duplicate tracks associated with this particle
@param[in] margin: Margin used in the computation of the MarginRankingLoss
@return: the updated loss
@@ -124,8 +124,8 @@ def computeLoss(
def scoringBatch(batch: list[pd.DataFrame], Optimiser=0) -> tuple[int, int, float]:
"""Run the MLP on a batch and compute the corresponding efficiency and loss. If an optimiser is specified train the MLP."""
"""
- @param[in] batch: list of DataFrame, each element correspond to a batch
- @param[in] Optimiser: Optimiser for the MLP, if one is specify the network will be train on batch.
+    @param[in] batch: list of DataFrames, each element corresponding to a batch
+    @param[in] Optimiser: Optimiser for the MLP; if one is specified the network will be trained on the batch.
@return: array containing the number of particles, the number of particles where the good track was found and the loss
"""
# number of particles
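The `computeLoss` docstrings above describe a per-particle ranking objective: the good track's score should exceed the score of each of its duplicates by a margin. A hedged illustration with PyTorch's `MarginRankingLoss` (function and variable names are hypothetical; the margin value is an assumption):

```python
import torch

def compute_loss(score_good: torch.Tensor,
                 score_duplicate: list[torch.Tensor],
                 margin: float = 0.05) -> torch.Tensor:
    """Sum one MarginRankingLoss term per duplicate track of a particle."""
    ranking = torch.nn.MarginRankingLoss(margin=margin)
    target = torch.ones(1)  # +1 means the first input should rank higher
    loss = torch.zeros(())
    for score in score_duplicate:
        loss = loss + ranking(score_good.reshape(1), score.reshape(1), target)
    return loss
```

`train_seed_solver.py` below extends the same idea with a second margin for fake seeds.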
diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py b/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py
index 3a0a54c6e9c..f2fac4d814c 100644
--- a/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py
+++ b/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py
@@ -26,7 +26,7 @@ def readDataSet(Seed_files: list[str]) -> pd.DataFrame:
"""Read the dataset from the different files, remove the particle with only fakes and combine the datasets"""
"""
@param[in] Seed_files: DataFrame containing the data from each seed file (usually 1 file per event)
- @return: combined DataFrame containing all the seed, ordered by events and then by truth particle ID in each events
+    @return: combined DataFrame containing all the seeds, ordered by event and then by truth particle ID in each event
"""
data = pd.DataFrame()
for f in Seed_files:
@@ -40,7 +40,7 @@ def prepareTrainingData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]:
"""Prepare the data"""
"""
@param[in] data: input DataFrame to be prepared
- @return: array of the network input and the corresponding truth
+ @return: array of the network input and the corresponding truth
"""
# Remove truth and useless variable
target_column = "good/duplicate/fake"
@@ -74,7 +74,7 @@ def batchSplit(data: pd.DataFrame, batch_size: int) -> list[pd.DataFrame]:
"""
@param[in] data: input DataFrame to be cut into batches
@param[in] batch_size: Number of truth particles per batch
- @return: list of DataFrame, each element correspond to a batch
+    @return: list of DataFrames, each element corresponding to a batch
"""
batch = []
pid = data[0][0]
@@ -104,7 +104,7 @@ def computeLoss(
) -> torch.Tensor:
"""Compute one loss for each duplicate seed associated with the particle"""
"""
- @param[in] score_good: score return by the model for the good seed associated with this particle
+    @param[in] score_good: score returned by the model for the good seed associated with this particle
@param[in] score_duplicate: list of the scores of all duplicate seeds associated with this particle
@param[in] margin_duplicate: Margin used in the computation of the MarginRankingLoss for duplicate seeds
@param[in] margin_fake: Margin used in the computation of the MarginRankingLoss for fake seeds
@@ -130,8 +130,8 @@ def computeLoss(
def scoringBatch(batch: list[pd.DataFrame], Optimiser=0) -> tuple[int, int, float]:
"""Run the MLP on a batch and compute the corresponding efficiency and loss. If an optimiser is specify train the MLP."""
"""
- @param[in] batch: list of DataFrame, each element correspond to a batch
- @param[in] Optimiser: Optimiser for the MLP, if one is specify the network will be train on batch.
+    @param[in] batch: list of DataFrames, each element corresponding to a batch
+    @param[in] Optimiser: Optimiser for the MLP; if one is specified the network will be trained on the batch.
@return: array containing the number of particles, the number of particles where the good seed was found and the loss
"""
# number of particles
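Both training scripts carry the same `batchSplit` contract: cut the dataset so that each batch holds `batch_size` truth particles, keeping all rows of a particle together. A rough sketch, assuming (as `pid = data[0][0]` above suggests) that `data` is an array of rows whose first column is the particle ID:

```python
import numpy as np

def batch_split(data: np.ndarray, batch_size: int) -> list[np.ndarray]:
    """Split rows into batches of batch_size truth particles each."""
    batches = []
    start = 0
    n_particles = 0
    pid = data[0][0]
    for i, row in enumerate(data):
        if row[0] != pid:  # a new truth particle starts at row i
            pid = row[0]
            n_particles += 1
            if n_particles == batch_size:
                batches.append(data[start:i])
                start = i
                n_particles = 0
    # Note: a trailing partial batch is dropped in this sketch.
    return batches
```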
diff --git a/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C b/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C
index 5dfa0fe3de8..04715e27fe7 100644
--- a/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C
+++ b/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C
@@ -25,7 +25,7 @@
/// defines the efficiency, fake rate and duplication rate. It aims to make
/// custom definition and tuning of the reconstruction performance easier.
/// Multiple files for the reconstructed tracks are allowed.
-///
+///
/// NB: It's very likely that fiducial cuts are already imposed on the truth
/// particles. Please check the selection criteria in the truth fitting example
/// which writes out the 'track_finder_particles.root'. For instance, if the
diff --git a/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C b/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C
index 7c14908f778..46451a26710 100644
--- a/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C
+++ b/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C
@@ -18,11 +18,11 @@
#include "CommonUtils.h"
-/// This script allows a fast reading and replotting of the existing performance plots, e.g. 'trackeff_vs_*' and 'nMeasurements_vs_*',
-/// from the root file 'performance_track_fitter.root' or 'performance_ckf.root'.
-/// Note that redefinition of the tracking efficiency etc. is not possible with this script.
+/// This script allows a fast reading and replotting of the existing performance plots, e.g. 'trackeff_vs_*' and 'nMeasurements_vs_*',
+/// from the root file 'performance_track_fitter.root' or 'performance_ckf.root'.
+/// Note that redefinition of the tracking efficiency etc. is not possible with this script.
/// If you want to define your own efficiency etc., please refer to 'defineReconstructionPerformance.C'.
-///
+///
void reconstructionPerformance(std::vector<std::string> inputFileNames) {
std::array<TCanvas*, 3> emho = {nullptr, nullptr, nullptr};
std::vector<std::string> tags = {"eta", "pT"};
@@ -46,7 +46,7 @@ void reconstructionPerformance(std::vector inputFileNames) {
auto file = TFile::Open(fileName.c_str(), "read");
unsigned int itag = 0;
for (const auto& t : tags) {
- unsigned int ipar = 0;
+ unsigned int ipar = 0;
for (const auto& p : params) {
std::string hName = p + std::string("_vs_") + t;
emho[itag]->cd(ipar+1);
diff --git a/Fatras/Geant4/CMakeLists.txt b/Fatras/Geant4/CMakeLists.txt
index e9912af1d97..074f558b78f 100644
--- a/Fatras/Geant4/CMakeLists.txt
+++ b/Fatras/Geant4/CMakeLists.txt
@@ -25,4 +25,4 @@ install(
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
install(
DIRECTORY include/ActsFatras
- DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
\ No newline at end of file
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
diff --git a/LICENSE b/LICENSE
index 14e2f777f6c..a612ad9813b 100644
--- a/LICENSE
+++ b/LICENSE
@@ -35,7 +35,7 @@ Mozilla Public License Version 2.0
means any form of the work other than Source Code Form.
1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
+ means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
diff --git a/Plugins/Detray/CMakeLists.txt b/Plugins/Detray/CMakeLists.txt
index a2a1585063d..a57ffa914a4 100644
--- a/Plugins/Detray/CMakeLists.txt
+++ b/Plugins/Detray/CMakeLists.txt
@@ -2,7 +2,7 @@ add_library(
ActsPluginDetray SHARED
src/DetrayConverter.cpp)
-add_dependencies(ActsPluginDetray
+add_dependencies(ActsPluginDetray
detray::core
covfie::core
vecmem::core)
@@ -12,10 +12,10 @@ target_include_directories(
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>)
-
+
target_link_libraries(
- ActsPluginDetray
- PUBLIC
+ ActsPluginDetray
+ PUBLIC
ActsCore
detray::core
detray::core_array
@@ -28,7 +28,7 @@ install(
TARGETS ActsPluginDetray
EXPORT ActsPluginDetrayTargets
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
-
+
install(
DIRECTORY include/Acts
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
diff --git a/Plugins/ExaTrkX/CMakeLists.txt b/Plugins/ExaTrkX/CMakeLists.txt
index 11453ce34c6..d2481f585c8 100644
--- a/Plugins/ExaTrkX/CMakeLists.txt
+++ b/Plugins/ExaTrkX/CMakeLists.txt
@@ -1,10 +1,10 @@
-set(SOURCES
+set(SOURCES
src/buildEdges.cpp
src/ExaTrkXPipeline.cpp
)
if(ACTS_EXATRKX_ENABLE_ONNX)
- list(APPEND SOURCES
+ list(APPEND SOURCES
src/OnnxEdgeClassifier.cpp
src/OnnxMetricLearning.cpp
src/CugraphTrackBuilding.cpp
@@ -12,7 +12,7 @@ if(ACTS_EXATRKX_ENABLE_ONNX)
endif()
if(ACTS_EXATRKX_ENABLE_TORCH)
- list(APPEND SOURCES
+ list(APPEND SOURCES
src/TorchEdgeClassifier.cpp
src/TorchMetricLearning.cpp
src/BoostTrackBuilding.cpp
@@ -28,7 +28,7 @@ add_library(
target_include_directories(
ActsPluginExaTrkX
- PUBLIC
+ PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
)
@@ -80,7 +80,7 @@ if(ACTS_EXATRKX_ENABLE_TORCH)
TorchScatter::TorchScatter
)
- # Should not discard TorchScatter even if its not needed at this point
+ # Should not discard TorchScatter even if it's not needed at this point
# since we need the scatter_max operation in the torch script later
target_link_options(
ActsPluginExaTrkX
diff --git a/Plugins/Podio/CMakeLists.txt b/Plugins/Podio/CMakeLists.txt
index 568a0ba80b1..606f488f1f3 100644
--- a/Plugins/Podio/CMakeLists.txt
+++ b/Plugins/Podio/CMakeLists.txt
@@ -18,8 +18,8 @@ target_link_libraries(
# message(STATUS "IO HANDLERS: ${PODIO_IO_HANDLERS}")
PODIO_GENERATE_DATAMODEL(
- ActsPodioEdm
- ${CMAKE_CURRENT_LIST_DIR}/edm.yml
+ ActsPodioEdm
+ ${CMAKE_CURRENT_LIST_DIR}/edm.yml
headers
sources
IO_BACKEND_HANDLERS ${PODIO_IO_HANDLERS}
@@ -27,9 +27,9 @@ PODIO_GENERATE_DATAMODEL(
PODIO_ADD_DATAMODEL_CORE_LIB(ActsPodioEdm "${headers}" "${sources}")
-target_link_libraries(ActsPluginPodio PUBLIC
- ActsPodioEdm
- ROOT::Core
+target_link_libraries(ActsPluginPodio PUBLIC
+ ActsPodioEdm
+ ROOT::Core
podio::podio
podio::podioRootIO
)
@@ -76,4 +76,3 @@ if (${ROOT_VERSION} GREATER 6)
"${CMAKE_CURRENT_BINARY_DIR}/libActsPodioEdmDict_rdict.pcm"
DESTINATION "${CMAKE_INSTALL_LIBDIR}" COMPONENT dev)
endif()
-
diff --git a/Plugins/Podio/edm.yml b/Plugins/Podio/edm.yml
index 4509de3e6fa..0b5996c37c7 100644
--- a/Plugins/Podio/edm.yml
+++ b/Plugins/Podio/edm.yml
@@ -22,7 +22,7 @@ components:
"
ActsPodioEdm::Surface:
- Members:
+ Members:
- int surfaceType
- int boundsType
- uint64_t geometryId
@@ -96,7 +96,7 @@ datatypes:
ActsPodioEdm::TrackState:
Description: "Local state on a track"
Author : "Paul Gessinger, CERN"
- Members:
+ Members:
- ActsPodioEdm::TrackStateInfo data // local information
- ActsPodioEdm::Surface referenceSurface // reference surface
@@ -128,4 +128,3 @@ datatypes:
# ExtraCode:
# declaration: >
# auto data() { return &m_obj->data; }
-
diff --git a/Plugins/TGeo/CMakeLists.txt b/Plugins/TGeo/CMakeLists.txt
index d52f7ff1139..cd61a3839b1 100644
--- a/Plugins/TGeo/CMakeLists.txt
+++ b/Plugins/TGeo/CMakeLists.txt
@@ -1,6 +1,6 @@
-set(library_sources
- src/TGeoCylinderDiscSplitter.cpp
+set(library_sources
+ src/TGeoCylinderDiscSplitter.cpp
src/TGeoDetectorElement.cpp
src/TGeoLayerBuilder.cpp
src/TGeoParser.cpp
@@ -17,7 +17,7 @@ endif()
add_library(
ActsPluginTGeo SHARED ${library_sources})
-
+
target_include_directories(
ActsPluginTGeo
diff --git a/Tests/Data/README.md b/Tests/Data/README.md
index a36e28f704e..152e7278d70 100644
--- a/Tests/Data/README.md
+++ b/Tests/Data/README.md
@@ -12,4 +12,4 @@ helper functions from the `CommonHelpers` package:
...
auto path = Acts::Test::getDataPath("some-data-file.csv");
-```
\ No newline at end of file
+```
diff --git a/Tests/Data/material-map.json b/Tests/Data/material-map.json
index 7e2145f861c..a59a29a7d17 100644
--- a/Tests/Data/material-map.json
+++ b/Tests/Data/material-map.json
@@ -129,7 +129,7 @@
],
"mapMaterial": true,
"type": "interpolated3D"
- }
+ }
}
}
]
@@ -191,7 +191,7 @@
"thickness": 5.19966459274292
}
]
- ],
+ ],
"mapMaterial": true,
"mappingType": "Default",
"type": "binned"
@@ -249,7 +249,7 @@
"thickness": 5.167008399963379
}
]
- ],
+ ],
"mapMaterial": true,
"mappingType": "Default",
"type": "binned"
@@ -456,4 +456,4 @@
}
]
}
-}
\ No newline at end of file
+}
diff --git a/Tests/Data/vertexing_event_mu20_beamspot.csv b/Tests/Data/vertexing_event_mu20_beamspot.csv
index 2397aaf1c52..01a1711f152 100644
--- a/Tests/Data/vertexing_event_mu20_beamspot.csv
+++ b/Tests/Data/vertexing_event_mu20_beamspot.csv
@@ -1,2 +1,2 @@
posX,posY,posZ,covXX,covYY,covZZ
--0.5,-0.5,0,0.0001,0.0001,1764
\ No newline at end of file
+-0.5,-0.5,0,0.0001,0.0001,1764
diff --git a/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv b/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv
index 7ff43648309..d2ceaf55b1f 100644
--- a/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv
+++ b/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv
@@ -22,4 +22,4 @@ posX,posY,posZ,covXX,covXY,covXZ,covYX,covYY,covYZ,covZX,covZY,covZZ,nTracks,trk
-0.501144,-0.502245,-38.9118,0.000100207,-7.1557e-07,-2.06063e-05,-7.1557e-07,0.000100144,3.25276e-05,-2.06063e-05,3.25276e-05,0.0143517,5,9.58214e-06,24.2323,0
-0.4979,-0.499479,-52.6555,0.000100094,9.8453e-08,7.08961e-06,9.8453e-08,0.000101352,-5.56276e-05,7.08961e-06,-5.56276e-05,0.0198663,3,0.96703,2.24272,0
-0.500966,-0.500699,9.55961,0.00010039,3.49323e-07,6.85953e-05,3.49323e-07,0.000100734,4.98106e-05,6.85953e-05,4.98106e-05,0.0278505,9,0.0599596,6.80763,0
--0.499095,-0.499195,48.8015,0.000100826,3.65595e-07,4.70832e-05,3.65595e-07,0.000101036,5.27768e-05,4.70832e-05,5.27768e-05,0.0304281,2,0.827012,5.87081,0
\ No newline at end of file
+-0.499095,-0.499195,48.8015,0.000100826,3.65595e-07,4.70832e-05,3.65595e-07,0.000101036,5.27768e-05,4.70832e-05,5.27768e-05,0.0304281,2,0.827012,5.87081,0
diff --git a/Tests/UnitTests/Benchmarks/CMakeLists.txt b/Tests/UnitTests/Benchmarks/CMakeLists.txt
index 264d0a9f129..d79cc32d06d 100644
--- a/Tests/UnitTests/Benchmarks/CMakeLists.txt
+++ b/Tests/UnitTests/Benchmarks/CMakeLists.txt
@@ -1 +1 @@
-add_unittest(BenchmarkTools BenchmarkTools.cpp)
\ No newline at end of file
+add_unittest(BenchmarkTools BenchmarkTools.cpp)
diff --git a/Tests/UnitTests/Core/Detector/CMakeLists.txt b/Tests/UnitTests/Core/Detector/CMakeLists.txt
index e8fb8bcb5fa..5ad3c568db0 100644
--- a/Tests/UnitTests/Core/Detector/CMakeLists.txt
+++ b/Tests/UnitTests/Core/Detector/CMakeLists.txt
@@ -25,5 +25,3 @@ add_unittest(Portal PortalTests.cpp)
add_unittest(PortalGenerators PortalGeneratorsTests.cpp)
add_unittest(VolumeStructureBuilder VolumeStructureBuilderTests.cpp)
add_unittest(MultiWireStructureBuilder MultiWireStructureBuilderTests.cpp)
-
-
diff --git a/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp b/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp
index 0a0d9434c90..434b808e309 100644
--- a/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp
+++ b/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(ProtoLayerTests) {
std::stringstream sstream;
protoLayerRot.toStream(sstream);
std::string oString = R"(ProtoLayer with dimensions (min/max)
-Extent in space :
+Extent in space :
- value : binX | range = [-6.66104, 6.66104]
- value : binY | range = [-4.85241, 4.85241]
- value : binZ | range = [-6, 6]
diff --git a/Tests/UnitTests/Core/MagneticField/CMakeLists.txt b/Tests/UnitTests/Core/MagneticField/CMakeLists.txt
index 822c917da28..f207d5d6228 100644
--- a/Tests/UnitTests/Core/MagneticField/CMakeLists.txt
+++ b/Tests/UnitTests/Core/MagneticField/CMakeLists.txt
@@ -2,4 +2,4 @@ add_unittest(ConstantBField ConstantBFieldTests.cpp)
add_unittest(InterpolatedBFieldMap InterpolatedBFieldMapTests.cpp)
#add_unittest(MagneticFieldInterfaceConsistency MagneticFieldInterfaceConsistencyTests.cpp)
add_unittest(SolenoidBField SolenoidBFieldTests.cpp)
-add_unittest(MagneticFieldProvider MagneticFieldProviderTests.cpp)
\ No newline at end of file
+add_unittest(MagneticFieldProvider MagneticFieldProviderTests.cpp)
diff --git a/Tests/UnitTests/Core/Navigation/CMakeLists.txt b/Tests/UnitTests/Core/Navigation/CMakeLists.txt
index a5785f38006..69ebc582396 100644
--- a/Tests/UnitTests/Core/Navigation/CMakeLists.txt
+++ b/Tests/UnitTests/Core/Navigation/CMakeLists.txt
@@ -4,4 +4,3 @@ add_unittest(NavigationState NavigationStateTests.cpp)
add_unittest(NavigationStateUpdaters NavigationStateUpdatersTests.cpp)
add_unittest(DetectorNavigator DetectorNavigatorTests.cpp)
add_unittest(MultiWireNavigation MultiWireNavigationTests.cpp)
-
diff --git a/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp b/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp
index 5d527c4a2ce..361dd461282 100644
--- a/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp
+++ b/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp
@@ -128,30 +128,30 @@ const struct {
} rectShiftedDimensions;
const std::vector<Vector2> rectShiftedTestPoints = {
- {0.00, 1.50}, {0.00, 1.80}, {0.00, 2.10}, {0.00, 2.40}, {0.00, 2.70},
- {0.00, 3.00}, {0.00, 3.30}, {0.00, 3.60}, {0.00, 3.90}, {0.00, 4.20},
- {0.00, 4.50}, {0.40, 1.50}, {0.40, 1.80}, {0.40, 2.10}, {0.40, 2.40},
- {0.40, 2.70}, {0.40, 3.00}, {0.40, 3.30}, {0.40, 3.60}, {0.40, 3.90},
- {0.40, 4.20}, {0.40, 4.50}, {0.80, 1.50}, {0.80, 1.80}, {0.80, 2.10},
- {0.80, 2.40}, {0.80, 2.70}, {0.80, 3.00}, {0.80, 3.30}, {0.80, 3.60},
- {0.80, 3.90}, {0.80, 4.20}, {0.80, 4.50}, {1.20, 1.50}, {1.20, 1.80},
- {1.20, 2.10}, {1.20, 2.40}, {1.20, 2.70}, {1.20, 3.00}, {1.20, 3.30},
- {1.20, 3.60}, {1.20, 3.90}, {1.20, 4.20}, {1.20, 4.50}, {1.60, 1.50},
- {1.60, 1.80}, {1.60, 2.10}, {1.60, 2.40}, {1.60, 2.70}, {1.60, 3.00},
- {1.60, 3.30}, {1.60, 3.60}, {1.60, 3.90}, {1.60, 4.20}, {1.60, 4.50},
- {2.00, 1.50}, {2.00, 1.80}, {2.00, 2.10}, {2.00, 2.40}, {2.00, 2.70},
- {2.00, 3.00}, {2.00, 3.30}, {2.00, 3.60}, {2.00, 3.90}, {2.00, 4.20},
- {2.00, 4.50}, {2.40, 1.50}, {2.40, 1.80}, {2.40, 2.10}, {2.40, 2.40},
- {2.40, 2.70}, {2.40, 3.00}, {2.40, 3.30}, {2.40, 3.60}, {2.40, 3.90},
- {2.40, 4.20}, {2.40, 4.50}, {2.80, 1.50}, {2.80, 1.80}, {2.80, 2.10},
- {2.80, 2.40}, {2.80, 2.70}, {2.80, 3.00}, {2.80, 3.30}, {2.80, 3.60},
- {2.80, 3.90}, {2.80, 4.20}, {2.80, 4.50}, {3.20, 1.50}, {3.20, 1.80},
- {3.20, 2.10}, {3.20, 2.40}, {3.20, 2.70}, {3.20, 3.00}, {3.20, 3.30},
- {3.20, 3.60}, {3.20, 3.90}, {3.20, 4.20}, {3.20, 4.50}, {3.60, 1.50},
- {3.60, 1.80}, {3.60, 2.10}, {3.60, 2.40}, {3.60, 2.70}, {3.60, 3.00},
- {3.60, 3.30}, {3.60, 3.60}, {3.60, 3.90}, {3.60, 4.20}, {3.60, 4.50},
- {4.00, 1.50}, {4.00, 1.80}, {4.00, 2.10}, {4.00, 2.40}, {4.00, 2.70},
- {4.00, 3.00}, {4.00, 3.30}, {4.00, 3.60}, {4.00, 3.90}, {4.00, 4.20},
+ {0.00, 1.50}, {0.00, 1.80}, {0.00, 2.10}, {0.00, 2.40}, {0.00, 2.70},
+ {0.00, 3.00}, {0.00, 3.30}, {0.00, 3.60}, {0.00, 3.90}, {0.00, 4.20},
+ {0.00, 4.50}, {0.40, 1.50}, {0.40, 1.80}, {0.40, 2.10}, {0.40, 2.40},
+ {0.40, 2.70}, {0.40, 3.00}, {0.40, 3.30}, {0.40, 3.60}, {0.40, 3.90},
+ {0.40, 4.20}, {0.40, 4.50}, {0.80, 1.50}, {0.80, 1.80}, {0.80, 2.10},
+ {0.80, 2.40}, {0.80, 2.70}, {0.80, 3.00}, {0.80, 3.30}, {0.80, 3.60},
+ {0.80, 3.90}, {0.80, 4.20}, {0.80, 4.50}, {1.20, 1.50}, {1.20, 1.80},
+ {1.20, 2.10}, {1.20, 2.40}, {1.20, 2.70}, {1.20, 3.00}, {1.20, 3.30},
+ {1.20, 3.60}, {1.20, 3.90}, {1.20, 4.20}, {1.20, 4.50}, {1.60, 1.50},
+ {1.60, 1.80}, {1.60, 2.10}, {1.60, 2.40}, {1.60, 2.70}, {1.60, 3.00},
+ {1.60, 3.30}, {1.60, 3.60}, {1.60, 3.90}, {1.60, 4.20}, {1.60, 4.50},
+ {2.00, 1.50}, {2.00, 1.80}, {2.00, 2.10}, {2.00, 2.40}, {2.00, 2.70},
+ {2.00, 3.00}, {2.00, 3.30}, {2.00, 3.60}, {2.00, 3.90}, {2.00, 4.20},
+ {2.00, 4.50}, {2.40, 1.50}, {2.40, 1.80}, {2.40, 2.10}, {2.40, 2.40},
+ {2.40, 2.70}, {2.40, 3.00}, {2.40, 3.30}, {2.40, 3.60}, {2.40, 3.90},
+ {2.40, 4.20}, {2.40, 4.50}, {2.80, 1.50}, {2.80, 1.80}, {2.80, 2.10},
+ {2.80, 2.40}, {2.80, 2.70}, {2.80, 3.00}, {2.80, 3.30}, {2.80, 3.60},
+ {2.80, 3.90}, {2.80, 4.20}, {2.80, 4.50}, {3.20, 1.50}, {3.20, 1.80},
+ {3.20, 2.10}, {3.20, 2.40}, {3.20, 2.70}, {3.20, 3.00}, {3.20, 3.30},
+ {3.20, 3.60}, {3.20, 3.90}, {3.20, 4.20}, {3.20, 4.50}, {3.60, 1.50},
+ {3.60, 1.80}, {3.60, 2.10}, {3.60, 2.40}, {3.60, 2.70}, {3.60, 3.00},
+ {3.60, 3.30}, {3.60, 3.60}, {3.60, 3.90}, {3.60, 4.20}, {3.60, 4.50},
+ {4.00, 1.50}, {4.00, 1.80}, {4.00, 2.10}, {4.00, 2.40}, {4.00, 2.70},
+ {4.00, 3.00}, {4.00, 3.30}, {4.00, 3.60}, {4.00, 3.90}, {4.00, 4.20},
{4.00, 4.50}
};
//const std::vector rectShiftedClosestPoints = {
@@ -182,34 +182,34 @@ const std::vector<Vector2> rectShiftedTestPoints = {
// {3.00, 4.00}
//};
const std::vector<double> rectShiftedDistances = {
- 1.118033988749895, 1.019803902718557, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0198039027185568, 1.118033988749895, 0.7810249675906654, 0.6324555320336759,
- 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6324555320336757, 0.7810249675906654,
- 0.5385164807134504, 0.28284271247461895, 0.19999999999999996,
- 0.19999999999999996, 0.19999999999999996, 0.19999999999999996,
- 0.19999999999999996, 0.19999999999999996, 0.19999999999999996,
- 0.28284271247461845, 0.5385164807134504, 0.5, 0.19999999999999996,
- -0.10000000000000009, -0.20000000000000018, -0.20000000000000018,
- -0.20000000000000018, -0.20000000000000018, -0.20000000000000018,
- -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996,
- -0.10000000000000009, -0.3999999999999999, -0.6000000000000001,
- -0.6000000000000001, -0.6000000000000001, -0.3999999999999999,
- -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996,
- -0.10000000000000009, -0.3999999999999999, -0.7000000000000002, -1.0,
- -0.7000000000000002, -0.3999999999999999, -0.10000000000000009,
- 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009,
- -0.3999999999999999, -0.5999999999999996, -0.5999999999999996,
- -0.5999999999999996, -0.3999999999999999, -0.10000000000000009,
- 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009,
- -0.19999999999999973, -0.19999999999999973, -0.19999999999999973,
- -0.19999999999999973, -0.19999999999999973, -0.10000000000000009,
- 0.1999999999999993, 0.5, 0.5385164807134505, 0.28284271247461906,
- 0.20000000000000018, 0.20000000000000018, 0.20000000000000018,
- 0.20000000000000018, 0.20000000000000018, 0.20000000000000018,
- 0.20000000000000018, 0.2828427124746186, 0.5385164807134505, 0.7810249675906655,
- 0.6324555320336759, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001,
- 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001,
- 0.6324555320336757, 0.7810249675906655, 1.118033988749895, 1.019803902718557,
+ 1.118033988749895, 1.019803902718557, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0198039027185568, 1.118033988749895, 0.7810249675906654, 0.6324555320336759,
+ 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6324555320336757, 0.7810249675906654,
+ 0.5385164807134504, 0.28284271247461895, 0.19999999999999996,
+ 0.19999999999999996, 0.19999999999999996, 0.19999999999999996,
+ 0.19999999999999996, 0.19999999999999996, 0.19999999999999996,
+ 0.28284271247461845, 0.5385164807134504, 0.5, 0.19999999999999996,
+ -0.10000000000000009, -0.20000000000000018, -0.20000000000000018,
+ -0.20000000000000018, -0.20000000000000018, -0.20000000000000018,
+ -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996,
+ -0.10000000000000009, -0.3999999999999999, -0.6000000000000001,
+ -0.6000000000000001, -0.6000000000000001, -0.3999999999999999,
+ -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996,
+ -0.10000000000000009, -0.3999999999999999, -0.7000000000000002, -1.0,
+ -0.7000000000000002, -0.3999999999999999, -0.10000000000000009,
+ 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009,
+ -0.3999999999999999, -0.5999999999999996, -0.5999999999999996,
+ -0.5999999999999996, -0.3999999999999999, -0.10000000000000009,
+ 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009,
+ -0.19999999999999973, -0.19999999999999973, -0.19999999999999973,
+ -0.19999999999999973, -0.19999999999999973, -0.10000000000000009,
+ 0.1999999999999993, 0.5, 0.5385164807134505, 0.28284271247461906,
+ 0.20000000000000018, 0.20000000000000018, 0.20000000000000018,
+ 0.20000000000000018, 0.20000000000000018, 0.20000000000000018,
+ 0.20000000000000018, 0.2828427124746186, 0.5385164807134505, 0.7810249675906655,
+ 0.6324555320336759, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001,
+ 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001,
+ 0.6324555320336757, 0.7810249675906655, 1.118033988749895, 1.019803902718557,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0198039027185568, 1.118033988749895
};
diff --git a/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp b/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp
index e0bcbfe2e25..09bade86ae8 100644
--- a/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp
+++ b/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp
@@ -83,7 +83,7 @@ l 4 1
BOOST_AUTO_TEST_CASE(Visualization3DTesterPly) {
// Test the tester
std::string validPly = R"(ply
-format ascii 1.0
+format ascii 1.0
comment made by Greg Turk
comment this file is a cube
element vertex 8
@@ -122,7 +122,7 @@ end_header
// Test the tester - contains 3 errors
std::string invalidPly = R"(ply
-format ascii 1.0
+format ascii 1.0
comment made by Greg Turk
comment this file is a cube
element vertex 8
diff --git a/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt b/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt
index c7fc2ca7437..cde4b28ae4d 100644
--- a/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt
+++ b/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt
@@ -1,3 +1,3 @@
set(unittest_extra_libraries ActsExamplesDigitization ActsExamplesIoJson)
-add_unittest(JsonDigitizationConfig JsonDigitizationConfigTests.cpp)
\ No newline at end of file
+add_unittest(JsonDigitizationConfig JsonDigitizationConfigTests.cpp)
diff --git a/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt b/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt
index 89b8cb6d01b..8cc71759a99 100644
--- a/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt
+++ b/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt
@@ -1,3 +1,2 @@
add_executable(ActsUnitTestSeedFinderCuda SeedFinderCudaTest.cpp)
target_link_libraries(ActsUnitTestSeedFinderCuda PRIVATE ${unittest_extra_libraries} Boost::boost)
-
diff --git a/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt b/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt
index 7a738a7b384..0dfe2a66901 100644
--- a/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt
+++ b/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt
@@ -18,7 +18,7 @@ dd4hep_generate_rootmap(ActsTestsDD4hepFactories)
find_library(dd4hep_core_library DDCore)
-if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.24.0")
+if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.24.0")
set(factory_path "$<PATH:GET_PARENT_PATH,$<TARGET_FILE:ActsTestsDD4hepFactories>>")
else()
set(factory_path "${CMAKE_CURRENT_BINARY_DIR}")
@@ -34,10 +34,10 @@ if (NOT "${dd4hep_core_library}" STREQUAL "dd4hep_core_library-NOTFOUND")
add_unittest(${_test} ${_test}Tests.cpp)
add_dependencies(ActsUnitTest${_test} Components_ActsTestsDD4hepFactories)
if(APPLE)
- set_property(TEST ${_test} PROPERTY ENVIRONMENT
- "DYLD_LIBRARY_PATH=${DD4HEP_LIBRARY_PATH}:${factory_path}:$ENV{DYLD_LIBRARY_PATH}")
+ set_property(TEST ${_test} PROPERTY ENVIRONMENT
+ "DYLD_LIBRARY_PATH=${DD4HEP_LIBRARY_PATH}:${factory_path}:$ENV{DYLD_LIBRARY_PATH}")
else()
- set_property(TEST ${_test} PROPERTY ENVIRONMENT
+ set_property(TEST ${_test} PROPERTY ENVIRONMENT
"LD_LIBRARY_PATH=${DD4HEP_LIBRARY_PATH}:${factory_path}:$ENV{LD_LIBRARY_PATH}")
endif()
endforeach()
diff --git a/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp b/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp
index 715a2a09159..5620ca8d585 100644
--- a/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp
+++ b/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp
@@ -45,7 +45,7 @@ const char* beampipe_head_xml =
-
+
)"""";
const char* nec_head_xml =
@@ -79,7 +79,7 @@ const char* plugin_xml =
-
+
@@ -90,7 +90,7 @@ const char* plugin_xml =
-
+
@@ -100,7 +100,7 @@ const char* plugin_xml =
-
+
@@ -111,7 +111,7 @@ const char* plugin_xml =
-
+
@@ -121,7 +121,7 @@ const char* plugin_xml =
-
+
@@ -132,7 +132,7 @@ const char* plugin_xml =
-
+
)""";
diff --git a/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp b/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp
index ca2f7e87092..2974cd5ac32 100644
--- a/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp
+++ b/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp
@@ -63,7 +63,7 @@ BOOST_AUTO_TEST_CASE(same_graph) {
BOOST_AUTO_TEST_CASE(same_graph_large_numbers) {
// clang-format off
std::int64_t k = 100'000;
-
+
std::vector truthGraph = {
1,2,
2,3,
diff --git a/cmake/ActsConfig.cmake.in b/cmake/ActsConfig.cmake.in
index 8e8caad4935..6e4fcb5e06e 100644
--- a/cmake/ActsConfig.cmake.in
+++ b/cmake/ActsConfig.cmake.in
@@ -40,7 +40,7 @@ foreach(_component ${Acts_FIND_COMPONENTS})
endif()
endforeach()
-# add this to the current CMAKE_MODULE_PATH to find third party modules
+# add this to the current CMAKE_MODULE_PATH to find third party modules
# that do not provide a XXXConfig.cmake or XXX-config.cmake file
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/Modules)
diff --git a/cmake/ActsCreatePackageConfig.cmake b/cmake/ActsCreatePackageConfig.cmake
index 6cfbd570f07..dd73597d2ff 100644
--- a/cmake/ActsCreatePackageConfig.cmake
+++ b/cmake/ActsCreatePackageConfig.cmake
@@ -22,12 +22,12 @@ install(
${PROJECT_BINARY_DIR}/ActsConfigVersion.cmake
${PROJECT_BINARY_DIR}/ActsConfig.cmake
DESTINATION ${install_package_config_dir})
-
+
# install third party FindXXX.cmake files
install(
FILES
${CMAKE_CURRENT_LIST_DIR}/FindOnnxRuntime.cmake
- DESTINATION ${install_package_config_dir}/Modules)
+ DESTINATION ${install_package_config_dir}/Modules)
# install target configs for all available components
foreach(_component ${_components})
diff --git a/cmake/pythia8307-cpp20.patch b/cmake/pythia8307-cpp20.patch
deleted file mode 100644
index c6a1890dfaf..00000000000
--- a/cmake/pythia8307-cpp20.patch
+++ /dev/null
@@ -1,129 +0,0 @@
-From 09ef584f1ca797d84c1c0af18ec06b33d6c0d2d0 Mon Sep 17 00:00:00 2001
-From: Paul Gessinger
-Date: Thu, 24 Mar 2022 16:08:26 +0100
-Subject: [PATCH] fixes for C++20 build
-
----
- include/Pythia8/SusyLesHouches.h | 6 +++---
- src/HadronWidths.cc | 8 ++++----
- src/NucleonExcitations.cc | 8 ++++----
- src/PythiaParallel.cc | 2 +-
- 4 files changed, 12 insertions(+), 12 deletions(-)
-
-diff --git a/include/Pythia8/SusyLesHouches.h b/include/Pythia8/SusyLesHouches.h
-index 2f1d9fd..5090c00 100644
---- a/include/Pythia8/SusyLesHouches.h
-+++ b/include/Pythia8/SusyLesHouches.h
-@@ -28,7 +28,7 @@ namespace Pythia8 {
- public:
-
- //Constructor.
-- LHblock() : idnow(0), qDRbar(), i(), val() {} ;
-+ LHblock() : idnow(0), qDRbar(), i(), val() {} ;
-
- //Does block exist?
- bool exists() { return int(entry.size()) == 0 ? false : true ; };
-@@ -129,7 +129,7 @@ namespace Pythia8 {
- template <int size> class LHmatrixBlock {
- public:
- //Constructor. Set uninitialized and explicitly zero.
-- LHmatrixBlock() : entry(), qDRbar(), val() {
-+ LHmatrixBlock() : entry(), qDRbar(), val() {
- initialized=false;
- for (i=1;i<=size;i++) {
- for (j=1;j<=size;j++) {
-@@ -208,7 +208,7 @@ namespace Pythia8 {
- template <int size> class LHtensor3Block {
- public:
- //Constructor. Set uninitialized and explicitly zero.
-- LHtensor3Block() : entry(), qDRbar(), val() {
-+ LHtensor3Block() : entry(), qDRbar(), val() {
- initialized=false;
- for (i=1;i<=size;i++) {
- for (j=1;j<=size;j++) {
-diff --git a/src/HadronWidths.cc b/src/HadronWidths.cc
-index ccc5c72..95a5cb1 100644
---- a/src/HadronWidths.cc
-+++ b/src/HadronWidths.cc
-@@ -867,7 +867,7 @@ double HadronWidths::psSize(double eCM, ParticleDataEntryPtr prodA,
- return 0.;
-
- // Integrate mass of A.
-- auto f = [=](double mA) {
-+ auto f = [=,this](double mA) {
- return pow(pCMS(eCM, mA, m0B), lType) * mDistr(idA, mA); };
- if (!integrateGauss(result, f, mMinA, min(mMaxA, eCM - m0B)))
- success = false;
-@@ -879,7 +879,7 @@ double HadronWidths::psSize(double eCM, ParticleDataEntryPtr prodA,
- return 0.;
-
- // Integrate mass of B.
-- auto f = [=](double mB) {
-+ auto f = [=,this](double mB) {
- return pow(pCMS(eCM, m0A, mB), lType) * mDistr(idB, mB); };
- if (!integrateGauss(result, f, mMinB, min(mMaxB, eCM - m0A)))
- success = false;
-@@ -891,10 +891,10 @@ double HadronWidths::psSize(double eCM, ParticleDataEntryPtr prodA,
- return 0.;
-
- // Define integrand of outer integral.
-- auto I = [=, &success](double mA) {
-+ auto I = [=, &success, this](double mA) {
-
- // Define integrand of inner integral.
-- auto f = [=](double mB) {
-+ auto f = [=,this](double mB) {
- return pow(pCMS(eCM, mA, mB), lType)
- * mDistr(idA, mA) * mDistr(idB, mB); };
- double res;
-diff --git a/src/NucleonExcitations.cc b/src/NucleonExcitations.cc
-index b5eef8f..a82383a 100644
---- a/src/NucleonExcitations.cc
-+++ b/src/NucleonExcitations.cc
-@@ -502,7 +502,7 @@ double NucleonExcitations::psSize(double eCM, ParticleDataEntry& prodA,
- return 0.;
-
- // Integrate mass of A.
-- auto f = [=](double mA) {
-+ auto f = [=, this](double mA) {
- return pCMS(eCM, mA, m0B) * hadronWidthsPtr->mDistr(idA, mA); };
- if (!integrateGauss(result, f, mMinA, min(mMaxA, eCM - m0B)))
- success = false;
-@@ -514,7 +514,7 @@ double NucleonExcitations::psSize(double eCM, ParticleDataEntry& prodA,
- return 0.;
-
- // Integrate mass of B.
-- auto f = [=](double mB) {
-+ auto f = [=,this](double mB) {
- return pCMS(eCM, m0A, mB) * hadronWidthsPtr->mDistr(idB, mB); };
- if (!integrateGauss(result, f, mMinB, min(mMaxB, eCM - m0A)))
- success = false;
-@@ -526,10 +526,10 @@ double NucleonExcitations::psSize(double eCM, ParticleDataEntry& prodA,
- return 0.;
-
- // Define integrand of outer integral.
-- auto I = [=, &success](double mA) {
-+ auto I = [=, &success, this](double mA) {
-
- // Define integrand of inner integral.
-- auto f = [=](double mB) {
-+ auto f = [=,this](double mB) {
- return pCMS(eCM, mA, mB)
- * hadronWidthsPtr->mDistr(idA, mA)
- * hadronWidthsPtr->mDistr(idB, mB); };
-diff --git a/src/PythiaParallel.cc b/src/PythiaParallel.cc
-index 81450e2..7ec3a92 100644
---- a/src/PythiaParallel.cc
-+++ b/src/PythiaParallel.cc
-@@ -106,7 +106,7 @@ bool PythiaParallel::init(function<void(Pythia*)> customInit) {
- bool initSuccess = true;
-
- for (int iPythia = 0; iPythia < numThreads; iPythia += 1) {
-- initThreads.emplace_back([=, &seeds, &initSuccess]() {
-+ initThreads.emplace_back([=, &seeds, &initSuccess, this]() {
- Pythia* pythiaPtr = new Pythia(settings, particleData, false);
- pythiaObjects[iPythia] = unique_ptr<Pythia>(pythiaPtr);
- pythiaObjects[iPythia]->settings.flag("Print:quiet", true);
---
-2.31.1
-
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index 566fcc547c7..b9f309d4da3 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -3,7 +3,7 @@
# the code. when running on readthedocs.org, the build is fully driven
# by Sphinx, including running Doxygen.
#
-# this CMake-based build is only intended for local development.
+# this CMake-based build is only intended for local development.
set(sphinx_build ${CMAKE_CURRENT_SOURCE_DIR}/_build)
set(sphinx_doctrees ${CMAKE_CURRENT_SOURCE_DIR}/_build/doctrees)
diff --git a/docs/acts_project.md b/docs/acts_project.md
index 6027024a462..519fde62560 100644
--- a/docs/acts_project.md
+++ b/docs/acts_project.md
@@ -11,8 +11,8 @@ ACTS is designed as a library that *contains components* for assembling a track
The library is structured as follows:
* The `Core` library contains components that are considered production-ready (except for components located in the `Acts::Experimental` namespace) and that can be interfaced to experiment code
* The `Plugin` folder contains additional extensions that can be optionally switched on to increase the functionality of the software suite, but in general also increase the dependencies on other/third-party libraries
- * The `Fatras` library contains a fast track simulation module, that is based on the same concepts that are used for the [ATLAS Fatras](https://cds.cern.ch/record/1091969) fast track simulation
- * An `Examples` folder that contains a minimal test framework used for showcasing and integration testing,
+ * The `Fatras` library contains a fast track simulation module that is based on the same concepts that are used for the [ATLAS Fatras](https://cds.cern.ch/record/1091969) fast track simulation
+ * An `Examples` folder that contains a minimal test framework used for showcasing and integration testing,
* A `Tests` folder that contains unit tests, benchmark tests and other integration tests
diff --git a/docs/codeguide.md b/docs/codeguide.md
index c9c310b459d..402ba00be6f 100644
--- a/docs/codeguide.md
+++ b/docs/codeguide.md
@@ -160,7 +160,7 @@ static constexpr double kMagic = 1.23;
```
Variables defined in the `Acts::UnitConstants` namespace are exempted for usability reasons and use regular variable naming instead.
-
+
### N.6: Enum values use eCamelCase
Enum values use CamelCase with an `e` prefix. They are not really constants but symbolic values, e.g. they can never have an address, and warrant a separate convention.
diff --git a/docs/contribution/clang_tidy.md b/docs/contribution/clang_tidy.md
index fce2a5d5567..c5087b5a5cc 100644
--- a/docs/contribution/clang_tidy.md
+++ b/docs/contribution/clang_tidy.md
@@ -17,7 +17,7 @@ a report on the issues it detected. The report should give you an error /
warning code, e.g. `readability-braces-around-statements`. The LLVM
documentation has details on all possible error codes, in this particular
example you would find it [here][readability]. This page will tell you that
-`clang-tidy` wants you to replace
+`clang-tidy` wants you to replace
```cpp
if (condition)
diff --git a/docs/contribution/documentation_cheatsheet.md b/docs/contribution/documentation_cheatsheet.md
index f7826741664..352a168c826 100644
--- a/docs/contribution/documentation_cheatsheet.md
+++ b/docs/contribution/documentation_cheatsheet.md
@@ -46,7 +46,7 @@ A link to {class}`Acts::Volume`.
## Pull in API documentation
-* Code:
+* Code:
```text
:::{doxygenclass} Acts::Volume
diff --git a/docs/contribution/release.md b/docs/contribution/release.md
index 3186eec1129..1b5dfb91d3e 100644
--- a/docs/contribution/release.md
+++ b/docs/contribution/release.md
@@ -45,7 +45,7 @@ a7ee09d 2022-05-25 11:17 +0200 Luis Falda Coelho │ o fix: Bug in xyz
4ceddf3 2022-05-25 10:26 +0200 Luis Falda Coelho │ o─┘ feat: ITk seedFilter integration and seed quality confirmation (#1201)
```
-You can now push the updated `releases` branch to the remote `releases` branch using `git push -u upstream releases`.
+You can now push the updated `releases` branch to the remote `releases` branch using `git push -u upstream releases`.
On push, a CI job should run and create an additional commit on the `releases` branch, which bumps a number of version numbers. That commit is going to be the one tagged with the correct version. It doesn't hurt to make sure that commit looks right, as in it bumps to a sensible next version number.
diff --git a/docs/contribution/run_formatting.md b/docs/contribution/run_formatting.md
index 27c9fa3df02..75f915cea88 100644
--- a/docs/contribution/run_formatting.md
+++ b/docs/contribution/run_formatting.md
@@ -48,13 +48,13 @@ Formatting of the Python source code uses the library
```console
$ pip install black
-$ black