diff --git a/.clang-format b/.clang-format index fe441f11a71..0119e54547b 100644 --- a/.clang-format +++ b/.clang-format @@ -1,10 +1,9 @@ ---- BasedOnStyle: Google Language: Cpp AllowShortBlocksOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline -AllowShortIfStatementsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: Never AllowShortLoopsOnASingleLine: false PointerAlignment: Left ColumnLimit: 80 @@ -28,4 +27,3 @@ IncludeCategories: - Regex: '^<.*>' Priority: 3 # all headers not explicitly listed will be assigned to the last group ---- diff --git a/.github/issue_template.md b/.github/issue_template.md index 207f573aa3f..60d3cdee7f0 100644 --- a/.github/issue_template.md +++ b/.github/issue_template.md @@ -12,7 +12,7 @@ FOR YOUR INFORMATION AND MUST BE REMOVED BEFORE SUBMITTING THE ISSUE. - [ ] If the issue reports a bug: - - [ ] Please suggest whether this bug is critical / a blocker. One of the + - [ ] Please suggest whether this bug is critical / a blocker. One of the maintainers will assign a corresponding labels. - [ ] Does the description contain all necessary information to reproduce - the error? \ No newline at end of file + the error? diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index f9c87e2ba2c..30a8e40ee2a 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -38,4 +38,4 @@ REQUEST. - If you push updates, and you know they will be superceded later on, consider adding `[skip ci]` in the commit message. This will instruct the CI system not to run any - jobs on this commit. \ No newline at end of file + jobs on this commit. diff --git a/.github/wip.yml b/.github/wip.yml index 4a91c92f1ea..5575e4ffa62 100644 --- a/.github/wip.yml +++ b/.github/wip.yml @@ -1,6 +1,5 @@ -- locations: +- locations: - title - label_name terms: - WIP - diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index d1c32fa80f3..2170aacd7b6 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -6,7 +6,7 @@ on: - closed - labeled -concurrency: +concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index dc90dabc263..5686d842b66 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -13,31 +13,30 @@ concurrency: cancel-in-progress: true jobs: - format: - runs-on: ubuntu-latest - container: ghcr.io/acts-project/format14:51 - steps: - - uses: actions/checkout@v4 - - name: Check - run: > - git config --global safe.directory "$GITHUB_WORKSPACE" - && CI/check_format . - - uses: actions/upload-artifact@v4 - if: failure() - with: - name: changed - path: changed - format-py: + lint: runs-on: ubuntu-latest + env: + PRE_COMMIT_HOME: '/tmp/pre-commit' + steps: - - uses: actions/checkout@v4 + - name: Checkout + uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: python-version: '3.12' - - name: Install black - run: pip install black[jupyter]==24.4.2 - - name: Run black format check - run: black --check . 
--extend-exclude ".*thirdparty.*" + + - uses: actions/cache@v4 + with: + path: | + ${{ env.PRE_COMMIT_HOME }} + key: ${{ runner.os }}-${{ hashFiles('.pre-commit-config.yaml') }} + + - name: Install pre-commit + run: pip install pre-commit + + - name: Run pre-commit + run: pre-commit run --all-files --show-diff-on-failure license: runs-on: ubuntu-latest @@ -67,16 +66,6 @@ jobs: - name: Check run: > CI/check_pragma_once.sh - end_of_line: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - with: - python-version: '3.12' - - name: Check - run: > - CI/check_end_of_file.py . --exclude "thirdparty/*" --reject-multiple-newlines --github type_t: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index d21a0092783..dfa5b303ee2 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -54,7 +54,7 @@ jobs: -b linkcheck . _build/html/ - + - uses: actions/upload-artifact@v4 with: name: acts-docs diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4747f60f4d3..5b9f4ba2480 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,6 +1,6 @@ name: Release -on: +on: push: branches: - 'releases' diff --git a/.github/workflows/report.yml b/.github/workflows/report.yml index 32379cdce91..13ff4ab311e 100644 --- a/.github/workflows/report.yml +++ b/.github/workflows/report.yml @@ -67,7 +67,7 @@ jobs: return false; } - + let number = Number(fs.readFileSync(file, {encoding: 'utf8'}).trim()); console.log('PR number is '+number); core.exportVariable('PR_NUMBER', number) diff --git a/.merge-sentinel.yml b/.merge-sentinel.yml index 8a1ae7dd2a7..f62cdb682ce 100644 --- a/.merge-sentinel.yml +++ b/.merge-sentinel.yml @@ -9,7 +9,7 @@ rules: - "main" - "develop/*" paths_ignore: - - "docs/*" + - "docs/*" required_checks: - Docs / docs @@ -24,6 +24,6 @@ rules: - "main" - "develop/*" paths: - - "docs/*" + - "docs/*" required_checks: - Docs / docs diff --git a/Alignment/README.md b/Alignment/README.md index 72a15707e00..2a13605b9be 100644 --- a/Alignment/README.md +++ b/Alignment/README.md @@ -1,5 +1,5 @@ # Alignment package This packages provides tools for detector geometry alignment. -Currently, the KalmanFitter-based alignment with internal minimization is implemented. -In the future, the minimization will be done based on the external minimization package e.g. Millepede. +Currently, the KalmanFitter-based alignment with internal minimization is implemented. +In the future, the minimization will be done based on the external minimization package e.g. Millepede. 
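(Editor's note on the checks.yml hunk above: the new `lint` job caches `PRE_COMMIT_HOME` keyed on `hashFiles('.pre-commit-config.yaml')`, but that config file is not part of this diff. A minimal sketch of what it might contain, assuming it mirrors the retired jobs — `black[jupyter]==24.4.2` from the old `format-py` job, and end-of-file/trailing-whitespace fixers replacing `CI/check_end_of_file.py` — using hooks that exist upstream; the actual ACTS file may differ:)

```yaml
# Hypothetical .pre-commit-config.yaml — illustrative only.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: trailing-whitespace   # replaces ad-hoc trailing-space fixes
      - id: end-of-file-fixer     # replaces CI/check_end_of_file.py
  - repo: https://github.com/psf/black
    rev: 24.4.2                   # pinned like the old "pip install black[jupyter]==24.4.2"
    hooks:
      - id: black-jupyter
exclude: ^thirdparty/             # old jobs excluded thirdparty/ as well
```

(Because the cache key hashes this file, bumping any `rev` invalidates the cached hook environments, which is exactly the behavior the workflow's `actions/cache@v4` step relies on.)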
diff --git a/CI/README.md b/CI/README.md index 5f219360158..04a5e63158b 100644 --- a/CI/README.md +++ b/CI/README.md @@ -8,4 +8,4 @@ Since [`poetry`](https://python-poetry.org) supports more robust dependency lock ```console poetry export -f requirements.txt > requirements.txt -``` \ No newline at end of file +``` diff --git a/CI/codespell_ignore.txt b/CI/codespell_ignore.txt index b6550f340ea..e95603fc5b5 100644 --- a/CI/codespell_ignore.txt +++ b/CI/codespell_ignore.txt @@ -21,4 +21,4 @@ ans dthe dthe vart -pixelx \ No newline at end of file +pixelx diff --git a/CI/ctest2junit.xsl b/CI/ctest2junit.xsl index e7de63690ff..e18ae644d45 100644 --- a/CI/ctest2junit.xsl +++ b/CI/ctest2junit.xsl @@ -63,7 +63,7 @@ Extended by providing total Start date, total time, total test stats - + BuildName: BuildStamp: @@ -101,7 +101,7 @@ Extended by providing total Start date, total time, total test stats - + @@ -130,4 +130,3 @@ Extended by providing total Start date, total time, total test stats - diff --git a/CI/physmon/config/vertexing_4muon_50vertices.yml b/CI/physmon/config/vertexing_4muon_50vertices.yml index 6cec280ae15..ade73d36b23 100644 --- a/CI/physmon/config/vertexing_4muon_50vertices.yml +++ b/CI/physmon/config/vertexing_4muon_50vertices.yml @@ -83,12 +83,12 @@ histograms: nbins: 100 min: 0.999 max: 1 - + "trk_weight": nbins: 100 min: -0.01 max: 1.01 - + "sumPt2": nbins: 100 min: 0 diff --git a/CI/physmon/config/vertexing_ttbar_pu200.yml b/CI/physmon/config/vertexing_ttbar_pu200.yml index 23b8e018947..ce333e2db55 100644 --- a/CI/physmon/config/vertexing_ttbar_pu200.yml +++ b/CI/physmon/config/vertexing_ttbar_pu200.yml @@ -83,7 +83,7 @@ histograms: nbins: 100 min: 0.999 max: 1 - + "sumPt2": nbins: 100 min: 0 diff --git a/CI/physmon/phys_perf_mon.sh b/CI/physmon/phys_perf_mon.sh index 9fc97ab8c5f..8d5e9867dae 100755 --- a/CI/physmon/phys_perf_mon.sh +++ b/CI/physmon/phys_perf_mon.sh @@ -247,7 +247,7 @@ function trackfinding() { $refdir/$path/performance_ckf_ambi.root \ "Ambisolver | ${name}" \ $path/performance_ckf_ambi.html \ - $path/performance_ckf_ambi + $path/performance_ckf_ambi fi } @@ -344,7 +344,7 @@ function generation() { $outdir/data/simulation/particles_ttbar_hist.root \ --silent \ --config CI/physmon/config/pythia8_ttbar.yml - + # remove ntuple file because it's large rm $outdir/data/simulation/particles_ttbar.root diff --git a/CI/setup_cvmfs_lcg.sh b/CI/setup_cvmfs_lcg.sh index de711e0548e..529b2101187 100644 --- a/CI/setup_cvmfs_lcg.sh +++ b/CI/setup_cvmfs_lcg.sh @@ -6,7 +6,7 @@ elif test -n "$ZSH_VERSION"; then setopt function_argzero this_script=$0 else - echo "Unsupported shell. Please use bash or zsh." 1>&2 + echo "Unsupported shell. Please use bash or zsh." 1>&2 return fi diff --git a/CI/setup_cvmfs_lcg105.sh b/CI/setup_cvmfs_lcg105.sh index 531d8c827fa..4352f4efb86 100644 --- a/CI/setup_cvmfs_lcg105.sh +++ b/CI/setup_cvmfs_lcg105.sh @@ -4,7 +4,7 @@ if test -e /etc/centos-release && grep 'CentOS Linux release 7' /etc/centos-rele lcg_os=centos7 elif test -e /etc/centos-release && grep 'CentOS Stream release 8' /etc/centos-release; then lcg_os=centos8 -# not centos. Check for RHEL +# not centos. 
Check for RHEL elif test -e /etc/redhat-release && grep 'Linux release 9' /etc/redhat-release; then lcg_os=el9 else diff --git a/CMakePresets.json b/CMakePresets.json index 4f314fa0212..d57ced36d49 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -111,4 +111,4 @@ } } ] -} \ No newline at end of file +} diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 33f00f8b08c..c66b362258e 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,6 +1,6 @@ # Code Of Conduct -The Acts project observes [CERN's Code of Conduct](https://cern.ch/codeofconduct). +The Acts project observes [CERN's Code of Conduct](https://cern.ch/codeofconduct). Below is a plain text summary of the [official PDF](https://cds.cern.ch/record/2240689/files/BrochureCodeofConductEN.pdf?) as of 1. April 2020. Please consult the [PDF](https://cds.cern.ch/record/2240689/files/BrochureCodeofConductEN.pdf?) for up-to-date information. diff --git a/Core/CMakeLists.txt b/Core/CMakeLists.txt index cfe8e2ce1d3..192fe05f6bf 100644 --- a/Core/CMakeLists.txt +++ b/Core/CMakeLists.txt @@ -49,7 +49,7 @@ if(ACTS_LOG_FAILURE_THRESHOLD) message(STATUS "Enable log failure threshold, set to ${ACTS_LOG_FAILURE_THRESHOLD}") target_compile_definitions( ActsCore - PUBLIC + PUBLIC -DACTS_LOG_FAILURE_THRESHOLD=${ACTS_LOG_FAILURE_THRESHOLD} -DACTS_ENABLE_LOG_FAILURE_THRESHOLD) @@ -59,7 +59,7 @@ if(ACTS_ENABLE_LOG_FAILURE_THRESHOLD) message(STATUS "Enable log failure threshold") target_compile_definitions( ActsCore - PUBLIC + PUBLIC -DACTS_ENABLE_LOG_FAILURE_THRESHOLD) endif() diff --git a/Core/include/Acts/Utilities/JacobianHelpers.hpp b/Core/include/Acts/Utilities/JacobianHelpers.hpp index a666fe2ecfe..be56e5f1759 100644 --- a/Core/include/Acts/Utilities/JacobianHelpers.hpp +++ b/Core/include/Acts/Utilities/JacobianHelpers.hpp @@ -30,7 +30,7 @@ inline ActsMatrix<3, 2> sphericalToFreeDirectionJacobian( // clang-format off ActsMatrix<3, 2> jacobian; - jacobian << + jacobian << -direction.y(), cosTheta * cosPhi, direction.x(), cosTheta * sinPhi, 0, -sinTheta; diff --git a/Core/src/Geometry/Extent.cpp b/Core/src/Geometry/Extent.cpp index e8a4467a6a6..262f84ebcaf 100644 --- a/Core/src/Geometry/Extent.cpp +++ b/Core/src/Geometry/Extent.cpp @@ -196,7 +196,7 @@ bool Acts::Extent::operator==(const Extent& e) const { std::string Acts::Extent::toString(const std::string& indent) const { std::stringstream sl; - sl << indent << "Extent in space : " << std::endl; + sl << indent << "Extent in space :" << std::endl; for (const auto& bv : allBinningValues()) { if (constrains(bv)) { sl << indent << " - value :" << std::setw(10) << binningValueName(bv) diff --git a/Examples/Algorithms/Digitization/share/default-input-config-generic.json b/Examples/Algorithms/Digitization/share/default-input-config-generic.json index 49ff4baa7ba..5778d54d36f 100644 --- a/Examples/Algorithms/Digitization/share/default-input-config-generic.json +++ b/Examples/Algorithms/Digitization/share/default-input-config-generic.json @@ -312,4 +312,4 @@ } } ] -} \ No newline at end of file +} diff --git a/Examples/Algorithms/Geometry/CMakeLists.txt b/Examples/Algorithms/Geometry/CMakeLists.txt index 244898d06d1..a82e0c1e885 100644 --- a/Examples/Algorithms/Geometry/CMakeLists.txt +++ b/Examples/Algorithms/Geometry/CMakeLists.txt @@ -1,4 +1,4 @@ -add_library(ActsExamplesGeometry SHARED +add_library(ActsExamplesGeometry SHARED src/VolumeAssociationTest.cpp) target_include_directories( diff --git a/Examples/Algorithms/Propagation/CMakeLists.txt 
b/Examples/Algorithms/Propagation/CMakeLists.txt index 09173d9a0e0..18dcaf30963 100644 --- a/Examples/Algorithms/Propagation/CMakeLists.txt +++ b/Examples/Algorithms/Propagation/CMakeLists.txt @@ -1,4 +1,4 @@ -add_library(ActsExamplesPropagation SHARED +add_library(ActsExamplesPropagation SHARED src/PropagationAlgorithm.cpp) target_include_directories( diff --git a/Examples/Algorithms/TrackFinding/CMakeLists.txt b/Examples/Algorithms/TrackFinding/CMakeLists.txt index dd9e6b9af56..be962b242cf 100644 --- a/Examples/Algorithms/TrackFinding/CMakeLists.txt +++ b/Examples/Algorithms/TrackFinding/CMakeLists.txt @@ -23,7 +23,7 @@ target_link_libraries( ActsExamplesFramework ActsExamplesIoJson ActsExamplesMagneticField - + PRIVATE ROOT::Core ROOT::Geom ROOT::Graf ROOT::Hist ROOT::Gpad ) diff --git a/Examples/Algorithms/TrackFindingExaTrkX/src/TrackFindingAlgorithmExaTrkX.cpp b/Examples/Algorithms/TrackFindingExaTrkX/src/TrackFindingAlgorithmExaTrkX.cpp index 6747097c88f..6bd254a0120 100644 --- a/Examples/Algorithms/TrackFindingExaTrkX/src/TrackFindingAlgorithmExaTrkX.cpp +++ b/Examples/Algorithms/TrackFindingExaTrkX/src/TrackFindingAlgorithmExaTrkX.cpp @@ -74,7 +74,7 @@ ActsExamples::TrackFindingAlgorithmExaTrkX::TrackFindingAlgorithmExaTrkX( dummyInput.data() + dummyInput.size()); std::vector spacepointIDs; std::iota(spacepointIDs.begin(), spacepointIDs.end(), 0); - + runPipeline(dummyInputVec, spacepointIDs); } #endif diff --git a/Examples/Algorithms/TrackFindingML/CMakeLists.txt b/Examples/Algorithms/TrackFindingML/CMakeLists.txt index 845a9d1ece6..90b9913efee 100644 --- a/Examples/Algorithms/TrackFindingML/CMakeLists.txt +++ b/Examples/Algorithms/TrackFindingML/CMakeLists.txt @@ -1,7 +1,7 @@ -set(SOURCES +set(SOURCES src/AmbiguityResolutionML.cpp src/AmbiguityResolutionMLAlgorithm.cpp - src/AmbiguityResolutionMLDBScanAlgorithm.cpp + src/AmbiguityResolutionMLDBScanAlgorithm.cpp src/SeedFilterMLAlgorithm.cpp ) diff --git a/Examples/Detectors/Geant4Detector/CMakeLists.txt b/Examples/Detectors/Geant4Detector/CMakeLists.txt index c10d4b682cb..56b3b872930 100644 --- a/Examples/Detectors/Geant4Detector/CMakeLists.txt +++ b/Examples/Detectors/Geant4Detector/CMakeLists.txt @@ -12,4 +12,3 @@ target_link_libraries( install( TARGETS ActsExamplesDetectorGeant4 LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) - \ No newline at end of file diff --git a/Examples/Detectors/MuonSpectrometerMockupDetector/CMakeLists.txt b/Examples/Detectors/MuonSpectrometerMockupDetector/CMakeLists.txt index 25a7cc948ea..9a5c02082ed 100644 --- a/Examples/Detectors/MuonSpectrometerMockupDetector/CMakeLists.txt +++ b/Examples/Detectors/MuonSpectrometerMockupDetector/CMakeLists.txt @@ -11,4 +11,3 @@ target_link_libraries( install( TARGETS ActsExamplesMuonSpectrometerMockupDetector LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) - diff --git a/Examples/Detectors/TGeoDetector/CMakeLists.txt b/Examples/Detectors/TGeoDetector/CMakeLists.txt index ff5d8e24b86..6bb8967f0b7 100644 --- a/Examples/Detectors/TGeoDetector/CMakeLists.txt +++ b/Examples/Detectors/TGeoDetector/CMakeLists.txt @@ -2,7 +2,7 @@ add_library( ActsExamplesDetectorTGeo SHARED src/TGeoDetector.cpp src/TGeoITkModuleSplitter.cpp) - + target_include_directories( ActsExamplesDetectorTGeo PUBLIC $) @@ -17,5 +17,5 @@ install( LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) install( - DIRECTORY include/ActsExamples - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) \ No newline at end of file + DIRECTORY include/ActsExamples + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) diff --git 
a/Examples/Framework/ML/CMakeLists.txt b/Examples/Framework/ML/CMakeLists.txt index 9ba85666fde..bd574de9fa6 100644 --- a/Examples/Framework/ML/CMakeLists.txt +++ b/Examples/Framework/ML/CMakeLists.txt @@ -17,5 +17,5 @@ install( LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) install( - DIRECTORY include/ActsExamples + DIRECTORY include/ActsExamples DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) diff --git a/Examples/Io/CMakeLists.txt b/Examples/Io/CMakeLists.txt index 33356cf789c..dcf7c16a0a1 100644 --- a/Examples/Io/CMakeLists.txt +++ b/Examples/Io/CMakeLists.txt @@ -6,4 +6,4 @@ add_subdirectory(NuclearInteractions) add_subdirectory(Obj) add_subdirectory(Performance) add_subdirectory(Root) -add_subdirectory_if(Svg ACTS_BUILD_PLUGIN_ACTSVG) \ No newline at end of file +add_subdirectory_if(Svg ACTS_BUILD_PLUGIN_ACTSVG) diff --git a/Examples/Scripts/Benchmarking/CKF_timing_vs_mu.sh b/Examples/Scripts/Benchmarking/CKF_timing_vs_mu.sh index 06969bcdfb7..99f77e70675 100755 --- a/Examples/Scripts/Benchmarking/CKF_timing_vs_mu.sh +++ b/Examples/Scripts/Benchmarking/CKF_timing_vs_mu.sh @@ -9,15 +9,15 @@ help() { echo "" echo "Usage: $0 -d -b -t -n " - echo -e "\t-d The detector type, either 'Generic' or 'DD4hep'. Optional. In default 'Generic'" - echo -e "\t-x The '.xml' for DD4hep detector input. Required if the detector is 'DD4hep'. In default empty" + echo -e "\t-d The detector type, either 'Generic' or 'DD4hep'. Optional. In default 'Generic'" + echo -e "\t-x The '.xml' for DD4hep detector input. Required if the detector is 'DD4hep'. In default empty" echo -e "\t-b The '.txt' or '.root' file for B Field map. Optional. In default using constant BField: (0, 0, 2)" echo -e "\t-n The number of events. Optional. In default: 1" exit 1 # Exit script after printing help } -if [ ! -f "ActsExampleFatrasGeneric" ]; then - echo Please run this script under the directory where the executables are located +if [ ! -f "ActsExampleFatrasGeneric" ]; then + echo Please run this script under the directory where the executables are located exit 1 fi @@ -79,7 +79,7 @@ echo "* job | mode | mu " >> ${output_file} jobID=0 -# Loop over the pileup bins +# Loop over the pileup bins for mu in 0 50 100 150 200 250 300 ; do #Run ttbar events generation gen="${exe_dir}/ActsExamplePythia8 --events=${numEvents} --output-dir=data/gen/ttbar_e${numEvents}_mu${mu} --output-csv=1 --rnd-seed=42 --gen-cms-energy-gev=14000 --gen-hard-process=Top:qqbar2ttbar=on --gen-npileup=${mu}" @@ -91,9 +91,9 @@ for mu in 0 50 100 150 200 250 300 ; do echo ${sim} eval ${sim} - # Loop over the combinatorial/sequential mode (different source link selection criteria) + # Loop over the combinatorial/sequential mode (different source link selection criteria) for mode in {0..1} ; do - # Run reco + # Run reco if [[ $mode -eq 0 ]]; then reco="${exe_dir}/ActsExampleCKFTracks${detector} ${dd4hep_input} ${bField} -j 1 --input-dir=data/sim_${detector}/ttbar_e${numEvents}_mu${mu} --output-dir=data/reco_${detector}/ttbar_e${numEvents}_mu${mu}_m${mode}" else diff --git a/Examples/Scripts/Benchmarking/KF_timing.sh b/Examples/Scripts/Benchmarking/KF_timing.sh index d836601f7ea..389ca532514 100644 --- a/Examples/Scripts/Benchmarking/KF_timing.sh +++ b/Examples/Scripts/Benchmarking/KF_timing.sh @@ -9,7 +9,7 @@ help() { echo "" echo "Usage: $0 -d -b -t -n " - echo -e "\t-d The detector type, either 'Generic' or 'DD4hep'. Optional. In default 'Generic'" + echo -e "\t-d The detector type, either 'Generic' or 'DD4hep'. Optional. 
In default 'Generic'" echo -e "\t-x The '.xml' for DD4hep detector input. Required if the detector is 'DD4hep'. In default empty" echo -e "\t-b The '.txt' or '.root' file for B Field map. Optional. In default using constant BField: (0, 0, 2)" echo -e "\t-t The number of tracks per event. Optional. In default: 100" @@ -17,8 +17,8 @@ help() exit 1 # Exit script after printing help } -if [ ! -f "ActsExampleFatrasGeneric" ]; then - echo Please run this script under the directory where the executables are located +if [ ! -f "ActsExampleFatrasGeneric" ]; then + echo Please run this script under the directory where the executables are located exit 1 fi @@ -44,10 +44,10 @@ if [ "${detector}" == DD4hep ]; then if [ -z "${dd4hepInput}" ]; then echo "Empty input for --dd4hep-input. A file like $ ${output_file} echo "Test Detector: ${detector}" >> ${output_file} echo "BField: ${bField}" >> ${output_file} echo "Events: ${numEvents}" >> ${output_file} -echo "Tracks_per_event: ${numTracksPerEvent}" >> ${output_file} +echo "Tracks_per_event: ${numTracksPerEvent}" >> ${output_file} echo "****************************************" >> ${output_file} echo "*" echo "* job | eta | p | fit_time_per_event" >> ${output_file} jobID=0 - # Loop over the pt bins + # Loop over the pt bins for pt in 0.1 0.5 1.0 2.0 3.0 4.0 5.0 8.0 10.0 50.0 100.0 ; do - # Loop over the eta bin number + # Loop over the eta bin number for etaBin in 0 1 2 3 4; do - etaLow=$(echo "${etaBin}*0.5"|bc) - etaUp=$(echo "${etaBin}*0.5 + 0.5"|bc) - eta=$(echo "${etaBin}*0.5 + 0.25"|bc) + etaLow=$(echo "${etaBin}*0.5"|bc) + etaUp=$(echo "${etaBin}*0.5 + 0.5"|bc) + eta=$(echo "${etaBin}*0.5 + 0.25"|bc) # Run sim sim="${exe_dir}/ActsExampleFatras${detector} ${dd4hep_input} ${bField} -n ${numEvents} --gen-nparticles ${numTracksPerEvent} --gen-mom-gev ${pt}:${pt} --gen-eta ${etaLow}:${etaUp} --output-csv=1 --output-dir=data/sim_${detector}/e${numEvents}_t${numTracksPerEvent}_eta${eta}_pt${pt}" echo "Run sim with '${sim}'" eval ${sim} - - # Run reco + + # Run reco reco="$exe_dir/ActsExampleTruthTracks${detector} ${dd4hep_input} ${bField} --input-dir=data/sim_${detector}/e${numEvents}_t${numTracksPerEvent}_eta${eta}_pt${pt} --output-dir=data/reco_${detector}/e${numEvents}_t${numTracksPerEvent}_eta${eta}_pt${pt}" echo "Run reco with '${reco}'" eval ${reco} - + # Archive with Job ID mv data/reco_${detector}/e${numEvents}_t${numTracksPerEvent}_eta${eta}_pt${pt}/timing.tsv timing_${jobID}.tsv # Extract the fitting time @@ -114,7 +114,7 @@ jobID=0 fit_time_per_event=$(echo ${fit_time_str} | awk '{printf("%.10f\n", $1)}') fit_time_per_track=$(echo "${fit_time_per_event}/${numTracksPerEvent}"|bc -l) echo "${jobID}, ${etaBin}, ${pt}, ${fit_time_per_track}" >> ${output_file} - + # JobID let "jobID++" done diff --git a/Examples/Scripts/Benchmarking/propagation_timing.sh b/Examples/Scripts/Benchmarking/propagation_timing.sh index 391f10cd4c8..4ca4f77eb62 100755 --- a/Examples/Scripts/Benchmarking/propagation_timing.sh +++ b/Examples/Scripts/Benchmarking/propagation_timing.sh @@ -2,8 +2,8 @@ # # This script runs the propagation test with different steppers and different pT bins # -# arguments are: -# $ +# arguments are: +# $ time_stamp=`date +%s%N` run_directory=propagation_timing_${time_stamp} @@ -18,30 +18,30 @@ magfield='--bf-map ../ATLASBField_xyz.root' echo "***************************************" > ${output_file} echo "* Test: $1" >> ${output_file} echo "* Events: $2" >> ${output_file} -echo "* Tests/event: $300" >> ${output_file} +echo "* Tests/event: $300" 
>> ${output_file} echo "***************************************" >> ${output_file} echo "*" echo "* job | stepper | pt" >> ${output_file} jobID=0 -# Loop over the Pt bins +# Loop over the Pt bins for pt in 0.1 0.5 1.0 2.0 5.0 10.0 100.0 ; do - # Loop over the stepper + # Loop over the stepper for stepper in {0..2} ; do - + # Compute the name of the example executable executable="ActsExamplePropagation$1 -n$2 ${magfield} --prop-ntests $3 -j $4 --prop-pt-range ${pt} ${pt} --prop-stepper ${stepper} --output-root" echo "${jobID}, ${stepper}, ${pt}" >> ${output_file} eval ${executable} - + # Archive with Job ID mv timing.tsv timing_${jobID}.tsv mv propagation-steps.root propagation_steps_${jobID}.root - + # JobID let "jobID++" - + done done diff --git a/Examples/Scripts/Digitization/error_parameterisation.py b/Examples/Scripts/Digitization/error_parameterisation.py index f63218d509b..f06add9f4f3 100644 --- a/Examples/Scripts/Digitization/error_parameterisation.py +++ b/Examples/Scripts/Digitization/error_parameterisation.py @@ -237,8 +237,8 @@ def run_error_parametriation(
-            Previous volume | 
-            Back to index | 
+            Previous volume |
+            Back to index |
             Next volume

             Error Parameterisation : volume {vid}

             Generated: {date}
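(Editor's note: the hunk above edits an HTML template embedded in error_parameterisation.py whose markup did not survive extraction; only the link text, page title, and date line are recoverable. A minimal Python sketch of such a per-volume navigation block — file names and layout are hypothetical, not the script's actual template:)

```python
# Hypothetical reconstruction; names and markup are illustrative only.
def navigation_html(vid: int, date: str) -> str:
    """Render the per-volume navigation header of the error report."""
    return (
        f'<a href="vol_{vid - 1}.html">Previous volume</a> |\n'
        f'<a href="index.html">Back to index</a> |\n'
        f'<a href="vol_{vid + 1}.html">Next volume</a>\n'
        f"<h1>Error Parameterisation : volume {vid}</h1>\n"
        f"<p>Generated: {date}</p>\n"
    )
```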
diff --git a/Examples/Scripts/MaterialMapping/CMakeLists.txt b/Examples/Scripts/MaterialMapping/CMakeLists.txt index f921781cfca..f1a5dae6d1d 100644 --- a/Examples/Scripts/MaterialMapping/CMakeLists.txt +++ b/Examples/Scripts/MaterialMapping/CMakeLists.txt @@ -5,4 +5,3 @@ install( TARGETS ActsAnalysisMaterialComposition RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) - diff --git a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C index 46af22beed4..deeb7c275a3 100644 --- a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C +++ b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot.C @@ -17,7 +17,7 @@ /// Draw and save the histograms. void plot(std::vector Map, std::vector detectors, const std::string& name){ - + std::string sVol = "Detector volumes :"; for(auto const& det: detectors) { sVol += " "; @@ -98,8 +98,8 @@ void Initialise_hist(std::vector& detector_hist){ /// Fill the histograms for the detector. void Fill(std::vector& detector_hist, const std::string& input_file, std::vector detectors, const int& nbprocess){ - - + + Initialise_hist(detector_hist); //Get file, tree and set top branch address @@ -129,7 +129,7 @@ void Fill(std::vector& detector_hist, const std::string& input_file, std: tree->SetBranchAddress("sur_type",&sur_type); tree->SetBranchAddress("vol_id",&vol_id); - + int nentries = tree->GetEntries(); if(nentries > nbprocess && nbprocess != -1) nentries = nbprocess; // Loop over all the material tracks. @@ -144,7 +144,7 @@ void Fill(std::vector& detector_hist, const std::string& input_file, std: for(int j=0; jsize(); j++ ){ Acts::GeometryIdentifier ID; - + if(sur_id->at(j) != 0){ ID = Acts::GeometryIdentifier(sur_id->at(j)); } diff --git a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C index 798730fb27c..dfbb12effa9 100644 --- a/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C +++ b/Examples/Scripts/MaterialMapping/Mat_map_detector_plot_ratio.C @@ -22,7 +22,7 @@ void plot_ratio(std::vector Map_prop, std::vector Map_geant, std:: Proj_eta_prop->Divide(Unit_Map_prop->ProjectionX()); TH1D *Proj_eta_geant = (TH1D*) Map_geant[0]->ProjectionX()->Clone(); Proj_eta_geant->Divide(Unit_Map_geant->ProjectionX()); - + TH1D *Proj_phi_prop = (TH1D*) Map_prop[0]->ProjectionY()->Clone(); Proj_phi_prop->Divide(Unit_Map_prop->ProjectionY()); TH1D *Proj_phi_geant = (TH1D*) Map_geant[0]->ProjectionY()->Clone(); @@ -77,7 +77,7 @@ void plot_ratio(std::vector Map_prop, std::vector Map_geant, std:: delete vol; delete Unit_Map_prop; delete Unit_Map_geant; -} +} /// Plot the material ratio between the geantino scan and the map validation for each detector. diff --git a/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh b/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh index c6829b2c1b3..aa1945b92f3 100644 --- a/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh +++ b/Examples/Scripts/Python/Auto-tuning/Orion/launchMaterialAutoTuning.sh @@ -1,7 +1,7 @@ #!/bin/bash # We first run a single batch of jobs using the geant4 material track as an input. -# This will allow us to obtain a new material track file with the material associated with their respective surfaces. +# This will allow us to obtain a new material track file with the material associated with their respective surfaces. 
# This file is then move to the input directory using it will allow us to speed up the following mapping by 50% python3 ../Examples/Scripts/Python/material_mapping_optimisation.py --numberOfJobs 40 --topNumberOfEvents 10000 --inputPath "MaterialMappingInputDir" --outputPath "MaterialMappingOutputDir" --doPloting 2>&1 | tee log/opti_log_init.txt mv MaterialMappingOutputDir/optimised-material-map_tracks.root MaterialMappingInputDir/optimised-material-map_tracks.root diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py index 290d38e6bf8..caa1d9068c9 100644 --- a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py +++ b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_full_chain.py @@ -17,7 +17,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame: """Read the dataset from the different file, remove the pure duplicate tracks and combine the datasets""" """ @param[in] CKS_files: DataFrame contain the data from each track files (1 file per events usually) - @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each event + @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each event """ data = [] for f in CKS_files: @@ -32,7 +32,7 @@ def prepareInferenceData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]: """Prepare the data""" """ @param[in] data: input DataFrame to be prepared - @return: array of the network input and the corresponding truth + @return: array of the network input and the corresponding truth """ # Remove truth and useless variable target_column = "good/duplicate/fake" @@ -68,7 +68,7 @@ def clusterTracks( @param[in] event: input DataFrame that contain all track in one event @param[in] DBSCAN_eps: minimum radius used by the DBSCAN to cluster track together @param[in] DBSCAN_min_samples: minimum number of tracks needed for DBSCAN to create a cluster - @return: DataFrame identical to the output with an added column with the cluster + @return: DataFrame identical to the output with an added column with the cluster """ # Perform the DBSCAN clustering and sort the Db by cluster ID trackDir = event[["eta", "phi"]].to_numpy() diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py index 12c5fd1e392..15f596414f7 100644 --- a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py +++ b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_network.py @@ -11,7 +11,7 @@ def prepareDataSet(data: pd.DataFrame) -> pd.DataFrame: """Format the dataset that have been written from the Csv file""" """ @param[in] data: input DataFrame containing 1 event - @return: Formatted DataFrame + @return: Formatted DataFrame """ data = data # Remove tracks with less than 7 measurements diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py index 183ed24e851..ec098334764 100644 --- a/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py +++ b/Examples/Scripts/Python/MLAmbiguityResolution/ambiguity_solver_perf.py @@ -10,7 +10,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame: """Read the dataset from the different file, remove the pure duplicate tracks and combine the datasets""" """ 
@param[in] CKS_files: DataFrame contain the data from each track files (1 file per events usually) - @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each event + @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each event """ data = [] for f in CKS_files: diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py b/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py index 96ebdcc8eb4..80ad297f30d 100644 --- a/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py +++ b/Examples/Scripts/Python/MLAmbiguityResolution/seed_filter_full_chain.py @@ -15,7 +15,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame: """Read the dataset from the different files, remove the pure duplicate tracks and combine the datasets""" """ @param[in] CKS_files: DataFrame contain the data from each track files (1 file per events usually) - @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each events + @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each events """ data = [] for f in CKS_files: @@ -29,7 +29,7 @@ def prepareInferenceData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]: """Prepare the data""" """ @param[in] data: input DataFrame to be prepared - @return: array of the network input and the corresponding truth + @return: array of the network input and the corresponding truth """ # Remove truth and useless variable target_column = "good/duplicate/fake" @@ -60,7 +60,7 @@ def clusterSeed( @param[in] event: input DataFrame that contain all track in one event @param[in] DBSCAN_eps: minimum radius used by the DBSCAN to cluster track together @param[in] DBSCAN_min_samples: minimum number of tracks needed for DBSCAN to create a cluster - @return: DataFrame identical to the output with an added column with the cluster + @return: DataFrame identical to the output with an added column with the cluster """ # Perform the DBSCAN clustering and sort the Db by cluster ID trackDir = event[["eta", "phi", "vertexZ", "pT"]].to_numpy() diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py b/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py index a0b4c74107e..d161a868347 100644 --- a/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py +++ b/Examples/Scripts/Python/MLAmbiguityResolution/seed_solver_network.py @@ -11,7 +11,7 @@ def prepareDataSet(data: pd.DataFrame) -> pd.DataFrame: """Format the dataset that have been written from the Csv file""" """ @param[in] data: input DataFrame containing 1 event - @return: Formatted DataFrame + @return: Formatted DataFrame """ # Sort by particle ID data = data.sort_values("particleId") diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py b/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py index 3a2c69be005..71d86a2dcd9 100644 --- a/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py +++ b/Examples/Scripts/Python/MLAmbiguityResolution/train_ambiguity_solver.py @@ -22,7 +22,7 @@ def readDataSet(CKS_files: list[str]) -> pd.DataFrame: """Read the dataset from the different files, remove the pure duplicate tracks and combine the datasets""" """ @param[in] CKS_files: DataFrame contain the data from each track files (1 file per events usually) - @return: combined 
DataFrame containing all the track, ordered by events and then by truth particle ID in each events + @return: combined DataFrame containing all the track, ordered by events and then by truth particle ID in each events """ data = pd.DataFrame() for f in CKS_files: @@ -41,7 +41,7 @@ def prepareTrainingData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]: """Prepare the data""" """ @param[in] data: input DataFrame to be prepared - @return: array of the network input and the corresponding truth + @return: array of the network input and the corresponding truth """ # Remove truth and useless variable target_column = "good/duplicate/fake" @@ -80,7 +80,7 @@ def batchSplit(data: pd.DataFrame, batch_size: int) -> list[pd.DataFrame]: """ @param[in] data: input DataFrame to be cut into batch @param[in] batch_size: Number of truth particles per batch - @return: list of DataFrame, each element correspond to a batch + @return: list of DataFrame, each element correspond to a batch """ batch = [] pid = data[0][0] @@ -108,7 +108,7 @@ def computeLoss( ) -> torch.Tensor: """Compute one loss for each duplicate track associated with the particle""" """ - @param[in] score_good: score return by the model for the good track associated with this particle + @param[in] score_good: score return by the model for the good track associated with this particle @param[in] score_duplicate: list of the scores of all duplicate track associated with this particle @param[in] margin: Margin used in the computation of the MarginRankingLoss @return: return the updated loss @@ -124,8 +124,8 @@ def computeLoss( def scoringBatch(batch: list[pd.DataFrame], Optimiser=0) -> tuple[int, int, float]: """Run the MLP on a batch and compute the corresponding efficiency and loss. If an optimiser is specified train the MLP.""" """ - @param[in] batch: list of DataFrame, each element correspond to a batch - @param[in] Optimiser: Optimiser for the MLP, if one is specify the network will be train on batch. + @param[in] batch: list of DataFrame, each element correspond to a batch + @param[in] Optimiser: Optimiser for the MLP, if one is specify the network will be train on batch. 
@return: array containing the number of particles, the number of particle where the good track was found and the loss """ # number of particles diff --git a/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py b/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py index 3a0a54c6e9c..f2fac4d814c 100644 --- a/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py +++ b/Examples/Scripts/Python/MLAmbiguityResolution/train_seed_solver.py @@ -26,7 +26,7 @@ def readDataSet(Seed_files: list[str]) -> pd.DataFrame: """Read the dataset from the different files, remove the particle with only fakes and combine the datasets""" """ @param[in] Seed_files: DataFrame contain the data from each seed files (1 file per events usually) - @return: combined DataFrame containing all the seed, ordered by events and then by truth particle ID in each events + @return: combined DataFrame containing all the seed, ordered by events and then by truth particle ID in each events """ data = pd.DataFrame() for f in Seed_files: @@ -40,7 +40,7 @@ def prepareTrainingData(data: pd.DataFrame) -> tuple[np.ndarray, np.ndarray]: """Prepare the data""" """ @param[in] data: input DataFrame to be prepared - @return: array of the network input and the corresponding truth + @return: array of the network input and the corresponding truth """ # Remove truth and useless variable target_column = "good/duplicate/fake" @@ -74,7 +74,7 @@ def batchSplit(data: pd.DataFrame, batch_size: int) -> list[pd.DataFrame]: """ @param[in] data: input DataFrame to be cut into batch @param[in] batch_size: Number of truth particles per batch - @return: list of DataFrame, each element correspond to a batch + @return: list of DataFrame, each element correspond to a batch """ batch = [] pid = data[0][0] @@ -104,7 +104,7 @@ def computeLoss( ) -> torch.Tensor: """Compute one loss for each duplicate seed associated with the particle""" """ - @param[in] score_good: score return by the model for the good seed associated with this particle + @param[in] score_good: score return by the model for the good seed associated with this particle @param[in] score_duplicate: list of the scores of all duplicate seed associated with this particle @param[in] margin_duplicate: Margin used in the computation of the MarginRankingLoss for duplicate seeds @param[in] margin_fake: Margin used in the computation of the MarginRankingLoss for fake seeds @@ -130,8 +130,8 @@ def computeLoss( def scoringBatch(batch: list[pd.DataFrame], Optimiser=0) -> tuple[int, int, float]: """Run the MLP on a batch and compute the corresponding efficiency and loss. If an optimiser is specify train the MLP.""" """ - @param[in] batch: list of DataFrame, each element correspond to a batch - @param[in] Optimiser: Optimiser for the MLP, if one is specify the network will be train on batch. + @param[in] batch: list of DataFrame, each element correspond to a batch + @param[in] Optimiser: Optimiser for the MLP, if one is specify the network will be train on batch. 
@return: array containing the number of particles, the number of particle where the good seed was found and the loss """ # number of particles diff --git a/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C b/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C index 5dfa0fe3de8..04715e27fe7 100644 --- a/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C +++ b/Examples/Scripts/TrackingPerformance/defineReconstructionPerformance.C @@ -25,7 +25,7 @@ /// defines the efficiency, fake rate and duplicaiton rate. It aims to make /// custom definition and tuning of the reconstruction performance easier. /// Multiple files for the reconstructed tracks are allowed. -/// +/// /// NB: It's very likely that fiducal cuts are already imposed on the truth /// particles. Please check the selection criteria in the truth fitting example /// which writes out the 'track_finder_particles.root'. For instance, if the diff --git a/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C b/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C index 7c14908f778..46451a26710 100644 --- a/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C +++ b/Examples/Scripts/TrackingPerformance/reconstructionPerformance.C @@ -18,11 +18,11 @@ #include "CommonUtils.h" -/// This script allows a fast reading and replotting of the existing performance plots, e.g. 'trackeff_vs_*' and 'nMeasurements_vs_*', -/// from the root file 'performance_track_fitter.root' or 'performance_ckf.root'. -/// Note that redefinition of the tracking efficiency etc. is not possible with this script. +/// This script allows a fast reading and replotting of the existing performance plots, e.g. 'trackeff_vs_*' and 'nMeasurements_vs_*', +/// from the root file 'performance_track_fitter.root' or 'performance_ckf.root'. +/// Note that redefinition of the tracking efficiency etc. is not possible with this script. /// If you want to define your own efficiency etc., please refer to 'defineReconstructionPerformance.C'. -/// +/// void reconstructionPerformance(std::vector inputFileNames) { std::array emho = {nullptr, nullptr, nullptr}; std::vector tags = {"eta", "pT"}; @@ -46,7 +46,7 @@ void reconstructionPerformance(std::vector inputFileNames) { auto file = TFile::Open(fileName.c_str(), "read"); unsigned int itag = 0; for (const auto& t : tags) { - unsigned int ipar = 0; + unsigned int ipar = 0; for (const auto& p : params) { std::string hName = p + std::string("_vs_") + t; emho[itag]->cd(ipar+1); diff --git a/Fatras/Geant4/CMakeLists.txt b/Fatras/Geant4/CMakeLists.txt index e9912af1d97..074f558b78f 100644 --- a/Fatras/Geant4/CMakeLists.txt +++ b/Fatras/Geant4/CMakeLists.txt @@ -25,4 +25,4 @@ install( RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) install( DIRECTORY include/ActsFatras - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) \ No newline at end of file + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) diff --git a/LICENSE b/LICENSE index 14e2f777f6c..a612ad9813b 100644 --- a/LICENSE +++ b/LICENSE @@ -35,7 +35,7 @@ Mozilla Public License Version 2.0 means any form of the work other than Source Code Form. 1.7. "Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. 
"License" diff --git a/Plugins/Detray/CMakeLists.txt b/Plugins/Detray/CMakeLists.txt index a2a1585063d..a57ffa914a4 100644 --- a/Plugins/Detray/CMakeLists.txt +++ b/Plugins/Detray/CMakeLists.txt @@ -2,7 +2,7 @@ add_library( ActsPluginDetray SHARED src/DetrayConverter.cpp) -add_dependencies(ActsPluginDetray +add_dependencies(ActsPluginDetray detray::core covfie::core vecmem::core) @@ -12,10 +12,10 @@ target_include_directories( PUBLIC $ $) - + target_link_libraries( - ActsPluginDetray - PUBLIC + ActsPluginDetray + PUBLIC ActsCore detray::core detray::core_array @@ -28,7 +28,7 @@ install( TARGETS ActsPluginDetray EXPORT ActsPluginDetrayTargets LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) - + install( DIRECTORY include/Acts DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) diff --git a/Plugins/ExaTrkX/CMakeLists.txt b/Plugins/ExaTrkX/CMakeLists.txt index 11453ce34c6..d2481f585c8 100644 --- a/Plugins/ExaTrkX/CMakeLists.txt +++ b/Plugins/ExaTrkX/CMakeLists.txt @@ -1,10 +1,10 @@ -set(SOURCES +set(SOURCES src/buildEdges.cpp src/ExaTrkXPipeline.cpp ) if(ACTS_EXATRKX_ENABLE_ONNX) - list(APPEND SOURCES + list(APPEND SOURCES src/OnnxEdgeClassifier.cpp src/OnnxMetricLearning.cpp src/CugraphTrackBuilding.cpp @@ -12,7 +12,7 @@ if(ACTS_EXATRKX_ENABLE_ONNX) endif() if(ACTS_EXATRKX_ENABLE_TORCH) - list(APPEND SOURCES + list(APPEND SOURCES src/TorchEdgeClassifier.cpp src/TorchMetricLearning.cpp src/BoostTrackBuilding.cpp @@ -28,7 +28,7 @@ add_library( target_include_directories( ActsPluginExaTrkX - PUBLIC + PUBLIC $ $ ) @@ -80,7 +80,7 @@ if(ACTS_EXATRKX_ENABLE_TORCH) TorchScatter::TorchScatter ) - # Should not discard TorchScatter even if its not needed at this point + # Should not discard TorchScatter even if its not needed at this point # since we need the scatter_max operation in the torch script later target_link_options( ActsPluginExaTrkX diff --git a/Plugins/Podio/CMakeLists.txt b/Plugins/Podio/CMakeLists.txt index 568a0ba80b1..606f488f1f3 100644 --- a/Plugins/Podio/CMakeLists.txt +++ b/Plugins/Podio/CMakeLists.txt @@ -18,8 +18,8 @@ target_link_libraries( # message(STATUS "IO HANDLERS: ${PODIO_IO_HANDLERS}") PODIO_GENERATE_DATAMODEL( - ActsPodioEdm - ${CMAKE_CURRENT_LIST_DIR}/edm.yml + ActsPodioEdm + ${CMAKE_CURRENT_LIST_DIR}/edm.yml headers sources IO_BACKEND_HANDLERS ${PODIO_IO_HANDLERS} @@ -27,9 +27,9 @@ PODIO_GENERATE_DATAMODEL( PODIO_ADD_DATAMODEL_CORE_LIB(ActsPodioEdm "${headers}" "${sources}") -target_link_libraries(ActsPluginPodio PUBLIC - ActsPodioEdm - ROOT::Core +target_link_libraries(ActsPluginPodio PUBLIC + ActsPodioEdm + ROOT::Core podio::podio podio::podioRootIO ) @@ -76,4 +76,3 @@ if (${ROOT_VERSION} GREATER 6) "${CMAKE_CURRENT_BINARY_DIR}/libActsPodioEdmDict_rdict.pcm" DESTINATION "${CMAKE_INSTALL_LIBDIR}" COMPONENT dev) endif() - diff --git a/Plugins/Podio/edm.yml b/Plugins/Podio/edm.yml index 4509de3e6fa..0b5996c37c7 100644 --- a/Plugins/Podio/edm.yml +++ b/Plugins/Podio/edm.yml @@ -22,7 +22,7 @@ components: " ActsPodioEdm::Surface: - Members: + Members: - int surfaceType - int boundsType - uint64_t geometryId @@ -96,7 +96,7 @@ datatypes: ActsPodioEdm::TrackState: Description: "Local state on a track" Author : "Paul Gessinger, CERN" - Members: + Members: - ActsPodioEdm::TrackStateInfo data // local information - ActsPodioEdm::Surface referenceSurface // reference surface @@ -128,4 +128,3 @@ datatypes: # ExtraCode: # declaration: > # auto data() { return &m_obj->data; } - diff --git a/Plugins/TGeo/CMakeLists.txt b/Plugins/TGeo/CMakeLists.txt index d52f7ff1139..cd61a3839b1 100644 --- 
a/Plugins/TGeo/CMakeLists.txt +++ b/Plugins/TGeo/CMakeLists.txt @@ -1,6 +1,6 @@ -set(library_sources - src/TGeoCylinderDiscSplitter.cpp +set(library_sources + src/TGeoCylinderDiscSplitter.cpp src/TGeoDetectorElement.cpp src/TGeoLayerBuilder.cpp src/TGeoParser.cpp @@ -17,7 +17,7 @@ endif() add_library( ActsPluginTGeo SHARED ${library_sources}) - + target_include_directories( ActsPluginTGeo diff --git a/Tests/Data/README.md b/Tests/Data/README.md index a36e28f704e..152e7278d70 100644 --- a/Tests/Data/README.md +++ b/Tests/Data/README.md @@ -12,4 +12,4 @@ helper functions from the `CommonHelpers` package: ... auto path = Acts::Test::getDataPath("some-data-file.csv"); -``` \ No newline at end of file +``` diff --git a/Tests/Data/material-map.json b/Tests/Data/material-map.json index 7e2145f861c..a59a29a7d17 100644 --- a/Tests/Data/material-map.json +++ b/Tests/Data/material-map.json @@ -129,7 +129,7 @@ ], "mapMaterial": true, "type": "interpolated3D" - } + } } } ] @@ -191,7 +191,7 @@ "thickness": 5.19966459274292 } ] - ], + ], "mapMaterial": true, "mappingType": "Default", "type": "binned" @@ -249,7 +249,7 @@ "thickness": 5.167008399963379 } ] - ], + ], "mapMaterial": true, "mappingType": "Default", "type": "binned" @@ -456,4 +456,4 @@ } ] } -} \ No newline at end of file +} diff --git a/Tests/Data/vertexing_event_mu20_beamspot.csv b/Tests/Data/vertexing_event_mu20_beamspot.csv index 2397aaf1c52..01a1711f152 100644 --- a/Tests/Data/vertexing_event_mu20_beamspot.csv +++ b/Tests/Data/vertexing_event_mu20_beamspot.csv @@ -1,2 +1,2 @@ posX,posY,posZ,covXX,covYY,covZZ --0.5,-0.5,0,0.0001,0.0001,1764 \ No newline at end of file +-0.5,-0.5,0,0.0001,0.0001,1764 diff --git a/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv b/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv index 7ff43648309..d2ceaf55b1f 100644 --- a/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv +++ b/Tests/Data/vertexing_event_mu20_vertices_AMVF.csv @@ -22,4 +22,4 @@ posX,posY,posZ,covXX,covXY,covXZ,covYX,covYY,covYZ,covZX,covZY,covZZ,nTracks,trk -0.501144,-0.502245,-38.9118,0.000100207,-7.1557e-07,-2.06063e-05,-7.1557e-07,0.000100144,3.25276e-05,-2.06063e-05,3.25276e-05,0.0143517,5,9.58214e-06,24.2323,0 -0.4979,-0.499479,-52.6555,0.000100094,9.8453e-08,7.08961e-06,9.8453e-08,0.000101352,-5.56276e-05,7.08961e-06,-5.56276e-05,0.0198663,3,0.96703,2.24272,0 -0.500966,-0.500699,9.55961,0.00010039,3.49323e-07,6.85953e-05,3.49323e-07,0.000100734,4.98106e-05,6.85953e-05,4.98106e-05,0.0278505,9,0.0599596,6.80763,0 --0.499095,-0.499195,48.8015,0.000100826,3.65595e-07,4.70832e-05,3.65595e-07,0.000101036,5.27768e-05,4.70832e-05,5.27768e-05,0.0304281,2,0.827012,5.87081,0 \ No newline at end of file +-0.499095,-0.499195,48.8015,0.000100826,3.65595e-07,4.70832e-05,3.65595e-07,0.000101036,5.27768e-05,4.70832e-05,5.27768e-05,0.0304281,2,0.827012,5.87081,0 diff --git a/Tests/UnitTests/Benchmarks/CMakeLists.txt b/Tests/UnitTests/Benchmarks/CMakeLists.txt index 264d0a9f129..d79cc32d06d 100644 --- a/Tests/UnitTests/Benchmarks/CMakeLists.txt +++ b/Tests/UnitTests/Benchmarks/CMakeLists.txt @@ -1 +1 @@ -add_unittest(BenchmarkTools BenchmarkTools.cpp) \ No newline at end of file +add_unittest(BenchmarkTools BenchmarkTools.cpp) diff --git a/Tests/UnitTests/Core/Detector/CMakeLists.txt b/Tests/UnitTests/Core/Detector/CMakeLists.txt index e8fb8bcb5fa..5ad3c568db0 100644 --- a/Tests/UnitTests/Core/Detector/CMakeLists.txt +++ b/Tests/UnitTests/Core/Detector/CMakeLists.txt @@ -25,5 +25,3 @@ add_unittest(Portal PortalTests.cpp) add_unittest(PortalGenerators 
PortalGeneratorsTests.cpp) add_unittest(VolumeStructureBuilder VolumeStructureBuilderTests.cpp) add_unittest(MultiWireStructureBuilder MultiWireStructureBuilderTests.cpp) - - diff --git a/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp b/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp index 0a0d9434c90..434b808e309 100644 --- a/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp +++ b/Tests/UnitTests/Core/Geometry/ProtoLayerTests.cpp @@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(ProtoLayerTests) { std::stringstream sstream; protoLayerRot.toStream(sstream); std::string oString = R"(ProtoLayer with dimensions (min/max) -Extent in space : +Extent in space : - value : binX | range = [-6.66104, 6.66104] - value : binY | range = [-4.85241, 4.85241] - value : binZ | range = [-6, 6] diff --git a/Tests/UnitTests/Core/MagneticField/CMakeLists.txt b/Tests/UnitTests/Core/MagneticField/CMakeLists.txt index 822c917da28..f207d5d6228 100644 --- a/Tests/UnitTests/Core/MagneticField/CMakeLists.txt +++ b/Tests/UnitTests/Core/MagneticField/CMakeLists.txt @@ -2,4 +2,4 @@ add_unittest(ConstantBField ConstantBFieldTests.cpp) add_unittest(InterpolatedBFieldMap InterpolatedBFieldMapTests.cpp) #add_unittest(MagneticFieldInterfaceConsistency MagneticFieldInterfaceConsistencyTests.cpp) add_unittest(SolenoidBField SolenoidBFieldTests.cpp) -add_unittest(MagneticFieldProvider MagneticFieldProviderTests.cpp) \ No newline at end of file +add_unittest(MagneticFieldProvider MagneticFieldProviderTests.cpp) diff --git a/Tests/UnitTests/Core/Navigation/CMakeLists.txt b/Tests/UnitTests/Core/Navigation/CMakeLists.txt index a5785f38006..69ebc582396 100644 --- a/Tests/UnitTests/Core/Navigation/CMakeLists.txt +++ b/Tests/UnitTests/Core/Navigation/CMakeLists.txt @@ -4,4 +4,3 @@ add_unittest(NavigationState NavigationStateTests.cpp) add_unittest(NavigationStateUpdaters NavigationStateUpdatersTests.cpp) add_unittest(DetectorNavigator DetectorNavigatorTests.cpp) add_unittest(MultiWireNavigation MultiWireNavigationTests.cpp) - diff --git a/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp b/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp index 5d527c4a2ce..361dd461282 100644 --- a/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp +++ b/Tests/UnitTests/Core/Surfaces/BoundaryToleranceTestsRefs.hpp @@ -128,30 +128,30 @@ const struct { } rectShiftedDimensions; const std::vector rectShiftedTestPoints = { - {0.00, 1.50}, {0.00, 1.80}, {0.00, 2.10}, {0.00, 2.40}, {0.00, 2.70}, - {0.00, 3.00}, {0.00, 3.30}, {0.00, 3.60}, {0.00, 3.90}, {0.00, 4.20}, - {0.00, 4.50}, {0.40, 1.50}, {0.40, 1.80}, {0.40, 2.10}, {0.40, 2.40}, - {0.40, 2.70}, {0.40, 3.00}, {0.40, 3.30}, {0.40, 3.60}, {0.40, 3.90}, - {0.40, 4.20}, {0.40, 4.50}, {0.80, 1.50}, {0.80, 1.80}, {0.80, 2.10}, - {0.80, 2.40}, {0.80, 2.70}, {0.80, 3.00}, {0.80, 3.30}, {0.80, 3.60}, - {0.80, 3.90}, {0.80, 4.20}, {0.80, 4.50}, {1.20, 1.50}, {1.20, 1.80}, - {1.20, 2.10}, {1.20, 2.40}, {1.20, 2.70}, {1.20, 3.00}, {1.20, 3.30}, - {1.20, 3.60}, {1.20, 3.90}, {1.20, 4.20}, {1.20, 4.50}, {1.60, 1.50}, - {1.60, 1.80}, {1.60, 2.10}, {1.60, 2.40}, {1.60, 2.70}, {1.60, 3.00}, - {1.60, 3.30}, {1.60, 3.60}, {1.60, 3.90}, {1.60, 4.20}, {1.60, 4.50}, - {2.00, 1.50}, {2.00, 1.80}, {2.00, 2.10}, {2.00, 2.40}, {2.00, 2.70}, - {2.00, 3.00}, {2.00, 3.30}, {2.00, 3.60}, {2.00, 3.90}, {2.00, 4.20}, - {2.00, 4.50}, {2.40, 1.50}, {2.40, 1.80}, {2.40, 2.10}, {2.40, 2.40}, - {2.40, 2.70}, {2.40, 3.00}, {2.40, 3.30}, {2.40, 3.60}, {2.40, 3.90}, - {2.40, 4.20}, {2.40, 4.50}, {2.80, 1.50}, 
{2.80, 1.80}, {2.80, 2.10}, - {2.80, 2.40}, {2.80, 2.70}, {2.80, 3.00}, {2.80, 3.30}, {2.80, 3.60}, - {2.80, 3.90}, {2.80, 4.20}, {2.80, 4.50}, {3.20, 1.50}, {3.20, 1.80}, - {3.20, 2.10}, {3.20, 2.40}, {3.20, 2.70}, {3.20, 3.00}, {3.20, 3.30}, - {3.20, 3.60}, {3.20, 3.90}, {3.20, 4.20}, {3.20, 4.50}, {3.60, 1.50}, - {3.60, 1.80}, {3.60, 2.10}, {3.60, 2.40}, {3.60, 2.70}, {3.60, 3.00}, - {3.60, 3.30}, {3.60, 3.60}, {3.60, 3.90}, {3.60, 4.20}, {3.60, 4.50}, - {4.00, 1.50}, {4.00, 1.80}, {4.00, 2.10}, {4.00, 2.40}, {4.00, 2.70}, - {4.00, 3.00}, {4.00, 3.30}, {4.00, 3.60}, {4.00, 3.90}, {4.00, 4.20}, + {0.00, 1.50}, {0.00, 1.80}, {0.00, 2.10}, {0.00, 2.40}, {0.00, 2.70}, + {0.00, 3.00}, {0.00, 3.30}, {0.00, 3.60}, {0.00, 3.90}, {0.00, 4.20}, + {0.00, 4.50}, {0.40, 1.50}, {0.40, 1.80}, {0.40, 2.10}, {0.40, 2.40}, + {0.40, 2.70}, {0.40, 3.00}, {0.40, 3.30}, {0.40, 3.60}, {0.40, 3.90}, + {0.40, 4.20}, {0.40, 4.50}, {0.80, 1.50}, {0.80, 1.80}, {0.80, 2.10}, + {0.80, 2.40}, {0.80, 2.70}, {0.80, 3.00}, {0.80, 3.30}, {0.80, 3.60}, + {0.80, 3.90}, {0.80, 4.20}, {0.80, 4.50}, {1.20, 1.50}, {1.20, 1.80}, + {1.20, 2.10}, {1.20, 2.40}, {1.20, 2.70}, {1.20, 3.00}, {1.20, 3.30}, + {1.20, 3.60}, {1.20, 3.90}, {1.20, 4.20}, {1.20, 4.50}, {1.60, 1.50}, + {1.60, 1.80}, {1.60, 2.10}, {1.60, 2.40}, {1.60, 2.70}, {1.60, 3.00}, + {1.60, 3.30}, {1.60, 3.60}, {1.60, 3.90}, {1.60, 4.20}, {1.60, 4.50}, + {2.00, 1.50}, {2.00, 1.80}, {2.00, 2.10}, {2.00, 2.40}, {2.00, 2.70}, + {2.00, 3.00}, {2.00, 3.30}, {2.00, 3.60}, {2.00, 3.90}, {2.00, 4.20}, + {2.00, 4.50}, {2.40, 1.50}, {2.40, 1.80}, {2.40, 2.10}, {2.40, 2.40}, + {2.40, 2.70}, {2.40, 3.00}, {2.40, 3.30}, {2.40, 3.60}, {2.40, 3.90}, + {2.40, 4.20}, {2.40, 4.50}, {2.80, 1.50}, {2.80, 1.80}, {2.80, 2.10}, + {2.80, 2.40}, {2.80, 2.70}, {2.80, 3.00}, {2.80, 3.30}, {2.80, 3.60}, + {2.80, 3.90}, {2.80, 4.20}, {2.80, 4.50}, {3.20, 1.50}, {3.20, 1.80}, + {3.20, 2.10}, {3.20, 2.40}, {3.20, 2.70}, {3.20, 3.00}, {3.20, 3.30}, + {3.20, 3.60}, {3.20, 3.90}, {3.20, 4.20}, {3.20, 4.50}, {3.60, 1.50}, + {3.60, 1.80}, {3.60, 2.10}, {3.60, 2.40}, {3.60, 2.70}, {3.60, 3.00}, + {3.60, 3.30}, {3.60, 3.60}, {3.60, 3.90}, {3.60, 4.20}, {3.60, 4.50}, + {4.00, 1.50}, {4.00, 1.80}, {4.00, 2.10}, {4.00, 2.40}, {4.00, 2.70}, + {4.00, 3.00}, {4.00, 3.30}, {4.00, 3.60}, {4.00, 3.90}, {4.00, 4.20}, {4.00, 4.50} }; //const std::vector rectShiftedClosestPoints = { @@ -182,34 +182,34 @@ const std::vector rectShiftedTestPoints = { // {3.00, 4.00} //}; const std::vector rectShiftedDistances = { - 1.118033988749895, 1.019803902718557, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0198039027185568, 1.118033988749895, 0.7810249675906654, 0.6324555320336759, - 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6324555320336757, 0.7810249675906654, - 0.5385164807134504, 0.28284271247461895, 0.19999999999999996, - 0.19999999999999996, 0.19999999999999996, 0.19999999999999996, - 0.19999999999999996, 0.19999999999999996, 0.19999999999999996, - 0.28284271247461845, 0.5385164807134504, 0.5, 0.19999999999999996, - -0.10000000000000009, -0.20000000000000018, -0.20000000000000018, - -0.20000000000000018, -0.20000000000000018, -0.20000000000000018, - -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, - -0.10000000000000009, -0.3999999999999999, -0.6000000000000001, - -0.6000000000000001, -0.6000000000000001, -0.3999999999999999, - -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, - -0.10000000000000009, -0.3999999999999999, -0.7000000000000002, -1.0, - -0.7000000000000002, 
-0.3999999999999999, -0.10000000000000009, - 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009, - -0.3999999999999999, -0.5999999999999996, -0.5999999999999996, - -0.5999999999999996, -0.3999999999999999, -0.10000000000000009, - 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009, - -0.19999999999999973, -0.19999999999999973, -0.19999999999999973, - -0.19999999999999973, -0.19999999999999973, -0.10000000000000009, - 0.1999999999999993, 0.5, 0.5385164807134505, 0.28284271247461906, - 0.20000000000000018, 0.20000000000000018, 0.20000000000000018, - 0.20000000000000018, 0.20000000000000018, 0.20000000000000018, - 0.20000000000000018, 0.2828427124746186, 0.5385164807134505, 0.7810249675906655, - 0.6324555320336759, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, - 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, - 0.6324555320336757, 0.7810249675906655, 1.118033988749895, 1.019803902718557, + 1.118033988749895, 1.019803902718557, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0198039027185568, 1.118033988749895, 0.7810249675906654, 0.6324555320336759, + 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6324555320336757, 0.7810249675906654, + 0.5385164807134504, 0.28284271247461895, 0.19999999999999996, + 0.19999999999999996, 0.19999999999999996, 0.19999999999999996, + 0.19999999999999996, 0.19999999999999996, 0.19999999999999996, + 0.28284271247461845, 0.5385164807134504, 0.5, 0.19999999999999996, + -0.10000000000000009, -0.20000000000000018, -0.20000000000000018, + -0.20000000000000018, -0.20000000000000018, -0.20000000000000018, + -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, + -0.10000000000000009, -0.3999999999999999, -0.6000000000000001, + -0.6000000000000001, -0.6000000000000001, -0.3999999999999999, + -0.10000000000000009, 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, + -0.10000000000000009, -0.3999999999999999, -0.7000000000000002, -1.0, + -0.7000000000000002, -0.3999999999999999, -0.10000000000000009, + 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009, + -0.3999999999999999, -0.5999999999999996, -0.5999999999999996, + -0.5999999999999996, -0.3999999999999999, -0.10000000000000009, + 0.1999999999999993, 0.5, 0.5, 0.19999999999999996, -0.10000000000000009, + -0.19999999999999973, -0.19999999999999973, -0.19999999999999973, + -0.19999999999999973, -0.19999999999999973, -0.10000000000000009, + 0.1999999999999993, 0.5, 0.5385164807134505, 0.28284271247461906, + 0.20000000000000018, 0.20000000000000018, 0.20000000000000018, + 0.20000000000000018, 0.20000000000000018, 0.20000000000000018, + 0.20000000000000018, 0.2828427124746186, 0.5385164807134505, 0.7810249675906655, + 0.6324555320336759, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, + 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, + 0.6324555320336757, 0.7810249675906655, 1.118033988749895, 1.019803902718557, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0198039027185568, 1.118033988749895 }; diff --git a/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp b/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp index e0bcbfe2e25..09bade86ae8 100644 --- a/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp +++ b/Tests/UnitTests/Core/Visualization/Visualization3DTests.cpp @@ -83,7 +83,7 @@ l 4 1 BOOST_AUTO_TEST_CASE(Visualization3DTesterPly) { // Test the tester std::string validPly = R"(ply -format ascii 1.0 +format ascii 1.0 comment made by Greg Turk comment 
this file is a cube element vertex 8 @@ -122,7 +122,7 @@ end_header // Test the tester - contains 3 errors std::string invalidPly = R"(ply -format ascii 1.0 +format ascii 1.0 comment made by Greg Turk comment this file is a cube element vertex 8 diff --git a/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt b/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt index c7fc2ca7437..cde4b28ae4d 100644 --- a/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt +++ b/Tests/UnitTests/Examples/Io/Json/CMakeLists.txt @@ -1,3 +1,3 @@ set(unittest_extra_libraries ActsExamplesDigitization ActsExamplesIoJson) -add_unittest(JsonDigitizationConfig JsonDigitizationConfigTests.cpp) \ No newline at end of file +add_unittest(JsonDigitizationConfig JsonDigitizationConfigTests.cpp) diff --git a/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt b/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt index 89b8cb6d01b..8cc71759a99 100644 --- a/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt +++ b/Tests/UnitTests/Plugins/Cuda/Seeding/CMakeLists.txt @@ -1,3 +1,2 @@ add_executable(ActsUnitTestSeedFinderCuda SeedFinderCudaTest.cpp) target_link_libraries(ActsUnitTestSeedFinderCuda PRIVATE ${unittest_extra_libraries} Boost::boost) - diff --git a/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt b/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt index 7a738a7b384..0dfe2a66901 100644 --- a/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt +++ b/Tests/UnitTests/Plugins/DD4hep/CMakeLists.txt @@ -18,7 +18,7 @@ dd4hep_generate_rootmap(ActsTestsDD4hepFactories) find_library(dd4hep_core_library DDCore) -if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.24.0") +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.24.0") set(factory_path "$>") else() set(factory_path "${CMAKE_CURRENT_BINARY_DIR}") @@ -34,10 +34,10 @@ if (NOT "${dd4hep_core_library}" STREQUAL "dd4hep_core_library-NOTFOUND") add_unittest(${_test} ${_test}Tests.cpp) add_dependencies(ActsUnitTest${_test} Components_ActsTestsDD4hepFactories) if(APPLE) - set_property(TEST ${_test} PROPERTY ENVIRONMENT - "DYLD_LIBRARY_PATH=${DD4HEP_LIBRARY_PATH}:${factory_path}:$ENV{DYLD_LIBRARY_PATH}") + set_property(TEST ${_test} PROPERTY ENVIRONMENT + "DYLD_LIBRARY_PATH=${DD4HEP_LIBRARY_PATH}:${factory_path}:$ENV{DYLD_LIBRARY_PATH}") else() - set_property(TEST ${_test} PROPERTY ENVIRONMENT + set_property(TEST ${_test} PROPERTY ENVIRONMENT "LD_LIBRARY_PATH=${DD4HEP_LIBRARY_PATH}:${factory_path}:$ENV{LD_LIBRARY_PATH}") endif() endforeach() diff --git a/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp b/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp index 715a2a09159..5620ca8d585 100644 --- a/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp +++ b/Tests/UnitTests/Plugins/DD4hep/DD4hepCylindricalDetectorTests.cpp @@ -45,7 +45,7 @@ const char* beampipe_head_xml = - + )""""; const char* nec_head_xml = @@ -79,7 +79,7 @@ const char* plugin_xml = - + @@ -90,7 +90,7 @@ const char* plugin_xml = - + @@ -100,7 +100,7 @@ const char* plugin_xml = - + @@ -111,7 +111,7 @@ const char* plugin_xml = - + @@ -121,7 +121,7 @@ const char* plugin_xml = - + @@ -132,7 +132,7 @@ const char* plugin_xml = - + )"""; diff --git a/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp b/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp index ca2f7e87092..2974cd5ac32 100644 --- a/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp +++ b/Tests/UnitTests/Plugins/ExaTrkX/ExaTrkXMetricHookTests.cpp @@ -63,7 +63,7 @@ BOOST_AUTO_TEST_CASE(same_graph) { 
BOOST_AUTO_TEST_CASE(same_graph_large_numbers) { // clang-format off std::int64_t k = 100'000; - + std::vector truthGraph = { 1,2, 2,3, diff --git a/cmake/ActsConfig.cmake.in b/cmake/ActsConfig.cmake.in index 8e8caad4935..6e4fcb5e06e 100644 --- a/cmake/ActsConfig.cmake.in +++ b/cmake/ActsConfig.cmake.in @@ -40,7 +40,7 @@ foreach(_component ${Acts_FIND_COMPONENTS}) endif() endforeach() -# add this to the current CMAKE_MODULE_PATH to find third party modules +# add this to the current CMAKE_MODULE_PATH to find third party modules # that not provide a XXXConfig.cmake or XXX-config.cmake file list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/Modules) diff --git a/cmake/ActsCreatePackageConfig.cmake b/cmake/ActsCreatePackageConfig.cmake index 6cfbd570f07..dd73597d2ff 100644 --- a/cmake/ActsCreatePackageConfig.cmake +++ b/cmake/ActsCreatePackageConfig.cmake @@ -22,12 +22,12 @@ install( ${PROJECT_BINARY_DIR}/ActsConfigVersion.cmake ${PROJECT_BINARY_DIR}/ActsConfig.cmake DESTINATION ${install_package_config_dir}) - + # install third party FindXXX.cmake files install( FILES ${CMAKE_CURRENT_LIST_DIR}/FindOnnxRuntime.cmake - DESTINATION ${install_package_config_dir}/Modules) + DESTINATION ${install_package_config_dir}/Modules) # install target configs for all available components foreach(_component ${_components}) diff --git a/cmake/pythia8307-cpp20.patch b/cmake/pythia8307-cpp20.patch deleted file mode 100644 index c6a1890dfaf..00000000000 --- a/cmake/pythia8307-cpp20.patch +++ /dev/null @@ -1,129 +0,0 @@ -From 09ef584f1ca797d84c1c0af18ec06b33d6c0d2d0 Mon Sep 17 00:00:00 2001 -From: Paul Gessinger -Date: Thu, 24 Mar 2022 16:08:26 +0100 -Subject: [PATCH] fixes for C++20 build - ---- - include/Pythia8/SusyLesHouches.h | 6 +++--- - src/HadronWidths.cc | 8 ++++---- - src/NucleonExcitations.cc | 8 ++++---- - src/PythiaParallel.cc | 2 +- - 4 files changed, 12 insertions(+), 12 deletions(-) - -diff --git a/include/Pythia8/SusyLesHouches.h b/include/Pythia8/SusyLesHouches.h -index 2f1d9fd..5090c00 100644 ---- a/include/Pythia8/SusyLesHouches.h -+++ b/include/Pythia8/SusyLesHouches.h -@@ -28,7 +28,7 @@ namespace Pythia8 { - public: - - //Constructor. -- LHblock() : idnow(0), qDRbar(), i(), val() {} ; -+ LHblock() : idnow(0), qDRbar(), i(), val() {} ; - - //Does block exist? - bool exists() { return int(entry.size()) == 0 ? false : true ; }; -@@ -129,7 +129,7 @@ namespace Pythia8 { - template class LHmatrixBlock { - public: - //Constructor. Set uninitialized and explicitly zero. -- LHmatrixBlock() : entry(), qDRbar(), val() { -+ LHmatrixBlock() : entry(), qDRbar(), val() { - initialized=false; - for (i=1;i<=size;i++) { - for (j=1;j<=size;j++) { -@@ -208,7 +208,7 @@ namespace Pythia8 { - template class LHtensor3Block { - public: - //Constructor. Set uninitialized and explicitly zero. -- LHtensor3Block() : entry(), qDRbar(), val() { -+ LHtensor3Block() : entry(), qDRbar(), val() { - initialized=false; - for (i=1;i<=size;i++) { - for (j=1;j<=size;j++) { -diff --git a/src/HadronWidths.cc b/src/HadronWidths.cc -index ccc5c72..95a5cb1 100644 ---- a/src/HadronWidths.cc -+++ b/src/HadronWidths.cc -@@ -867,7 +867,7 @@ double HadronWidths::psSize(double eCM, ParticleDataEntryPtr prodA, - return 0.; - - // Integrate mass of A. 
-- auto f = [=](double mA) { -+ auto f = [=,this](double mA) { - return pow(pCMS(eCM, mA, m0B), lType) * mDistr(idA, mA); }; - if (!integrateGauss(result, f, mMinA, min(mMaxA, eCM - m0B))) - success = false; -@@ -879,7 +879,7 @@ double HadronWidths::psSize(double eCM, ParticleDataEntryPtr prodA, - return 0.; - - // Integrate mass of B. -- auto f = [=](double mB) { -+ auto f = [=,this](double mB) { - return pow(pCMS(eCM, m0A, mB), lType) * mDistr(idB, mB); }; - if (!integrateGauss(result, f, mMinB, min(mMaxB, eCM - m0A))) - success = false; -@@ -891,10 +891,10 @@ double HadronWidths::psSize(double eCM, ParticleDataEntryPtr prodA, - return 0.; - - // Define integrand of outer integral. -- auto I = [=, &success](double mA) { -+ auto I = [=, &success, this](double mA) { - - // Define integrand of inner integral. -- auto f = [=](double mB) { -+ auto f = [=,this](double mB) { - return pow(pCMS(eCM, mA, mB), lType) - * mDistr(idA, mA) * mDistr(idB, mB); }; - double res; -diff --git a/src/NucleonExcitations.cc b/src/NucleonExcitations.cc -index b5eef8f..a82383a 100644 ---- a/src/NucleonExcitations.cc -+++ b/src/NucleonExcitations.cc -@@ -502,7 +502,7 @@ double NucleonExcitations::psSize(double eCM, ParticleDataEntry& prodA, - return 0.; - - // Integrate mass of A. -- auto f = [=](double mA) { -+ auto f = [=, this](double mA) { - return pCMS(eCM, mA, m0B) * hadronWidthsPtr->mDistr(idA, mA); }; - if (!integrateGauss(result, f, mMinA, min(mMaxA, eCM - m0B))) - success = false; -@@ -514,7 +514,7 @@ double NucleonExcitations::psSize(double eCM, ParticleDataEntry& prodA, - return 0.; - - // Integrate mass of B. -- auto f = [=](double mB) { -+ auto f = [=,this](double mB) { - return pCMS(eCM, m0A, mB) * hadronWidthsPtr->mDistr(idB, mB); }; - if (!integrateGauss(result, f, mMinB, min(mMaxB, eCM - m0A))) - success = false; -@@ -526,10 +526,10 @@ double NucleonExcitations::psSize(double eCM, ParticleDataEntry& prodA, - return 0.; - - // Define integrand of outer integral. -- auto I = [=, &success](double mA) { -+ auto I = [=, &success, this](double mA) { - - // Define integrand of inner integral. -- auto f = [=](double mB) { -+ auto f = [=,this](double mB) { - return pCMS(eCM, mA, mB) - * hadronWidthsPtr->mDistr(idA, mA) - * hadronWidthsPtr->mDistr(idB, mB); }; -diff --git a/src/PythiaParallel.cc b/src/PythiaParallel.cc -index 81450e2..7ec3a92 100644 ---- a/src/PythiaParallel.cc -+++ b/src/PythiaParallel.cc -@@ -106,7 +106,7 @@ bool PythiaParallel::init(function customInit) { - bool initSuccess = true; - - for (int iPythia = 0; iPythia < numThreads; iPythia += 1) { -- initThreads.emplace_back([=, &seeds, &initSuccess]() { -+ initThreads.emplace_back([=, &seeds, &initSuccess, this]() { - Pythia* pythiaPtr = new Pythia(settings, particleData, false); - pythiaObjects[iPythia] = unique_ptr(pythiaPtr); - pythiaObjects[iPythia]->settings.flag("Print:quiet", true); --- -2.31.1 - diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 566fcc547c7..b9f309d4da3 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -3,7 +3,7 @@ # the code. when running running on readthedocs.org, the build is fully driven # by Sphinx, including running Doxygen. # -# this CMake-based build is only intended for local development. +# this CMake-based build is only intended for local development. 
set(sphinx_build ${CMAKE_CURRENT_SOURCE_DIR}/_build) set(sphinx_doctrees ${CMAKE_CURRENT_SOURCE_DIR}/_build/doctrees) diff --git a/docs/acts_project.md b/docs/acts_project.md index 6027024a462..519fde62560 100644 --- a/docs/acts_project.md +++ b/docs/acts_project.md @@ -11,8 +11,8 @@ ACTS is designed as a library that *contains components* for assembling a track The library is structured as follows: * The `Core` library contains considered production ready components (except for components located in the `Acts::Experimental` namespace) that can be interfaced to experiment code * The `Plugin` folder contains additional extensions that can be optionally switched on to use increase the functionality of the software suite, but also in general increase dependencies to other/thirdparty libraries - * The `Fatras` library contains a fast track simulation module, that is based on the same concepts that are used for the [ATLAS Fatras](https://cds.cern.ch/record/1091969) fast track simulation - * An `Examples` folder that contains a minimal test framework used for showcasing and integration testing, + * The `Fatras` library contains a fast track simulation module, that is based on the same concepts that are used for the [ATLAS Fatras](https://cds.cern.ch/record/1091969) fast track simulation + * An `Examples` folder that contains a minimal test framework used for showcasing and integration testing, * A `Tests` folder that contains unit tests, benchmark tests and other integration tests diff --git a/docs/codeguide.md b/docs/codeguide.md index c9c310b459d..402ba00be6f 100644 --- a/docs/codeguide.md +++ b/docs/codeguide.md @@ -160,7 +160,7 @@ static constexpr double kMagic = 1.23; ``` Variables defined in the `Acts::UnitConstants` namespace are exempted for usability reasons and use regular variable naming instead. - + ### N.6: Enum values use eCamelCase Enum values use CamelCase with a `e` prefix. They are not really constants but symbolic values, e.g. they can never have an address, and warant a separate convention. diff --git a/docs/contribution/clang_tidy.md b/docs/contribution/clang_tidy.md index fce2a5d5567..c5087b5a5cc 100644 --- a/docs/contribution/clang_tidy.md +++ b/docs/contribution/clang_tidy.md @@ -17,7 +17,7 @@ a report on the issues it detected. The report should give you an error / warning code, e.g. `readability-braces-around-statements`. The LLVM documentation has details on all possible error codes, in this particular example you would find it [here][readability]. This page will tell you that -`clang-tidy` wants you to replace +`clang-tidy` wants you to replace ```cpp if (condition) diff --git a/docs/contribution/documentation_cheatsheet.md b/docs/contribution/documentation_cheatsheet.md index f7826741664..352a168c826 100644 --- a/docs/contribution/documentation_cheatsheet.md +++ b/docs/contribution/documentation_cheatsheet.md @@ -46,7 +46,7 @@ A link to {class}`Acts::Volume`. ## Pull in API documentation -* Code: +* Code: ```text :::{doxygenclass} Acts::Volume diff --git a/docs/contribution/release.md b/docs/contribution/release.md index 3186eec1129..1b5dfb91d3e 100644 --- a/docs/contribution/release.md +++ b/docs/contribution/release.md @@ -45,7 +45,7 @@ a7ee09d 2022-05-25 11:17 +0200 Luis Falda Coelho │ o fix: Bug in xyz 4ceddf3 2022-05-25 10:26 +0200 Luis Falda Coelho │ o─┘ feat: ITk seedFilter integration and seed quality confirmation (#1201) ``` -You can now push the updated `releases` branch to the remote `releases` branch using `git push -u upstream releases`. 
+You can now push the updated `releases` branch to the remote `releases` branch using `git push -u upstream releases`. On push, a CI job should run and create an additional commit on the `releases` branch, which bumps a number of version numbers. That commit is going to be the one tagged with the correct version. It doesn't hurt to make sure that commit looks right, as in it bumps to a sensible next version number. diff --git a/docs/contribution/run_formatting.md b/docs/contribution/run_formatting.md index 27c9fa3df02..75f915cea88 100644 --- a/docs/contribution/run_formatting.md +++ b/docs/contribution/run_formatting.md @@ -48,13 +48,13 @@ Formatting of the Python source code uses the library ```console $ pip install black -$ black +$ black ``` :::{tip} It is **strongly recommended** to use a [virtual environment](https://realpython.com/python-virtual-environments-a-primer/) for -this purpose! For example, run +this purpose! For example, run ```console $ python -m venv venv diff --git a/docs/core/definitions/algebra.rst b/docs/core/definitions/algebra.rst index 2bf884682b5..9d35ef34992 100644 --- a/docs/core/definitions/algebra.rst +++ b/docs/core/definitions/algebra.rst @@ -8,7 +8,7 @@ The basic scalar type can be defined via this file and is set per default to `do #ifdef ACTS_CUSTOM_SCALAR using ActsScalar = ACTS_CUSTOM_SCALAR; - #else + #else using ActsScalar = double; #endif diff --git a/docs/core/definitions/units.md b/docs/core/definitions/units.md index 72848c662c2..432f60de6db 100644 --- a/docs/core/definitions/units.md +++ b/docs/core/definitions/units.md @@ -4,4 +4,3 @@ ```{eval-rst} .. doxygennamespace:: Acts::UnitConstants ``` - diff --git a/docs/core/geometry/geometry_id.md b/docs/core/geometry/geometry_id.md index 3d75686bec9..81d6afd764d 100644 --- a/docs/core/geometry/geometry_id.md +++ b/docs/core/geometry/geometry_id.md @@ -16,4 +16,3 @@ While it is used in ACTS-internal applications such as material mapping, it is n members: kVolumeMask,kBoundaryMask,kLayerMask,kApproachMask,kSensitiveMask,kExtraMask --- ::: - diff --git a/docs/core/geometry/index.md b/docs/core/geometry/index.md index ce04855c889..b0009a36ef1 100644 --- a/docs/core/geometry/index.md +++ b/docs/core/geometry/index.md @@ -21,4 +21,3 @@ surfaces legacy/legacy layerless/layerless ::: - diff --git a/docs/core/geometry/legacy/building.md b/docs/core/geometry/legacy/building.md index 5d24bd61626..3bb251bde20 100644 --- a/docs/core/geometry/legacy/building.md +++ b/docs/core/geometry/legacy/building.md @@ -38,7 +38,7 @@ While `DD4hep` offers a descriptive language with a dedicated extension mechanis that can be used by ACTS to interpret the underlying geometry hierarchy and and structure, there is no such guarantee when having the already as built `TGeo` geometry in hand. Therefore a dedicated ACTS configuration file based on `json` can be provided that allows -to specify parsing restrictions for sub detectors. +to specify parsing restrictions for sub detectors. ``` @@ -52,9 +52,9 @@ layer are determined by parsing the provided surfaces. Additionally, an envelope covering the surfaces can be chosen. ```{note} -There exist standard layer builders that are designed to build cylindrical, disk like +There exist standard layer builders that are designed to build cylindrical, disk like and planar layers and perform the ordering of the surfaces onto those layers. 
These -builders are called from the top level translation entry points from either `TGeo` +builders are called from the top level translation entry points from either `TGeo` or `DD4hep`. ``` @@ -80,7 +80,6 @@ For cylindrical detector setups, a dedicated {class}`Acts::CylinderVolumeBuilder provided, which performs a variety of volume building, packing and gluing. ```{note} -For most cylindrical detectors, there exist automated glueing and geometry building +For most cylindrical detectors, there exist automated glueing and geometry building modules that take care of the glueing process. ``` - diff --git a/docs/core/geometry/legacy/layers.md b/docs/core/geometry/legacy/layers.md index 5f55092c6ab..89f435bcd25 100644 --- a/docs/core/geometry/legacy/layers.md +++ b/docs/core/geometry/legacy/layers.md @@ -30,5 +30,3 @@ objects are confined together in a special {type}`Acts::LayerArray` class and ca contained by a {class}`Acts::TrackingVolume`. ![LayerArray](../figures/LayerArray.png) - - diff --git a/docs/core/geometry/legacy/legacy.md b/docs/core/geometry/legacy/legacy.md index 17e2f428cbe..613f753a37a 100644 --- a/docs/core/geometry/legacy/legacy.md +++ b/docs/core/geometry/legacy/legacy.md @@ -2,7 +2,7 @@ # Legacy geometry module -:::{todo} +:::{todo} Describe how the legacy geometry used to work and how it differs from the [layerless geometry](#layerless_geometry) ::: diff --git a/docs/core/geometry/material.md b/docs/core/geometry/material.md index 8943e88f816..c143fe9f123 100644 --- a/docs/core/geometry/material.md +++ b/docs/core/geometry/material.md @@ -40,4 +40,3 @@ mapping process, that is in further described below. * {class}`Acts::ProtoSurfaceMaterialT`, only binning description (without material) to be used in the material mapping process, which can be specified with a templated binning description. - diff --git a/docs/core/magnetic_field.md b/docs/core/magnetic_field.md index 01fd88edb98..1924234bde0 100644 --- a/docs/core/magnetic_field.md +++ b/docs/core/magnetic_field.md @@ -8,7 +8,7 @@ source of field data. Algorithms which need magnetic field information (e.g. {class}`Acts::AtlasStepper`, {class}`Acts::EigenStepper`) accept the magnetic -field as an explicit argument. +field as an explicit argument. ## Provider interface @@ -72,7 +72,7 @@ values. The library itself does not make any assumptions on the content of this context type (it is implemented using `std::any`), but passes a reference through the call-chain to the field implementation. An experiment specific field implementation is then expected to performa cast to the concrete type, -and use the contents. +and use the contents. An example use case of the context could be to look up conditions data / records for the value of the magnetic field at the time of the event. @@ -162,7 +162,7 @@ to speed it up. ::: ACTS also provides a field provider that calculates the field vectors -analytically for a [solenoid](https://en.wikipedia.org/wiki/Solenoid) field. +analytically for a [solenoid](https://en.wikipedia.org/wiki/Solenoid) field. 
:::{figure} figures/bfield/quiver.png :width: 600 @@ -178,7 +178,7 @@ The implementation has configurable solenoid parameters: ::: :::{note} -A configuration of +A configuration of ```cpp SolenoidBField::Config cfg; cfg.length = 5.8_m; @@ -239,4 +239,3 @@ analytical implementation and is much faster to lookup: :::{doxygenclass} Acts::MagneticFieldProvider ::: - diff --git a/docs/core/misc/figures/AxisBoundaryTypes.svg b/docs/core/misc/figures/AxisBoundaryTypes.svg index 0068af7d6b0..2efeb30e675 100644 --- a/docs/core/misc/figures/AxisBoundaryTypes.svg +++ b/docs/core/misc/figures/AxisBoundaryTypes.svg @@ -4,7 +4,7 @@ - + bound x @@ -15,7 +15,7 @@ max - + @@ -65,4 +65,3 @@ - diff --git a/docs/core/misc/logging.md b/docs/core/misc/logging.md index 019f6487969..003898be91d 100644 --- a/docs/core/misc/logging.md +++ b/docs/core/misc/logging.md @@ -91,7 +91,7 @@ There are two approaches to logger integration: dummy logger using {func}`Acts::getDummyLogger`. It is more suitable to pass into functions that might be called from other ACTS functions (rather than construction a local logger via `getDefaultLogger`, or creating logger - instances on the fly). + instances on the fly). ::: 2. Passing logger instances to high level components, and rely on ACTS code to @@ -135,13 +135,13 @@ $ LD_PRELOAD= path/to/your/executable Generally, log levels in ACTS are only of informative value: even {enumerator}`Acts::Logging::Level::ERROR` and {enumerator}`Acts::Logging::Level::FATAL` will only print a -messages, **and not terminate execution**. +messages, **and not terminate execution**. This is desirable in an experiment context, where jobs should not immediately terminate when ACTS encounters something that is logged as an error. In a test context, however, this behavior is not optimal: the tests should ensure in known configurations errors do not occur, or only in specific circumstances. To -solve this, ACTS implements an optional log *threshold* mechanism. +solve this, ACTS implements an optional log *threshold* mechanism. The threshold mechanism is steered via two CMake options: `ACTS_ENABLE_LOG_FAILURE_THRESHOLD` and `ACTS_LOG_FAILURE_THRESHOLD`. Depending diff --git a/docs/core/reconstruction/figures/gsf_bethe_heitler_approx.svg b/docs/core/reconstruction/figures/gsf_bethe_heitler_approx.svg index 0f401a1f1db..c0b935a81b9 100644 --- a/docs/core/reconstruction/figures/gsf_bethe_heitler_approx.svg +++ b/docs/core/reconstruction/figures/gsf_bethe_heitler_approx.svg @@ -21,19 +21,19 @@ - - @@ -41,8 +41,8 @@ z - @@ -53,42 +53,42 @@ L 0 3.5 - - - @@ -109,29 +109,29 @@ z - @@ -152,43 +152,43 @@ z - @@ -225,34 +225,34 @@ z - @@ -289,18 +289,18 @@ z - @@ -315,60 +315,60 @@ z - - - - @@ -384,8 +384,8 @@ z - @@ -422,28 +422,28 @@ L -3.5 0 - @@ -461,36 +461,36 @@ z - @@ -508,23 +508,23 @@ z - @@ -555,34 +555,34 @@ z - @@ -618,739 +618,739 @@ z - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1405,22 +1405,22 @@ z - - @@ -1450,9 +1450,9 @@ L 86.6 54.570437 - diff --git a/docs/core/reconstruction/pattern_recognition/seeding.md b/docs/core/reconstruction/pattern_recognition/seeding.md index 67fe50c6748..ebb72cd94bc 100644 --- a/docs/core/reconstruction/pattern_recognition/seeding.md +++ b/docs/core/reconstruction/pattern_recognition/seeding.md @@ -8,7 +8,7 @@ reconstruction algorithm (henceforth: the tracking). The tracking then tries to find all measurements belonging to a single particle in this direction in order to reconstruct the track. 
This means, if no seed exists for a particle, this particle will not be reconstructed. On the other hand, finding too many seeds -which either correspond to particles with already existing seeds or +which either correspond to particles with already existing seeds or which do not correspond to particles at all increases the time needed for tracking. @@ -58,11 +58,11 @@ Representation of the search for triplet combinations in the $(r, z)$ plane. The ### The Seed Finder -The {func}`Acts::SeedFinder::createSeedsForGroup` function receives three iterators -over SPs constructed from detector layers of increasing radii. The seedfinder will -then attempt to create seeds, with each seed containing exactly one SP returned by +The {func}`Acts::SeedFinder::createSeedsForGroup` function receives three iterators +over SPs constructed from detector layers of increasing radii. The seedfinder will +then attempt to create seeds, with each seed containing exactly one SP returned by each of the three iterators. It starts by iterating over SPs in the middle layer -(2nd iterator), and within this loop separately iterates once over the bottom SP +(2nd iterator), and within this loop separately iterates once over the bottom SP and once over the top SP. Within each of the nested loops, SP pairs are tested for compatibility by applying a set of configurable cuts that can be tested with two SP only (pseudorapidity, origin along $z$-axis, distance in $r$ between SP, @@ -123,9 +123,9 @@ where $B_z$ is the magnetic field. :align: center The r-z projection of the detector with the same charged particle track. The track is depicted with the same colours as in the previous figure. ::: - + The track is not an ideal helix. At each detector layer (or any other material) -scattering may occur making the helix approximate. +scattering may occur making the helix approximate. The algorithm will check if the triplet forms a nearly straight line in the $r/z$ plane (see {numref}`r-zCoordinates`) as the particle path in the $r/z$ plane is unaffected by the magnetic field. This is split into two parts; the first test occurs before the calculation of the helix @@ -158,7 +158,7 @@ The last cut applied in this function is on the transverse impact parameter (or distance of closest approach), which is the distance of the perigee of a track from the interaction region in $mm$ of detector radius. It is calculated and cut on before storing all top SP compatible with both the current middle SP and current -bottom SP. +bottom SP. (impactParameter)= :::{figure} figures/seeding/impactParameter.svg @@ -167,13 +167,13 @@ bottom SP. Helix representation in $x/y$ reference frame with central space-point (SP$_m$) in the origin. ::: -Assuming the middle layer SP is in the origin of the $x/y$ frame, as in {numref}`impactParameter`. +Assuming the middle layer SP is in the origin of the $x/y$ frame, as in {numref}`impactParameter`. The distance between the centre of the helix and the interaction point (IP) is given by \begin{equation*} (x_0 + r_m)^2 + y_0^2 = (R + d_0)^2 \quad \xrightarrow{R^2 = x_0^2 + y_0^2} \quad \frac{d_0^2}{R^2} + 2 \frac{d_0}{R} = \frac{2 x_0 r_m + r_m^2}{R^2} . 
\end{equation*} -Considering that $d_0 << R$ (we can neglect the term proportional to $d_0^2$) and using the $u/v$ line equation calculated previously, +Considering that $d_0 << R$ (we can neglect the term proportional to $d_0^2$) and using the $u/v$ line equation calculated previously, the cut can now be estimated using a linear function in the $u/v$ plane instead of a quartic function: \begin{equation*} @@ -183,12 +183,12 @@ d_0 \leq \left| \left( A - B \cdot r_M \right) \cdot r_M \right| ### The Seed Filter After creating the potential seeds we apply a seed filter procedure that compares the seeds with other SPs compatible with the seed curvature. -This process ranks the potential seeds based on certain quality criteria and selects the ones that are more likely to produce high-quality tracks +This process ranks the potential seeds based on certain quality criteria and selects the ones that are more likely to produce high-quality tracks The filter is divided into two functions {func}`Acts::SeedFilter::filterSeeds_2SpFixed` and {func}`Acts::SeedFilter::filterSeeds_1SpFixed`. -The first function compares the middle and bottom layer SPs of the seeds to other top layer SPs; seeds only differing in top SP are -compatible if they have similar helix radius with the same sign (i.e. the same charge). The SPs must have a minimum distance in -detector radius, such that SPs from the same layer cannot be considered compatible. The second function iterates over the seeds with +The first function compares the middle and bottom layer SPs of the seeds to other top layer SPs; seeds only differing in top SP are +compatible if they have similar helix radius with the same sign (i.e. the same charge). The SPs must have a minimum distance in +detector radius, such that SPs from the same layer cannot be considered compatible. The second function iterates over the seeds with only a common middle layer SP and selects the higher quality combinations. :::{doxygenfunction} Acts::SeedFilter::filterSeeds_2SpFixed @@ -196,7 +196,7 @@ only a common middle layer SP and selects the higher quality combinations. ::: This function assigns a weight (which should correspond to the likelihood that -a seed is good) to all seeds and applies detector-specific selection of seeds based on weights. +a seed is good) to all seeds and applies detector-specific selection of seeds based on weights. The weight is a “soft cut”, which means that it is only used to discard tracks if many seeds are created for the same middle SP. This process is important to improving computational @@ -210,7 +210,7 @@ w = (c_1 \cdot N_{t} - c_2 \cdot d_0 - c_3 |z_0| ) + \textnormal{detector specif The transverse ($d_0$) and longitudinal ($z_0$) impact parameters are multiplied by a configured factor and subtracted from the weight, as seeds with higher impact parameters are assumed to be less likely to stem from a particle than another seed using the same middle SP with -smaller impact parameters. The number of compatible seeds ($N_t$) is used to increase the weight, as a higher number of measurements +smaller impact parameters. The number of compatible seeds ($N_t$) is used to increase the weight, as a higher number of measurements will lead to higher quality tracks. Finally, the weight can also be affected by optional detector-specific cuts. 
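For orientation, the weight computation can be sketched as follows; the coefficient names and values are illustrative placeholders, not the actual {func}`Acts::SeedFilter::filterSeeds_2SpFixed` implementation:

```python
# Illustrative sketch of the seed weight described above (not ACTS code):
# w = c1 * N_t - c2 * d0 - c3 * |z0| (+ optional detector-specific terms).
def seed_weight(n_top, d0, z0, c1=1.0, c2=1.0, c3=1.0, detector_term=0.0):
    """Higher weight ~ more likely the seed stems from a real particle."""
    return c1 * n_top - c2 * d0 - c3 * abs(z0) + detector_term

# Two seeds sharing the same middle SP: the one with smaller impact
# parameters ranks higher and survives the "soft cut".
assert seed_weight(3, d0=0.5, z0=2.0) > seed_weight(3, d0=4.0, z0=20.0)
```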
The {func}`Acts::SeedFilter::filterSeeds_2SpFixed` function also includes a configurable {struct}`Acts::SeedConfirmationRangeConfig` seed confirmation step that, when enabled, diff --git a/docs/core/reconstruction/track_fitting.md b/docs/core/reconstruction/track_fitting.md index 797b2724bd5..d1de9d8f72d 100644 --- a/docs/core/reconstruction/track_fitting.md +++ b/docs/core/reconstruction/track_fitting.md @@ -335,7 +335,7 @@ Gx2FitterOptions( ... ) : ... {} Gx2FitterOptions() = delete; -... +... //common options: // geoContext, magFieldContext, calibrationContext, extensions, // propagatorPlainOptions, referenceSurface, multipleScattering, diff --git a/docs/core/visualization/3d.md b/docs/core/visualization/3d.md index 81d40a2d1c1..3ded6fe9e88 100644 --- a/docs/core/visualization/3d.md +++ b/docs/core/visualization/3d.md @@ -2,8 +2,8 @@ A very lightweight layer for visualizing ACTS geometry objects and event data model is provided within the Core component. ACTS does not provide a viewer per se, but instead it was chosen to plug a visitor that can then be used for visualizing the given objects. -The visitor has to implement the `IVisualization3D` interface and can then straight forwardly used with the visualization helper structs. -Two visualization helpers that implement industry standard 3D formats can be used from this component, +The visitor has to implement the `IVisualization3D` interface and can then straight forwardly used with the visualization helper structs. +Two visualization helpers that implement industry standard 3D formats can be used from this component, but evidently any other visitor can be plugged in as long as it satisfies the `IVisualization` interface. The two provided visualization visitors are: @@ -12,7 +12,7 @@ The two provided visualization visitors are: ## Behind the scenes -All display actions rely on the `Polyhedron` representation of Surfaces, +All display actions rely on the `Polyhedron` representation of Surfaces, i.e. each surface can be at least approximated by a list of vertices and a definition of faces connecting these vertices. As a special feature, the `Polyhedron` can be displayed as a triangulated mesh of surfaces, i.e. each surface is divided into triangles that build up the object to display. @@ -35,7 +35,7 @@ Example of an angular error cone: :::{image} figures/AngularError.png :width: 800 -:alt: Display of an angular error. +:alt: Display of an angular error. ::: Example of a 2D cartesian error on a plane: diff --git a/docs/examples/examples.rst b/docs/examples/examples.rst index 3b9777d14e0..25c152cd90b 100644 --- a/docs/examples/examples.rst +++ b/docs/examples/examples.rst @@ -5,7 +5,7 @@ ACTS ships with a comprehensive set of examples. These examples leverage a custom event-processing framework, that is expressly **not intended to be used in any kind of production environment**. These examples demonstrate how to set up and configure different components in the ACTS library to assemble a track -reconstruction chain. +reconstruction chain. At the time of writing, there are two aspects to the ACTS examples: @@ -13,8 +13,8 @@ At the time of writing, there are two aspects to the ACTS examples: #. Example executables for different purposes. This is the original form of examples provided by ACTS. A large number of separate executables are be built if ``-DACTS_BUILD_EXAMPLES=ON``, the exact set is also influenced by which - plugins are enabled in the build. 
These executables are configured by a number of - command line options, for example to set the number of events to be processed, + plugins are enabled in the build. These executables are configured by a number of + command line options, for example to set the number of events to be processed, or which output formats to read from / write to. #. Standalone Performance and Analysis applications based on ROOT. These applications @@ -29,7 +29,7 @@ At the time of writing, there are two aspects to the ACTS examples: modifications to the actual python code will be easy, and encouraged. -.. note:: This section of the documentation contains a set of *how-to* guides, +.. note:: This section of the documentation contains a set of *how-to* guides, which describe different example executables, and how to combine them with one another to assemble several workflows. diff --git a/docs/examples/howto/analysis_apps.rst b/docs/examples/howto/analysis_apps.rst index 888e9f873bb..7659771b7d4 100644 --- a/docs/examples/howto/analysis_apps.rst +++ b/docs/examples/howto/analysis_apps.rst @@ -3,15 +3,15 @@ Analysis applications ================================ -The ACTS examples come with a certain variety of ROOT based validation and performance writers, +The ACTS examples come with a certain variety of ROOT based validation and performance writers, whose output can be use to understand various aspects of the reconstruction in more detail. The building of these applications can be switched on by setting ``ACTS_BUILD_ANALYSIS_APPS=On``, which requires (on top of the Core dependencies) ROOT for the analysis code. -These analysis applications are steered via BOOST program options, hence a quick `` -h`` will +These analysis applications are steered via BOOST program options, hence a quick `` -h`` will quickly show the relevant options, they can be executed in silent mode, i.e. without opening -a window, when specifying the `-s` option. +a window, when specifying the `-s` option. Material Composition Analysis ----------------------------- @@ -114,14 +114,14 @@ The source code for this application can be found in ``Examples/Scripts/Material Tracking Performance Analysis ----------------------------- -Two different applications are available for analysing the output of track fitting and +Two different applications are available for analysing the output of track fitting and track finding, sitting on top of the corresponding ROOT output writers from the Example applications. **Residuals and Pull analysis per layer** -To investigate the per layer residual and pull distributions, one can use the +To investigate the per layer residual and pull distributions, one can use the `ActsAnalysisResidualAndPulls` application, which runs on top of the ROOT file produced by the ``RootTrackStatesWriter``. @@ -144,11 +144,11 @@ The following options are available: --save arg (=png) Output save format (to be interpreted by ROOT). -Again, this application is capable of running in silent mode (``-s``) without +Again, this application is capable of running in silent mode (``-s``) without opening a dedicated screen window. Originally designed for the ``Acts::KalmanFilter`` output, it is capable of -producing histograms of the ``--predicted``, ``--filtered`` and ``--smoothed`` track +producing histograms of the ``--predicted``, ``--filtered`` and ``--smoothed`` track states (i.e. track parameters) and will do so per layer and volume. 
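As background for reading these histograms (this is not a feature of the application itself): a pull is the residual divided by its estimated uncertainty, so a well-calibrated fit yields pull distributions with mean close to 0 and width close to 1. A toy illustration with made-up numbers:

.. code-block:: python

   import numpy as np

   # Toy residual/pull distributions; sigma is an assumed fit uncertainty.
   rng = np.random.default_rng(0)
   sigma = 0.05                                     # e.g. d0 uncertainty [mm]
   residuals = rng.normal(0.0, sigma, size=10_000)  # fitted - true
   pulls = residuals / sigma

   # Well-calibrated fit: pull mean ~ 0, pull RMS ~ 1.
   print(f"mean = {pulls.mean():.3f}, RMS = {pulls.std():.3f}")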
On request (``--fit``) the resulting distributions can be fitted for the summary plots @@ -157,10 +157,10 @@ that are created, otherwise the RMS and its mean are taken. The application will (by parsing the geometry id range) automatically determine the different layers and volumes and create detailed and summary plots for all of them. -As a example, the pull distributions for *predicted, filtered* and *smoothed* track states +As a example, the pull distributions for *predicted, filtered* and *smoothed* track states is shown below. -.. figure:: figures/analysis_apps/aa_rp_layers.png +.. figure:: figures/analysis_apps/aa_rp_layers.png :width: 500 @@ -213,12 +213,12 @@ The following options are available: --outliers Auxiliary information for outliers --shared Auxiliary information for shared -This application is highly configurable and produces residual and pull +This application is highly configurable and produces residual and pull (regional, integral and summary) plots for the fitted perigee parameters of track fitting. It can be run in ```eta,phi,pT``` bins, and as the different histograms in the various bins -will require different histogram ranges, these will be automatically determined. +will require different histogram ranges, these will be automatically determined. -However, this process is relatively slow and makes comparisons between runs difficult, +However, this process is relatively slow and makes comparisons between runs difficult, thus the range configuration can be written out by specifying a ``--config-output`` JSON file, and successively re-using it with a ``--config-input`` flag in future analysis runs. @@ -228,14 +228,13 @@ using the ``--peak-events`` option. Some example histograms (transverse impact parameter ``d0`` distribution or a summary plot showing the number of detector hits, are added below). -.. figure:: figures/analysis_apps/aa_ts_d0.png +.. figure:: figures/analysis_apps/aa_ts_d0.png :width: 500 -.. figure:: figures/analysis_apps/aa_ts_nhits.png +.. figure:: figures/analysis_apps/aa_ts_nhits.png :width: 500 The source code for these applications can be found in ``Examples/Scripts/TrackingPerformance``. - diff --git a/docs/examples/howto/digitization_config.md b/docs/examples/howto/digitization_config.md index 6fa05550b98..b34e235dcda 100644 --- a/docs/examples/howto/digitization_config.md +++ b/docs/examples/howto/digitization_config.md @@ -40,5 +40,3 @@ Examples/Algorithms/Digitization/scripts/smearing-config.py \ --digi-smear-types=0 \ --digi-smear-parameters=12.5 ``` - - diff --git a/docs/examples/howto/material_mapping.rst b/docs/examples/howto/material_mapping.rst index 87a6c8ad699..a8657a470a6 100644 --- a/docs/examples/howto/material_mapping.rst +++ b/docs/examples/howto/material_mapping.rst @@ -7,7 +7,7 @@ Howto run the material mapping and validation This documentation is for running the material mapping in the Examples framework. Documentation on how to use the Core library directly for material mapping is found :ref:`here`. -When performing track reconstruction, the proper amount of material crossed by the particle needs to be accounted for. This material is originally available in the detector simulation with a lot of details, which would make it expensive to directly use. To circumvent this issue, the material is mapped onto different surfaces in the tracking geometry. This process will be performed in 3 steps: +When performing track reconstruction, the proper amount of material crossed by the particle needs to be accounted for. 
This material is originally available in the detector simulation with a lot of details, which would make it expensive to directly use. To circumvent this issue, the material is mapped onto different surfaces in the tracking geometry. This process will be performed in 3 steps: - first, a JSON geometry file is created, it will be used to configure which surface the material is mapped onto and with which binning. - second, a Geant4 simulation is used to collect the material inside the detector from the detailed geometry. @@ -22,7 +22,7 @@ As a prerequisite you will need to build ACTS with the Examples, Geant4 and the For this particular example the ODD will also be needed. To use it, don't forget to get the corresponding submodule and then recompile the ACTS code if needed. .. code-block:: console - + $ git submodule init $ git submodule update @@ -35,7 +35,7 @@ First we need to extract the list of all the surfaces and volumes in our detecto .. code-block:: - $ python3 /Examples/Scripts/Python/geometry.py + $ python3 /Examples/Scripts/Python/geometry.py Ideally the following options should be used in the python file: @@ -146,7 +146,7 @@ The next step is to do a geantino scan of our detector. For this we will use the .. code-block:: console - $ python3 /Examples/Scripts/Python/material_recording.py + $ python3 /Examples/Scripts/Python/material_recording.py The result of the geantino scan will be a root file containing material tracks. Those contain the direction and production vertex of the geantino, the total material accumulated and all the interaction points in the detector. @@ -157,7 +157,7 @@ With the surfaces map and the material track we can finally do the material mapp .. code-block:: console - $ python3 /Examples/Scripts/Python/material_mapping.py + $ python3 /Examples/Scripts/Python/material_mapping.py Note that technically when using DD4hep (in particular for the ODD) defining a ``matDeco`` in the main function is not strictly necessary as the DD4hep geometry can hold the information of which surface to map onto with which binning. We will ignore this option, since the goal of this guide is to explain how to make a material map regardless of the detector. @@ -169,7 +169,7 @@ Depending on what you want to do there are three options you can change: - ``mappingStep``: determine the step size used in the sampling of the volume in the volume mapping. By default, the material interaction point obtained from G4 is accumulated at the intersection between the track and the volume material. The mapping will be therefore incorrect if the material extends through the bin. To avoid this, additional material points are created every ``mappingStep`` [mm] along the trajectory. The mapping step should be small compared to the bin size. - ``readCachedSurfaceInformation`` if added the material-surface association will be taken from the input material track file (doesn't work with geantino file, you need to use the material track file obtained from running the material mapping). -In addition to root and JSON output, one can also output the material map to a Cbor file (Concise Binary Object Representation). Doing so results in a file about 10 time smaller than the JSON one, but that file is no longer human-readable. This should be done once the map has been optimised and you want to export it. +In addition to root and JSON output, one can also output the material map to a Cbor file (Concise Binary Object Representation). 
Doing so results in a file about 10 time smaller than the JSON one, but that file is no longer human-readable. This should be done once the map has been optimised and you want to export it. .. note:: You can map onto surfaces and volumes separately (for example if you want to optimise first one then the other). In that case after mapping one of those you will need to use the resulting JSON material map as an input to the ``mat-input-file``. @@ -186,7 +186,7 @@ By default, the Geantino scan is performed with no spread in :math:`z_0` and :ma .. code-block:: console - $ python3 /Examples/Scripts/Python/material_validation.py + $ python3 /Examples/Scripts/Python/material_validation.py To do the validation, five root macros are available in ``scripts/MaterialMapping``: @@ -232,4 +232,4 @@ Can be use with X,Y,Z is a list of volumes, this will plot the material ratio be Using a different detector -------------------------- -If you want to use a different type of detector, you will first need to ensure that the relevant packages were added during the compilation. After this you can just replace the detector initialisation in the different main function. For reference you can have a look on the ODD for DD4Hep detector and on the ITk for TGeo detector. +If you want to use a different type of detector, you will first need to ensure that the relevant packages were added during the compilation. After this you can just replace the detector initialisation in the different main function. For reference you can have a look on the ODD for DD4Hep detector and on the ITk for TGeo detector. diff --git a/docs/examples/howto/run_ckf_auto_tuning.rst b/docs/examples/howto/run_ckf_auto_tuning.rst index 20c9892a189..f12051cef0c 100644 --- a/docs/examples/howto/run_ckf_auto_tuning.rst +++ b/docs/examples/howto/run_ckf_auto_tuning.rst @@ -1,6 +1,6 @@ ACTS Tutorial on Auto-Tuning in CombinatorialKalmanFilter (CKF) =============================================================== -The tracking algorithms require a number of pre-initialized parameters that are often hand-tuned to obtain high performance. Usually, the value of these parameters change as the underlying geometrical or magnetic configuration changes. An automatic tuning of these parameters can be very useful for obtaining highly efficient parameter configuration as well as for studying different detector geometries. This tutorial is based on parameter optimization studies using two different optimization frameworks: Optuna and Orion. Eight parameters of Track Seeding algorithm have been tuned using these frameworks and their performance have been studied on CKF. +The tracking algorithms require a number of pre-initialized parameters that are often hand-tuned to obtain high performance. Usually, the value of these parameters change as the underlying geometrical or magnetic configuration changes. An automatic tuning of these parameters can be very useful for obtaining highly efficient parameter configuration as well as for studying different detector geometries. This tutorial is based on parameter optimization studies using two different optimization frameworks: Optuna and Orion. Eight parameters of Track Seeding algorithm have been tuned using these frameworks and their performance have been studied on CKF. 
Prerequisites ------------- @@ -11,7 +11,7 @@ Since Optuna and Orion are independent frameworks, these need to be installed se $ source /cvmfs/sft.cern.ch/lcg/views/LCG_100/x86_64-centos7-gcc10-opt/setup.sh $ python3 -m venv PYTHON_VIRTUAL_ENV $ source PYTHON_VIRTUAL_ENV/bin/activate - $ export PYTHONPATH= + $ export PYTHONPATH= $ python -m pip install --upgrade pip $ pip install -r acts/Examples/Python/tests/requirements.txt $ pip install pytest --upgrade @@ -31,7 +31,7 @@ Once this setup is ready, at each new login, just do: $ source /cvmfs/sft.cern.ch/lcg/views/LCG_100/x86_64-centos7-gcc10-opt/setup.sh $ source PYTHON_VIRTUAL_ENV/bin/activate - $ export PYTHONPATH= + $ export PYTHONPATH= $ source build/python/setup.sh How auto-tuning works @@ -40,8 +40,8 @@ A list of parameters and their range are provided to optimization framework. The Score = Efficiency - (fakeRate + DuplicateRate/k_dup + run-time/k_time) -where k_dup and k_time are the weights for duplicate rate and run-time. These weights play a significant role in determining the configuration of best performing parameters from the optimization frameworks. - +where k_dup and k_time are the weights for duplicate rate and run-time. These weights play a significant role in determining the configuration of best performing parameters from the optimization frameworks. + The list of track seeding parameters that are auto-tuned using these frameworks are as follows: * maxPtScattering: upper p_T limit for scattering angle calculations * impactMax: maximum value of impact parameter @@ -59,7 +59,7 @@ Run auto-tuning using Optuna The Optuna auto-tuning script can be run directly by invoking: `` python Optuna_tuning.py`` -This creates a new optuna study for a given number of trials defined within the script. The direction is set to maximize which means that the framework will try to maximize the score. +This creates a new optuna study for a given number of trials defined within the script. The direction is set to maximize which means that the framework will try to maximize the score. .. code-block:: console @@ -90,10 +90,4 @@ A dictionary called space is created by providing a list of parameters and their The objective function picks up a value for each parameter, run CKF and construct a score function from CKF output as in Optuna case. The only difference is that it tries to minimize the score unlike optuna. The objective function and number of trials are passed to the orion workon function ``experiment.workon(objective, max_trials=100)``. -The best parameter configuration corresponds to the minimum score function value and can be obtained from the experiment. - - - - - - +The best parameter configuration corresponds to the minimum score function value and can be obtained from the experiment. diff --git a/docs/examples/python_bindings.rst b/docs/examples/python_bindings.rst index 1ee81ec9fab..e5a94c81ad9 100644 --- a/docs/examples/python_bindings.rst +++ b/docs/examples/python_bindings.rst @@ -49,7 +49,7 @@ sets up the particle propagation and runs a few events. objDir = outputDir + "/obj" if not os.path.exists(objDir): os.mkdir(objDir) - + s.addWriter( acts.examples.ObjPropagationStepsWriter( level=acts.logging.INFO, @@ -97,7 +97,7 @@ then simply run ``pytest`` from the repository root. :name: python-virtualenv It is **strongly recommended** to use a `virtual environment`_ for - this purpose! For example, run + this purpose! For example, run .. 
code-block:: console @@ -132,7 +132,7 @@ that looks like test_ckf_tracks_example_truth_estimate__trackstates_ckf.root: ac4485c09a68fca3d056cb8d9adb81695e68d822629e48c71fd2b6d2bbd31f88 # ... -where the left side before the ``:`` indicates the test in which the check is performed and the name of the ROOT file +where the left side before the ``:`` indicates the test in which the check is performed and the name of the ROOT file that is checked. The right side is the reference hash. .. note:: The file from which reference hashes are loaded can be changed by setting the environment variable ``ROOT_HASH_FILE`` @@ -140,7 +140,7 @@ that is checked. The right side is the reference hash. These checks have two purposes: -1. Detect regressions in the algorithms: if an algorithm produces different output, the test will catch it. This also means that +1. Detect regressions in the algorithms: if an algorithm produces different output, the test will catch it. This also means that if algorithmic changes are made that intentionally change the output, the reference hashes also have to be updated. .. warning:: Please make sure to check the contents of a changed file are correct/reasonable before updating the reference hash! @@ -189,7 +189,7 @@ To update the reference hashes, simply replace the corresponding entries in ``ro .. note:: The CI runs the ROOT hash checks. However, we have observed the hashes to change between different machines. This is believed to be due to differences in math libraries producing slightly different outputs. As a consequence, - locally obtained file hashes might cause CI failures, as the CI hashes are different. + locally obtained file hashes might cause CI failures, as the CI hashes are different. For local testing, it is therefore advisable to use ``ROOT_HASH_FILE`` to use a different file for the reference hashes and populated it with known-good reference hashes from the ``main`` branch, before testing your developments. 
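To make the mechanics of the hash comparison concrete, here is a toy sketch; it hashes raw file bytes for brevity, whereas the framework derives its hashes from the ROOT file contents, and the fallback file name below is an assumption:

.. code-block:: python

   import hashlib
   import os

   # Toy reference-hash check mirroring the "<test>__<file>.root: <sha256>"
   # format described above. The real framework hashes ROOT object contents,
   # not raw bytes; the fallback file name is an illustrative assumption.
   def load_reference_hashes(path):
       refs = {}
       with open(path) as f:
           for line in f:
               key, sep, value = line.partition(":")
               if sep:
                   refs[key.strip()] = value.strip()
       return refs

   def matches_reference(key, root_file):
       ref_file = os.environ.get("ROOT_HASH_FILE", "reference_hashes.txt")
       with open(root_file, "rb") as f:
           digest = hashlib.sha256(f.read()).hexdigest()
       return load_reference_hashes(ref_file).get(key) == digest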
diff --git a/docs/figures/tracking/cca.svg b/docs/figures/tracking/cca.svg index e753d4b0a1c..9994c4a8477 100644 --- a/docs/figures/tracking/cca.svg +++ b/docs/figures/tracking/cca.svg @@ -1 +1 @@ -eight cellconnectivityfour cellconnectivity \ No newline at end of file +eight cellconnectivityfour cellconnectivity diff --git a/docs/figures/tracking/clustering.svg b/docs/figures/tracking/clustering.svg index 99ec1e5176e..c2f9e693b8b 100644 --- a/docs/figures/tracking/clustering.svg +++ b/docs/figures/tracking/clustering.svg @@ -1 +1 @@ -xyztrackxytrackenergy below threshold \ No newline at end of file +xyztrackxytrackenergy below threshold diff --git a/docs/figures/tracking/finding.svg b/docs/figures/tracking/finding.svg index 6911fbfcf80..9398dcacd1e 100644 --- a/docs/figures/tracking/finding.svg +++ b/docs/figures/tracking/finding.svg @@ -1 +1 @@ -track \ No newline at end of file +track diff --git a/docs/figures/tracking/geometry_detail.svg b/docs/figures/tracking/geometry_detail.svg index 50f0b6648a4..73a5a843c3e 100644 --- a/docs/figures/tracking/geometry_detail.svg +++ b/docs/figures/tracking/geometry_detail.svg @@ -1 +1 @@ -(a)(b)ApproachRepresentativeVolume boundsSensitivePassive \ No newline at end of file +(a)(b)ApproachRepresentativeVolume boundsSensitivePassive diff --git a/docs/figures/tracking/kalman.svg b/docs/figures/tracking/kalman.svg index 07646dcbf5c..d17115d33a8 100644 --- a/docs/figures/tracking/kalman.svg +++ b/docs/figures/tracking/kalman.svg @@ -1 +1 @@ -l0l1 \ No newline at end of file +l0l1 diff --git a/docs/figures/tracking/layer_barrel.svg b/docs/figures/tracking/layer_barrel.svg index adde689c555..29740936192 100644 --- a/docs/figures/tracking/layer_barrel.svg +++ b/docs/figures/tracking/layer_barrel.svg @@ -1 +1 @@ -r1r2r3 \ No newline at end of file +r1r2r3 diff --git a/docs/figures/tracking/layer_ec.svg b/docs/figures/tracking/layer_ec.svg index c0551fc11e7..027932d1cd6 100644 --- a/docs/figures/tracking/layer_ec.svg +++ b/docs/figures/tracking/layer_ec.svg @@ -1 +1 @@ -xyzz1z2z3 \ No newline at end of file +xyzz1z2z3 diff --git a/docs/figures/tracking/multiple_scattering.svg b/docs/figures/tracking/multiple_scattering.svg index b63ffb9ac67..1dc0741ea1d 100644 --- a/docs/figures/tracking/multiple_scattering.svg +++ b/docs/figures/tracking/multiple_scattering.svg @@ -1 +1 @@ - \ No newline at end of file + diff --git a/docs/figures/tracking/parameters.svg b/docs/figures/tracking/parameters.svg index 6f3fb70d92b..c7d820c1ca3 100644 --- a/docs/figures/tracking/parameters.svg +++ b/docs/figures/tracking/parameters.svg @@ -1 +1 @@ -l0l1plocal positionmomentum(a)φθxyz(b) \ No newline at end of file +l0l1plocal positionmomentum(a)φθxyz(b) diff --git a/docs/figures/tracking/perigee.svg b/docs/figures/tracking/perigee.svg index 5f7f2afa920..1fc92e764a4 100644 --- a/docs/figures/tracking/perigee.svg +++ b/docs/figures/tracking/perigee.svg @@ -1 +1 @@ -xyzd0z0particlep~~l \ No newline at end of file +xyzd0z0particlep~~l diff --git a/docs/figures/tracking/rk.svg b/docs/figures/tracking/rk.svg index 18cac3f5864..b905f787777 100644 --- a/docs/figures/tracking/rk.svg +++ b/docs/figures/tracking/rk.svg @@ -1 +1 @@ - \ No newline at end of file + diff --git a/docs/figures/tracking/seeding.svg b/docs/figures/tracking/seeding.svg index 35dc37e0fb0..df990bafd06 100644 --- a/docs/figures/tracking/seeding.svg +++ b/docs/figures/tracking/seeding.svg @@ -1 +1 @@ - \ No newline at end of file + diff --git a/docs/figures/tracking/segmentation.svg 
b/docs/figures/tracking/segmentation.svg index 1ad37ffafb7..391742028e4 100644 --- a/docs/figures/tracking/segmentation.svg +++ b/docs/figures/tracking/segmentation.svg @@ -1 +1 @@ -l0(a) stripl0l1(b) pixel \ No newline at end of file +l0(a) stripl0l1(b) pixel diff --git a/docs/figures/tracking/sp_l2g.svg b/docs/figures/tracking/sp_l2g.svg index 32452eff610..c850e2b7399 100644 --- a/docs/figures/tracking/sp_l2g.svg +++ b/docs/figures/tracking/sp_l2g.svg @@ -1 +1 @@ -xyzl1l0lzTglob!loc=TtransTrot \ No newline at end of file +xyzl1l0lzTglob!loc=TtransTrot diff --git a/docs/figures/tracking/surface_array.svg b/docs/figures/tracking/surface_array.svg index 4af6c5cdf1d..51024d7a667 100644 --- a/docs/figures/tracking/surface_array.svg +++ b/docs/figures/tracking/surface_array.svg @@ -1 +1 @@ -(a)(b) \ No newline at end of file +(a)(b) diff --git a/docs/figures/tracking/tracking.svg b/docs/figures/tracking/tracking.svg index ae118980a7b..e5e9fc8008f 100644 --- a/docs/figures/tracking/tracking.svg +++ b/docs/figures/tracking/tracking.svg @@ -1 +1 @@ -SeedingTrack findingTrack fitting \ No newline at end of file +SeedingTrack findingTrack fitting diff --git a/docs/figures/tracking/vertexing.svg b/docs/figures/tracking/vertexing.svg index d19d4e4b850..5e1ff311dba 100644 --- a/docs/figures/tracking/vertexing.svg +++ b/docs/figures/tracking/vertexing.svg @@ -1 +1 @@ -ppPrimary VertexSecondary VertexPile-up Vertex \ No newline at end of file +ppPrimary VertexSecondary VertexPile-up Vertex diff --git a/docs/figures/tracking/vertexing_flowchart.svg b/docs/figures/tracking/vertexing_flowchart.svg index d5900e89efc..a92822e7e6b 100644 --- a/docs/figures/tracking/vertexing_flowchart.svg +++ b/docs/figures/tracking/vertexing_flowchart.svg @@ -551,4 +551,4 @@ id="g458"> \ No newline at end of file + d="M 2.59026,0 -1.55415,2.0722 0,0 -1.55415,-2.0722" /> diff --git a/docs/plugins/MLAlgorithms.md b/docs/plugins/MLAlgorithms.md index 5fbe25e5958..3a81168bf65 100644 --- a/docs/plugins/MLAlgorithms.md +++ b/docs/plugins/MLAlgorithms.md @@ -16,12 +16,12 @@ The `OnnxRuntimeBase` class implements the inference of a standard MLP via ONNX. The goal of the ambiguity solver is to remove duplicated and fake tracks that remain after the CKF. To perform this cleaning, this algorithm works in three steps: - Clustering: tracks are clustered together, one cluster ~ one truth particle -- Ranking: tracks in each cluster are scored, the best one is kept +- Ranking: tracks in each cluster are scored, and the best one is kept - Cleaning: last pass over all the remaining tracks to remove duplicates and fakes (not implemented yet) ### Clustering -The clustering is implemented with the `clusterTracks` function. Its input is a multimap of a pair of track IDs and a vector of measurement IDs. The multimap uses the number of measurements associated with the tracks as a key, which is only a trick to sort the tracks efficiently by the number of measurements. Then, for each track, starting with the one with the most measurements, we check if a cluster shares a hit with the track. If not, we create a new cluster and associate all the hits of the current track with the cluster. If yes, the track is added to that cluster (note that the hits associated with the cluster don’t change here). After looping over all the tracks, each should have been associated with a cluster. +The clustering is implemented with the `clusterTracks` function. Its input is a multimap of a pair of track IDs and a vector of measurement IDs.
The multimap uses the number of measurements associated with the tracks as a key, which is only a trick to sort the tracks efficiently by the number of measurements. Then, for each track, starting with the one with the most measurements, we check if a cluster shares a hit with the track. If not, we create a new cluster and associate all the hits of the current track with the cluster. If yes, the track is added to that cluster (note that the hits associated with the cluster don’t change here). After looping over all the tracks, each should have been associated with a cluster. ### Ranking @@ -48,7 +48,7 @@ While the ambiguity solver can significantly improve the cleanliness of the outp It uses the same three steps as the ML ambiguity solver but with seeds instead of tracks: - Clustering: seeds are clustered together, one cluster ~ one truth particle -- Ranking: seeds in each cluster are scored, and the best one is kept +- Ranking: seeds in each cluster are scored, and the best one is kept - Cleaning: last pass over all the remaining scores to remove fakes ### Clustering diff --git a/docs/plugins/dd4hep.md b/docs/plugins/dd4hep.md index 848d1961aa7..6f0a98fb81f 100644 --- a/docs/plugins/dd4hep.md +++ b/docs/plugins/dd4hep.md @@ -65,7 +65,7 @@ name encountered. It can be set from the XML using DD4hep's plugin mechanism and the provided `ParametersPlugin` like: ```xml - @@ -75,7 +75,7 @@ and the provided `ParametersPlugin` like: - + ``` @@ -109,7 +109,7 @@ following conditions need to be met: - The detector needs to have a barrel-endcap structure: Every hierarchy of subdetectors (e.g. PixelDetector, StripDetector,...) needs to be decomposed into - + 1. {barrel} 2. {barrel + 2 endcaps} 3. {2 endcaps} - in case there is no barrel at this stage (e.g. forward end caps) @@ -121,7 +121,7 @@ following conditions need to be met: assembly using the `DD4hep_SubdetectorAssembly` constructor which is provided by DD4hep. Example of usage in an XML file (where Barrel0, nEndCap0 and pEndCap0 are subdetectors defined in the file `PixelTracker.xml`): - + ```xml @@ -147,7 +147,7 @@ following conditions need to be met: - Layers when containing sensitive material and/or the layer should carry material (which will be mapped on the layer if indicated), or the layer is sensitive itself. - + ```{note} The layer does not need to be a direct child of the volume (barrel or endcap), it can be nested in substructures @@ -227,7 +227,7 @@ ACTS geometry translation uses parameters attached to DD4hep detector elements v - + ``` diff --git a/docs/plugins/tgeo.rst b/docs/plugins/tgeo.rst index 40392d1525c..c7b235769b0 100644 --- a/docs/plugins/tgeo.rst +++ b/docs/plugins/tgeo.rst @@ -4,22 +4,22 @@ TGeo plugin General ------- -The ``TGeo`` plugin connects a geometry described with the ``ROOT::Geom`` module with Acts. This is done by parsing the ROOT geometry and selecting ``TGeoNode`` objects that represent chosen geometrical objects. +The ``TGeo`` plugin connects a geometry described with the ``ROOT::Geom`` module with Acts. This is done by parsing the ROOT geometry and selecting ``TGeoNode`` objects that represent chosen geometrical objects. ACTS detector elements are represented by surfaces with dedicated shapes, hence a conversion of the volume-based ``TGeoNode`` description into ``Acts::Surface`` objects needs to take place. An example use of the ``TGeo`` plugin can be found in the ``TGeoDetector`` example.
-Parsing the ROOT Geometry +Parsing the ROOT Geometry ------------------------- Once a geometry is imported in ROOT, it can be accessed via the ``gGeoManager``. The ``Acts::TGeoLayerBuilder``, which implements an ``Acts::ILayerBuilder``, relies on the fact that this ``gGeoManager`` is accessible; if it points to ``nullptr``, no conversion is done. -The ``Acts::TGeoLayerBuilder`` can be configured to search within given parsing ranges for ``TGeoNode`` objects to be converted into ``Acts::Surface`` objects. A vector of search strings can be used for situations where multiple sensor types are used or additional objects are bound to be described. +The ``Acts::TGeoLayerBuilder`` can be configured to search within given parsing ranges for ``TGeoNode`` objects to be converted into ``Acts::Surface`` objects. A vector of search strings can be used for situations where multiple sensor types are used or additional objects need to be described. A dedicated ``Acts::TGeoParser`` struct is then used to select the nodes from the ROOT geometry. Conversion of TGeoShapes to Acts::Surfaces ------------------------------------------ -An automatic translation of ``TGeoShape`` objects into corresponding ``Acts::Surface`` objects with appropriate bound descriptions is done by the ``Acts::TGeoSurfaceConverter`` class. +An automatic translation of ``TGeoShape`` objects into corresponding ``Acts::Surface`` objects with appropriate bound descriptions is done by the ``Acts::TGeoSurfaceConverter`` class. There is some limited freedom in re-defining the orientation of the coordinate system between ROOT and ACTS, which can be used to adapt local coordinate definitions on the converted objects. This is indicated by the ``const std::string& axes`` argument of the converter. @@ -37,30 +37,30 @@ has taken place by adding a splitter implementation to the ``Acts::TGeoLayerBuil .. figure:: figures/tgeo/TGeoBBox_PlaneSurface.png :width: 800 - + Conversion of a ``TGeoBBox`` shape into an ``Acts::PlaneSurface`` with ``Acts::RectangleBounds``. All axes iterations are allowed for this conversion. .. figure:: figures/tgeo/TGeoTrd1_PlaneSurface.png :width: 800 - + Conversion of a ``TGeoTrd1`` shape into an ``Acts::PlaneSurface`` with ``Acts::TrapezoidBounds``. The axes definitions need to be ``(x/X)(z/Z)(*/*)``. .. figure:: figures/tgeo/TGeoTrd2_PlaneSurface_xz.png :width: 800 - + Conversion of a ``TGeoTrd2`` shape into an ``Acts::PlaneSurface`` with ``Acts::TrapezoidBounds``. The axes definitions shown are ``(x/X)(z/Z)(*/*)``; the second coordinate has to be the z-axis. .. figure:: figures/tgeo/TGeoTrd2_PlaneSurface_yz.png :width: 800 - + Conversion of a ``TGeoTrd2`` shape into an ``Acts::PlaneSurface`` with ``Acts::TrapezoidBounds``. The axes definitions shown are ``(y/Y)(z/Z)(*/*)``; the second coordinate has to be the z-axis. .. figure:: figures/tgeo/TGeoTube_CylinderSurface.png :width: 800 - + Conversion of a ``TGeoTube`` shape into an ``Acts::CylinderSurface`` with ``Acts::CylinderBounds``. The axes definition has to be ``(x/X)(y/Y)(*/*)``. .. figure:: figures/tgeo/TGeoTube_DiscSurface.png :width: 800 - + Conversion of a ``TGeoTube`` shape into an ``Acts::DiscSurface`` with ``Acts::DiscBounds``. The axes definition has to be ``(x/X)(y/Y)(*/*)``. diff --git a/docs/tracking.md b/docs/tracking.md index 97be79f83da..54e582d1e0f 100644 --- a/docs/tracking.md +++ b/docs/tracking.md @@ -953,74 +953,74 @@ See [](vertexing_core) for a dedicated description of the vertexing as implemented in ACTS.
::: -A vertex is a point within the detector, where an interaction or a -decay occurred. We distinguish between primary vertices (from -collisions/interactions) and secondary vertices (from subsequent particle -decays), see {numref}`vertexing_illust`. Primary vertices are further divided -into hard-scatter and pile-up vertices. While primary vertices are located in +A vertex is a point within the detector where an interaction or a +decay occurred. We distinguish between primary vertices (from +collisions/interactions) and secondary vertices (from subsequent particle +decays), see {numref}`vertexing_illust`. Primary vertices are further divided +into hard-scatter and pile-up vertices. While primary vertices are located in the luminous region, secondary vertices are slightly displaced due to the finite - life time of the decaying particle. + lifetime of the decaying particle. (vertexing_illust)= :::{figure} /figures/tracking/vertexing.svg :width: 400px :align: center Illustration of a set of three vertices in a proton-proton -collision. We distinguish between primary hard-scatter, primary pile-up, and +collision. We distinguish between primary hard-scatter, primary pile-up, and secondary vertices. ::: -Vertices play an important role in higher-level reconstruction algorithms. For -example, secondary vertices can help with the identification of particles: -During *$b$-tagging*, a displaced vertex located inside a jet is a sign for the +Vertices play an important role in higher-level reconstruction algorithms. For +example, secondary vertices can help with the identification of particles: +During *$b$-tagging*, a displaced vertex located inside a jet is a sign of the decay of a $b$-hadron. -In analogy to track reconstruction, vertex reconstruction can be divided into -two stages: vertex finding and vertex fitting. As a first step of vertex -finding, we compute a rough estimate of the vertex position from a set of +In analogy to track reconstruction, vertex reconstruction can be divided into +two stages: vertex finding and vertex fitting. As a first step of vertex +finding, we compute a rough estimate of the vertex position from a set of tracks. This first estimate can be calculated in many different ways, and is -referred to as "vertex seed". Seeding algorithms differ for primary and -secondary vertexing. For primary vertex seeding, one option is to use a -histogram approach to cluster tracks on the $z$-axis[^phd:piacquadio:2010]. -This is based on the assumption that primary vertices will be close to the -beamline. Other approaches model tracks as multivariate Gaussian distributions -and identify regions of high track density as vertex seeds[^phd:schlag:2022]. -For secondary vertexing, seeds are formed from pairs of reconstructed tracks as +referred to as the "vertex seed". Seeding algorithms differ for primary and +secondary vertexing. For primary vertex seeding, one option is to use a +histogram approach to cluster tracks on the $z$-axis[^phd:piacquadio:2010]. +This is based on the assumption that primary vertices will be close to the +beamline. Other approaches model tracks as multivariate Gaussian distributions +and identify regions of high track density as vertex seeds[^phd:schlag:2022]. +For secondary vertexing, seeds are formed from pairs of reconstructed tracks, as the constraint to the beamline does not apply.
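To make the histogram-based primary-vertex seeding tangible, here is a deliberately simplified Python sketch. It is not the ACTS implementation; the bin width, the $z$ range, and the refinement by averaging are illustrative choices only. The idea is just what the text describes: fill the tracks' longitudinal impact parameters $z_0$ into a histogram along the beamline and take the most populated bin as the seed.

```python
import numpy as np


def z_histogram_seed(track_z0, z_range=(-200.0, 200.0), bin_width=1.0):
    """Crude primary-vertex seed: centre of the most populated z0 bin
    (units mm, values illustrative), refined by averaging the tracks in
    that bin. Assumes at least one track falls inside z_range."""
    z0 = np.asarray(track_z0, dtype=float)
    edges = np.arange(z_range[0], z_range[1] + bin_width, bin_width)
    counts, _ = np.histogram(z0, bins=edges)
    i = int(np.argmax(counts))  # most populated bin
    in_bin = z0[(z0 >= edges[i]) & (z0 < edges[i + 1])]
    return float(in_bin.mean())
```

A real seeder iterates, as the flowchart below describes: find a seed, fit, remove the tracks assigned to it, and search again for the next vertex.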
-Once a vertex seed is determined, tracks that are compatible with it are +Once a vertex seed is determined, tracks that are compatible with it are selected as part of the vertex finding. Before the vertex fit, we linearize tracks in the vicinity of the vertex seed under the assumption that they follow a helical (for constant magnetic field) or -straight (for no magnetic field) trajectory[^phd:piacquadio:2010]. The vertex -fitter then uses this linearization to improve the position of the vertex seed. -Furthermore, the track momenta are refitted under the assumption that the tracks -originate at the vertex[^Fruhwirth:1987fm] [^billoirfitting:1992] . +straight (for no magnetic field) trajectory[^phd:piacquadio:2010]. The vertex +fitter then uses this linearization to improve the position of the vertex seed. +Furthermore, the track momenta are refitted under the assumption that the tracks +originate at the vertex[^Fruhwirth:1987fm] [^billoirfitting:1992]. -One issue with an approach like this is that the assignment of tracks to +One issue with an approach like this is that the assignment of tracks to vertices is ambiguous. As an improvement, one can perform a multi-vertex fit, -where vertices compete for tracks. This means that one track can be assigned to +where vertices compete for tracks. This means that one track can be assigned to several vertices. Their contribution to each vertex fit is determined by a weight factor, which, in turn, depends on the tracks' compatibility with respect to all vertices[^fruwirth:amvfitting:2004]. -A flowchart of a multi-vertex reconstruction chain is shown in +A flowchart of a multi-vertex reconstruction chain is shown in {numref}`vertexing_flowchart`. (vertexing_flowchart)= :::{figure} /figures/tracking/vertexing_flowchart.svg :width: 600px :align: center -Simplified flowchart of multi-vertex reconstruction. From a set of seed tracks, -we first compute a rough estimate of the vertex position, i.e., the vertex seed. -Then, we evaluate the compatibility of all tracks with the the latter. If a -track is deemed compatible, it is assigned a weight and attached to the vertex -seed. Next, the vertex seed and all previously found vertices that share tracks +Simplified flowchart of multi-vertex reconstruction. From a set of seed tracks, +we first compute a rough estimate of the vertex position, i.e., the vertex seed. +Then, we evaluate the compatibility of all tracks with the latter. If a +track is deemed compatible, it is assigned a weight and attached to the vertex +seed. Next, the vertex seed and all previously found vertices that share tracks with it are (re-)fitted. Finally, after convergence of the fit, we check whether -the vertex candidate is merged with other vertices and discard it if that is the -case. For the next iteration, all tracks that were assigned to the vertex seed -and that have a weight above a certain threshold are removed from the seed +the vertex candidate is merged with other vertices and discard it if that is the +case. For the next iteration, all tracks that were assigned to the vertex seed +and that have a weight above a certain threshold are removed from the seed tracks. ::: @@ -1029,4 +1029,4 @@ tracks. [^phd:piacquadio:2010]: G. Piacquadio, 2010, Identification of b-jets and investigation of the discovery potential of a Higgs boson in the $W H \rightarrow l \nu \bar{b} b$ channel with the ATLAS experiment. [^phd:schlag:2022]: B.
Schlag, 2022, Advanced Algorithms and Software for Primary Vertex Reconstruction and Search for Flavor-Violating Supersymmetry with the ATLAS Experiment. [^billoirfitting:1992]: P. Billoir et al., 1992, Fast vertex fitting with a local parametrization of tracks. -[^fruwirth:amvfitting:2004]: R. Frühwirth et al., 2004, Adaptive Multi-Vertex fitting. \ No newline at end of file +[^fruwirth:amvfitting:2004]: R. Frühwirth et al., 2004, Adaptive Multi-Vertex fitting. diff --git a/docs/white_paper_index_template.md.j2 b/docs/white_paper_index_template.md.j2 index 6531602d1fb..8150dd7260d 100644 --- a/docs/white_paper_index_template.md.j2 +++ b/docs/white_paper_index_template.md.j2 @@ -20,4 +20,3 @@ how_to_add {% endfor %} ::: - diff --git a/docs/white_paper_template.md.j2 b/docs/white_paper_template.md.j2 index b6f10e95bf5..ec11072d157 100644 --- a/docs/white_paper_template.md.j2 +++ b/docs/white_paper_template.md.j2 @@ -3,7 +3,7 @@ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ({{ whp.slug }})= -# {{ whp.metadata.title }} +# {{ whp.metadata.title }} [GitHub]({{ whp.repository }}) {%- if whp.pdf_url is not none %} , [PDF]({{ whp.pdf_url }}) @@ -24,4 +24,3 @@ {{ whp.metadata.description }} - diff --git a/docs/white_papers/how_to_add.md b/docs/white_papers/how_to_add.md index ce0eb042956..db617724a37 100644 --- a/docs/white_papers/how_to_add.md +++ b/docs/white_papers/how_to_add.md @@ -22,7 +22,7 @@ management of documents. The project will immediately start building the basic default note with dummy content using GitHub Actions. You can now clone and start making -changes to the document and compile locally. +changes to the document and compile it locally. :::{tip} :name: latexmk-tip @@ -107,12 +107,12 @@ changed the content of the document. ::: - `references.bib` contains an example reference in standard *bibtex* format, and - is a good place to add any additional references that you want to cite in + is a good place to add any additional references that you want to cite in your document. - `latexmkrc` configures `latexmk` (see [here](#latexmk-tip)) -- `theme` contains the overall theme of the document. You do not typically need +- `theme` contains the overall theme of the document. You do not typically need to change anything in this folder. (whitepaper_index_update)= @@ -158,7 +158,7 @@ for the white papers. To run it, you need to install the dependencies in :::{tip} It is **strongly recommended** to use a [virtual environment](https://realpython.com/python-virtual-environments-a-primer/) for -this purpose! For example, run +this purpose! For example, run ```console $ python -m venv venv @@ -170,7 +170,7 @@ to create a local virtual environment, and then run the `pip` command above. You also need the `convert` executable from [ImageMagick](https://imagemagick.org/) available on your `$PATH`. You can -then run +then run ```console $ white_papers.py pull --github-token $GITHUB_TOKEN @@ -180,7 +180,7 @@ which will for each white_paper listed in `white_papers.toml` 1. Download the most recent PDF of the document built by that repository's CI 2. Make a PNG of the first page of that PDF to be displayed in the documentation (see the sketch below) -3. Download `metadata.tex` and `abstract.tex` from the repository, and parse +3. Download `metadata.tex` and `abstract.tex` from the repository, and parse them to extract the title, authors, and abstract content.
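If you want to check your ImageMagick setup, step 2 in the list above can be reproduced by hand. This is only a sketch of that single step with hypothetical file names; the real `white_papers.py` script handles it internally:

```python
import subprocess

# Render only the first page ("[0]") of the PDF as a PNG preview using
# ImageMagick's convert; "paper.pdf" and "paper.png" are placeholders.
subprocess.run(["convert", "paper.pdf[0]", "paper.png"], check=True)
```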
:::{important} diff --git a/thirdparty/GeoModel/README.md b/thirdparty/GeoModel/README.md index 663446149eb..ab796bc783e 100644 --- a/thirdparty/GeoModel/README.md +++ b/thirdparty/GeoModel/README.md @@ -2,4 +2,4 @@ This directory holds a simple build recipe for the [GeoModel](https://gitlab.cern.ch/GeoModelDev/GeoModel) project. It is used - in case `ACTS_USE_SYSTEM_GEOMODEL` is set to `FALSE` for the build. \ No newline at end of file + in case `ACTS_USE_SYSTEM_GEOMODEL` is set to `FALSE` for the build. diff --git a/thirdparty/README.md b/thirdparty/README.md index c4eeda8a48a..d90f1a04710 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -14,7 +14,7 @@ issues with missing files after installation. CMake instructions to build [nlohmann::json](https://github.com/nlohmann/json). -## boost +## boost For convenience, it's possible to use the ACTS build system to build the minimum required version of [boost](https://www.boost.org/) (currently 1.71.0). No source is diff --git a/thirdparty/actsvg/README.md b/thirdparty/actsvg/README.md index 55e83da351f..fc4e64c235a 100644 --- a/thirdparty/actsvg/README.md +++ b/thirdparty/actsvg/README.md @@ -3,4 +3,3 @@ This directory holds a simple build recipe for the [ActSVG](https://github.com/acts-project/actsvg) project. It is used in case `ACTS_USE_SYSTEM_ACTSVG` is set to `FALSE` for the build. - \ No newline at end of file diff --git a/thirdparty/traccc/CMakeLists.txt b/thirdparty/traccc/CMakeLists.txt index cd7307f38f4..2bea839ef1d 100644 --- a/thirdparty/traccc/CMakeLists.txt +++ b/thirdparty/traccc/CMakeLists.txt @@ -23,24 +23,24 @@ set( TRACCC_SETUP_VECMEM OFF CACHE BOOL set( TRACCC_SETUP_EIGEN3 OFF CACHE BOOL "Do not set up Eigen3 as part of Traccc" ) set( TRACCC_SETUP_THRUST ON CACHE BOOL - "Do not set up Thrust as part of Traccc" ) + "Set up Thrust as part of Traccc" ) set( TRACCC_SETUP_ALGEBRA_PLUGINS OFF CACHE BOOL - "Do not set up Algebra Plugins as part of Traccc" ) + "Do not set up Algebra Plugins as part of Traccc" ) set( TRACCC_SETUP_COVFIE OFF CACHE BOOL - "Do not set up Covfie as part of Traccc" ) + "Do not set up Covfie as part of Traccc" ) set( TRACCC_SETUP_DFELIBS OFF CACHE BOOL - "Do not set up dfelibs as part of Traccc" ) + "Do not set up dfelibs as part of Traccc" ) set( TRACCC_SETUP_DETRAY ON CACHE BOOL "Set up Detray as part of Traccc" ) set( TRACCC_SETUP_ACTS OFF CACHE BOOL - "Do not set up ACTS as part of Traccc" ) + "Do not set up ACTS as part of Traccc" ) set( TRACCC_SETUP_TBB OFF CACHE BOOL - "Do not set up TBB as part of Traccc" ) + "Do not set up TBB as part of Traccc" ) set( TRACCC_BUILD_TESTING OFF CACHE BOOL - "Turn off the build of the Traccc unit tests" ) + "Turn off the build of the Traccc unit tests" ) set( TRACCC_BUILD_EXAMPLES OFF CACHE BOOL - "Turn off the build of the Traccc examples" ) + "Turn off the build of the Traccc examples" ) # Now set up its build. -FetchContent_MakeAvailable( traccc ) \ No newline at end of file +FetchContent_MakeAvailable( traccc ) diff --git a/version_number b/version_number index 49ddabbbeb6..c5c9fb1f4a4 100644 --- a/version_number +++ b/version_number @@ -1 +1 @@ -9.9.9 \ No newline at end of file +9.9.9