Online triggered event run2auau new 2024p007 v1.0 (#19)
* Back to 5k events per segment.

* Directory structure should reflect the runtype configuration.

* Support cosmics and calibration run types.

* Increase priority.

* TPC calibration runs are now supported from a single workflow.

* Need to search any of the staging directories (beam, physics, ...)

* Setup for the run2auau run range.

* Runtype-dependent output directory.

* Fix typo.

* Further typo fix... and reduce the load on the production database.

* run2pp-->run2auau

* Increase memory request.

* Reorder

* Reapply fix.

* add run2auau calo macros (#18)

* ... and reapply once more after the cherry-pick.

* Prepare to reproduce calibrated calorimeter DSTs from run 54100 to present.

* Commit to reprocess calo calib DSTs from run 54100 on.

* Commit the missing streaming ruleset.

* Separate the tracking workflow from the event builder.

* Run the event builder without the tracking workflow, to allow us to specify different limits on the maximum number of jobs submitted in one pass.

* Switch to the run2auau working area to get the correct set of macros.

---------

Co-authored-by: Chris Pinkenburg <[email protected]>
klendathu2k and pinkenburg authored Oct 14, 2024
1 parent bfa7809 commit f86e623
Showing 13 changed files with 594 additions and 148 deletions.
run2auau/CaloProduction/runy2calib.sh (4 changes: 1 addition & 3 deletions)
@@ -124,9 +124,7 @@ echo ./cups.py -v -r ${runnumber} -s ${segment} -d ${outbase} finished -e ${stat


echo "bdee bdee bdee, That's All Folks!"
} > ${logdir#file:/}/${logbase}.out 2 ${logdir#file:/}/>${logbase}.err

#mv ${logbase}.out ${logdir#file:/}
#mv ${logbase}.err ${logdir#file:/}
} > ${logdir#file:/}/${logbase}.out 2> ${logdir#file:/}/${logbase}.err

exit $status_f4a
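The only substantive change in the hunk above fixes the stderr redirection on the logging brace group: in the old line the "2" was detached from ">" and the error-log path was spliced around the redirection operator, so stderr was never captured as intended. A minimal sketch of the corrected pattern, with placeholder values standing in for the real logdir/logbase job arguments:

    #!/usr/bin/env bash
    # Placeholder values; in production these come from the job arguments.
    logdir="file:///sphenix/data/data02/sphnxpro/testlogs/run_00053000_00053100"
    logbase="EXAMPLE_LOGBASE"

    {
      echo "job payload runs here"
      echo "bdee bdee bdee, That's All Folks!"
    } > ${logdir#file:/}/${logbase}.out 2> ${logdir#file:/}/${logbase}.err
    # ${logdir#file:/} strips the leading "file:/" scheme from the URL-style logdir,
    # and "2>" routes stderr to its own log alongside the captured stdout.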
run2auau/DST_CALIBRATIONS_run2auau.yaml (86 changes: 4 additions & 82 deletions)
@@ -1,81 +1,3 @@
#_______________________________________________________________________________________________________DST_EVENT__
#
# kaedama.py --rule DST_EVENT --config examples/sPHENIX/DST_EVENT_aua23.yaml --runs ...
#
DST_TPCCALIB_run2auau:

# DST_EVENT works from a pre-built set of run lists.
params:
name: DST_TPCCALIB_run2auau
build: new
build_name: new
dbtag: 2024p002
logbase : $(name)_$(build)_$(tag)-$INT(run,{RUNFMT})-$INT(seg,{SEGFMT})
outbase : $(name)_$(build)_$(tag)
script : run_cosmics.sh
payload : ./ProdFlow/run2pp/cosmics/
neventsper: 100
mem : 8192MB

#
# input query:
#
# This builds a list of all runs known to the file catalog "datasets" table.
# The query should return:
# 1. The source of the information (formatted as database name/table name)
# 2. The run number
# 3. A sequence number (a placeholder fixed at zero for event builders)
# 4. And a space-separated list of logical filenames
#
# The {*_condition} parameters (run, seg, limit) are substituted by kaedama
# based on (optional) command line options.
#
input:
db: daqdb
direct_path: /sphenix/lustre01/sphnxpro/{mode}/tpc/calib/
query: |-
select
'daqdb/filelist' as source ,
runnumber ,
0 as segment ,
string_agg( distinct split_part(filename,'/',-1), ' ' ) as files ,
string_agg( distinct split_part(filename,'/',-1) || ':' || firstevent || ':' || lastevent, ' ' ) as fileranges
from
filelist
where
filename like '/bbox%/TPC%calib%.evt'
{run_condition}
and runnumber<=53880
group by runnumber
having
every(transferred_to_sdcc)
order by runnumber
{limit_condition}
;
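The comment block above describes how kaedama substitutes the {run_condition} and {limit_condition} placeholders from (optional) command-line options. A hedged illustration of that mechanism, following the invocation pattern in the YAML header comments (the exact expansion text is an assumption, not taken from kaedama itself):

    # Hypothetical invocation, mirroring the "kaedama.py --rule ... --config ... --runs ..." comments:
    kaedama.py --rule DST_TPCCALIB_run2auau --config DST_CALIBRATIONS_run2auau.yaml --runs 53000 53880

    # Assumed expansions inside the query (illustrative only):
    #   {run_condition}   ->  and runnumber>=53000 and runnumber<=53880
    #   {limit_condition} ->  limit 50     (only when a submission limit is requested)
    # With no run selection or limit given, the placeholders presumably expand to nothing.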
# TODO: Need to add error checking to make sure that outdir, logdir, etc... are quoted properly. Else, this will cause problems with argument substitution
filesystem:
outdir : "/sphenix/lustre01/sphnxpro/physics/slurp/tpccalib/run_$(rungroup)"
logdir : "file:///sphenix/data/data02/sphnxpro/testlogs/run_$(rungroup)"
condor : "/tmp/testlogs/run_$(rungroup)"

#
# Again I note the need to ensure that the arguments are properly specified given the
# definition of the payload script.
#
job:
executable : "{payload}/run_cosmics.sh"
arguments : "$(nevents) {outbase} {logbase} $(run) $(seg) {outdir} $(build) $(tag) $(inputs) $(ranges) {neventsper} {logdir}"
output_destination : '{logdir}'
transfer_input_files : "{payload},cups.py,bachi.py,odbc.ini"
log : '{condor}/{logbase}.condor'
accounting_group : "group_sphenix.mdc2"
accounting_group_user : "sphnxpro"
priority : '3800'




#_______________________________________________________________________________________________________DST_EVENT__
#
# kaedama.py --rule DST_EVENT --config examples/sPHENIX/DST_EVENT_aua23.yaml --runs ...
@@ -97,7 +19,7 @@ DST_MBD_CALIBRATION_run2auau:

input:
db: daqdb
direct_path: /sphenix/lustre01/sphnxpro/{mode}/mbd/physics
direct_path: /sphenix/lustre01/sphnxpro/{mode}/mbd/*/
query: |-
select
'daqdb/filelist' as source ,
@@ -113,7 +35,7 @@ DST_MBD_CALIBRATION_run2auau:
(filename like '/bbox%/%mbd%physics%' and lastevent>2 )
)
{run_condition}
and runnumber<=53880
and runnumber>53880
group by runnumber
@@ -128,7 +50,7 @@ DST_MBD_CALIBRATION_run2auau:
# TODO: Need to add error checking to make sure that outdir, logdir, etc... are quoted properly. Else, this will cause problems with argument substitution
filesystem:
outdir : "/sphenix/lustre01/sphnxpro/physics/TEST/mbdcalib/$(build)_$(tag)/run_$(rungroup)"
outdir : "/sphenix/lustre01/sphnxpro/{runtype}/mbdcalib/$(build)_$(tag)/run_$(rungroup)"
logdir : "file:///sphenix/data/data02/sphnxpro/mbdcalib/$(build)_$(tag)/run_$(rungroup)"
condor : "/tmp/mbdcalib/$(build)_$(tag)/run_$(rungroup)"

@@ -139,7 +61,7 @@ DST_MBD_CALIBRATION_run2auau:
#
job:
executable : "{payload}/run.sh"
arguments : "$(nevents) {outbase} {logbase} $(run) $(seg) {outdir} $(build) $(tag) $(inputs) $(ranges) {neventsper} {logdir} {pass0dir}"
arguments : "$(nevents) {outbase} {logbase} $(run) $(seg) $(outdir) $(build) $(tag) $(inputs) $(ranges) {neventsper} {logdir} {pass0dir}"
output_destination : '{logdir}'
transfer_input_files : "{payload},cups.py,bachi.py,odbc.ini"
log : '{condor}/{logbase}.condor'
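Two details in the MBD calibration hunks above are easy to miss in the flattened diff: the direct_path now globs every staging subdirectory under mbd/ instead of only physics/ (the "search any of the staging directories" change), and the run selection flips from runnumber<=53880 to runnumber>53880 for the run2auau range. A sketch of what the broadened glob matches, with {mode} filled in by hand (the concrete value of {mode} is an assumption):

    # Old pattern searched only the physics staging area:
    ls -d /sphenix/lustre01/sphnxpro/physics/mbd/physics
    # New pattern matches any staging subdirectory (beam, physics, calib, cosmics, ...):
    ls -d /sphenix/lustre01/sphnxpro/physics/mbd/*/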
run2auau/DST_STREAMING_EVENT_run2auau_new_2024p007.yaml (71 changes: 36 additions & 35 deletions)
@@ -9,14 +9,15 @@ DST_STREAMING_EVENT_run2auau_new_2024p007:
logbase : $(name)_$(build)_$(tag)-$INT(run,{RUNFMT})-$INT(seg,{SEGFMT})
outbase : $(name)_$(build)_$(tag)
script : run_cosmics.sh
payload : ./ProdFlow/run2pp/cosmics/
payload : ./ProdFlow/run2auau/cosmics/
comment : "---"
rsync : "./ProdFlow/run2pp/cosmics/*,cups.py,bachi.py,odbc.ini"
rsync : "./ProdFlow/run2auau/cosmics/*,cups.py,bachi.py,odbc.ini"
mem : 20000MB
# 20GB of memory is not a typo
zstrig : 150
neventsperZS: 10000
neventsperNoZS: 100
myruntypes: beam|physics|calib|cosmics


input:
@@ -44,11 +45,11 @@ DST_STREAMING_EVENT_run2auau_new_2024p007:
filelist,run2auau
where
(
(filename similar to '/bbox%/TPC%(beam|physics)%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/TPOT%(beam|physics)%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/(beam|physics)_intt%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/GL1_(beam|physics)%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/(beam|physics)_mvtx%.evt' and lastevent>2 )
(filename similar to '/bbox%/TPC%({myruntypes})%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/TPOT%({myruntypes})%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/({myruntypes})_intt%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/GL1_({myruntypes})%.evt' and lastevent>2 ) or
(filename similar to '/bbox%/({myruntypes})_mvtx%.evt' and lastevent>2 )
)
and runnumber>=run2auau.firstrun and runnumber<=run2auau.lastrun
@@ -59,12 +60,12 @@ DST_STREAMING_EVENT_run2auau_new_2024p007:
having
every(transferred_to_sdcc) and
max(lastevent)>1000 and
sum( case when filename similar to '/bbox%/GL1_(beam|physics)%' then 1 else 0 end )>0 and
sum( case when filename similar to '/bbox%/GL1_({myruntypes})%' then 1 else 0 end )>0 and
(
sum( case when filename similar to '/bbox%/TPC%(beam|physics)%' then 1 else 0 end )>0 or
sum( case when filename similar to '/bbox%/TPOT%(beam|physics)%' then 1 else 0 end )>0 or
sum( case when filename similar to '/bbox%/(beam|physics)%intt%' then 1 else 0 end )>0 or
sum( case when filename similar to '/bbox%/(beam|physics)_mvtx%.evt' then 1 else 0 end )>0
sum( case when filename similar to '/bbox%/TPC%({myruntypes})%' then 1 else 0 end )>0 or
sum( case when filename similar to '/bbox%/TPOT%({myruntypes})%' then 1 else 0 end )>0 or
sum( case when filename similar to '/bbox%/({myruntypes})%intt%' then 1 else 0 end )>0 or
sum( case when filename similar to '/bbox%/({myruntypes})_mvtx%.evt' then 1 else 0 end )>0
)
order by runnumber
),
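In the query hunks above, the hard-coded (beam|physics) alternation is replaced by the {myruntypes} parameter defined earlier in this rule (beam|physics|calib|cosmics), which is how the single streaming workflow now also picks up cosmics and calibration run types. After straight text substitution, the first file pattern reads as sketched below; the psql spot check is hypothetical (connection details are placeholders, only the pattern itself comes from the YAML):

    # Expanded form of the first SIMILAR TO clause with {myruntypes} substituted:
    #   filename similar to '/bbox%/TPC%(beam|physics|calib|cosmics)%.evt'
    # Hypothetical spot check against the DAQ file catalog (host/db names are assumptions):
    psql -h daqdb.example.org -d daq -c \
      "select runnumber, filename from filelist where filename similar to '/bbox%/TPC%(beam|physics|calib|cosmics)%.evt' limit 5;"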
@@ -92,7 +93,7 @@ DST_STREAMING_EVENT_run2auau_new_2024p007:
# TODO: Need to add error checking to make sure that outdir, logdir, etc... are quoted properly. Else, this will cause problems with argument substitution
filesystem:
outdir : "/sphenix/lustre01/sphnxpro/physics/slurp/streaming/physics/$(build)_$(tag)/run_$(rungroup)"
outdir : "/sphenix/lustre01/sphnxpro/{runtype}/slurp/streaming/physics/$(build)_$(tag)/run_$(rungroup)"
logdir : "file:///sphenix/data/data02/sphnxpro/streaminglogs/$(build)_$(tag)/run_$(rungroup)"
histdir : "/sphenix/data/data02/sphnxpro/streamhist/$(build)_$(tag)/run_$(rungroup)"
condor : "/tmp/testlogs/$(build)_$(tag)/run_$(rungroup)"
@@ -103,7 +104,7 @@ DST_STREAMING_EVENT_run2auau_new_2024p007:
#
job:
executable : "{payload}/run_cosmics.sh"
arguments : "$(nevents) {outbase} {logbase} $(run) $(seg) {outdir} $(build) $(tag) $(inputs) $(ranges) $(neventsper) {logdir} {comment} {histdir} {PWD} {rsync}"
arguments : "$(nevents) {outbase} {logbase} $(run) $(seg) $(outdir) $(build) $(tag) $(inputs) $(ranges) $(neventsper) {logdir} {comment} {histdir} {PWD} {rsync}"
output_destination : '{logdir}'
log : '{condor}/{logbase}.condor'
accounting_group : "group_sphenix.mdc2"
@@ -123,9 +124,9 @@ DST_TRKR_HIT_run2auau_new_2024p007:
logbase : $(name)_$(build)_$(tag)-$INT(run,{RUNFMT})-$INT(seg,{SEGFMT})
outbase : $(name)_$(build)_$(tag)
script : run.sh
payload : ./ProdFlow/run2pp/TrackingProduction/
mem : 2048MB
rsync : "./ProdFlow/run2pp/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
payload : ./ProdFlow/run2auau/TrackingProduction/
mem : 4096MB
rsync : "./ProdFlow/run2auau/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
input : "DST_STREAMING_EVENT_run2auau_new_2024p007%"
mnrun : 53881
mxrun : 99999
@@ -153,19 +154,19 @@ DST_TRKR_HIT_run2auau_new_2024p007:
{limit_condition}
;
filesystem:
outdir : "/sphenix/lustre01/sphnxpro/physics/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
outdir : "/sphenix/lustre01/sphnxpro/{runtype}/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
logdir : "file:///sphenix/data/data02/sphnxpro/trackinglogs/$(build)_$(tag)/run_$(rungroup)"
histdir : "/sphenix/data/data02/sphnxpro/hitsethist/$(build)_$(tag)/run_$(rungroup)"
condor : "/tmp/trkrogs/$(build)_$(tag)/run_$(rungroup)"

job:
executable : "{payload}/run.sh"
arguments : "$(nevents) {outbase} {logbase} $(run) $(seg) {outdir} $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
arguments : "$(nevents) {outbase} {logbase} $(run) $(seg) $(outdir) $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
output_destination : '{logdir}'
log : '{condor}/{logbase}.condor'
accounting_group : "group_sphenix.mdc2"
accounting_group_user : "sphnxpro"
priority : '3800'
priority : '3900'



@@ -183,10 +184,10 @@ DST_TRKR_CLUSTER_run2auau_new_2024p007:
logbase : $(name)_$(build)_$(tag)-$INT(run,{RUNFMT})-$INT(seg,{SEGFMT})
outbase : $(name)_$(build)_$(tag)
script : run_job0.sh
payload : ./ProdFlow/run2pp/TrackingProduction/
payload : ./ProdFlow/run2auau/TrackingProduction/
mem : 2048MB
nevents : 0
rsync : "./ProdFlow/run2pp/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
rsync : "./ProdFlow/run2auau/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
input : "DST_TRKR_HIT_run2auau_new_2024p007%"
mnrun : 53881
mxrun : 99999
@@ -214,19 +215,19 @@ DST_TRKR_CLUSTER_run2auau_new_2024p007:
{limit_condition}
;
filesystem:
outdir : "/sphenix/lustre01/sphnxpro/physics/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
outdir : "/sphenix/lustre01/sphnxpro/{runtype}/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
logdir : "file:///sphenix/data/data02/sphnxpro/trackinglogs/$(build)_$(tag)/run_$(rungroup)"
histdir : "/sphenix/data/data02/sphnxpro/clusterhist/$(build)_$(tag)/run_$(rungroup)"
condor : "/tmp/trkrlogs/$(build)_$(tag)/run_$(rungroup)"

job:
executable : "{payload}/run_job0.sh"
arguments : "{nevents} {outbase} {logbase} $(run) $(seg) {outdir} $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
arguments : "{nevents} {outbase} {logbase} $(run) $(seg) $(outdir) $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
output_destination : '{logdir}'
log : '{condor}/{logbase}.condor'
accounting_group : "group_sphenix.mdc2"
accounting_group_user : "sphnxpro"
priority : '3800'
priority : '3900'



@@ -242,10 +243,10 @@ DST_TRKR_SEED_run2auau_new_2024p007:
logbase : $(name)_$(build)_$(tag)-$INT(run,{RUNFMT})-$INT(seg,{SEGFMT})
outbase : $(name)_$(build)_$(tag)
script : run_jobA.sh
payload : ./ProdFlow/run2pp/TrackingProduction/
payload : ./ProdFlow/run2auau/TrackingProduction/
mem : 2048MB
nevents : 0
rsync : "./ProdFlow/run2pp/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
rsync : "./ProdFlow/run2auau/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
input : "DST_TRKR_CLUSTER_run2auau_new_2024p007%"
mnrun : 53881
mxrun : 99999
@@ -273,19 +274,19 @@ DST_TRKR_SEED_run2auau_new_2024p007:
{limit_condition}
;
filesystem:
outdir : "/sphenix/lustre01/sphnxpro/physics/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
outdir : "/sphenix/lustre01/sphnxpro/{runtype}/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
logdir : "file:///sphenix/data/data02/sphnxpro/trackinglogs/$(build)_$(tag)/run_$(rungroup)"
histdir : "/sphenix/data/data02/sphnxpro/seedhist/$(build)_$(tag)/run_$(rungroup)"
condor : "/tmp/trkrlogs/$(build)_$(tag)/run_$(rungroup)"

job:
executable : "{payload}/run_jobA.sh"
arguments : "{nevents} {outbase} {logbase} $(run) $(seg) {outdir} $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
arguments : "{nevents} {outbase} {logbase} $(run) $(seg) $(outdir) $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
output_destination : '{logdir}'
log : '{condor}/{logbase}.condor'
accounting_group : "group_sphenix.mdc2"
accounting_group_user : "sphnxpro"
priority : '3800'
priority : '3900'


DST_TRKR_TRACKS_run2auau_new_2024p007:
@@ -298,10 +299,10 @@ DST_TRKR_TRACKS_run2auau_new_2024p007:
logbase : $(name)_$(build)_$(tag)-$INT(run,{RUNFMT})-$INT(seg,{SEGFMT})
outbase : $(name)_$(build)_$(tag)
script : run_jobC.sh
payload : ./ProdFlow/run2pp/TrackingProduction/
payload : ./ProdFlow/run2auau/TrackingProduction/
mem : 2048MB
nevents : 0
rsync : "./ProdFlow/run2pp/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
rsync : "./ProdFlow/run2auau/TrackingProduction/*,cups.py,bachi.py,odbc.ini"
seeds : "DST_TRKR_SEED_run2auau_new_2024p007%"
clusters: "DST_TRKR_CLUSTER_run2auau_new_2024p007%"
mnrun : 53881
@@ -346,19 +347,19 @@ DST_TRKR_TRACKS_run2auau_new_2024p007:
;
;
filesystem:
outdir : "/sphenix/lustre01/sphnxpro/physics/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
outdir : "/sphenix/lustre01/sphnxpro/{runtype}/slurp/tracking/$(build)_$(tag)/run_$(rungroup)"
logdir : "file:///sphenix/data/data02/sphnxpro/trackinglogs//$(build)_$(tag)/run_$(rungroup)"
histdir : "/sphenix/data/data02/sphnxpro/trackhist/$(build)_$(tag)/run_$(rungroup)"
condor : "/tmp/trkrlogs/$(build)_$(tag)/run_$(rungroup)"

job:
executable : "{payload}/run_jobC.sh"
arguments : "{nevents} {outbase} {logbase} $(run) $(seg) {outdir} $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
arguments : "{nevents} {outbase} {logbase} $(run) $(seg) $(outdir) $(build) $(tag) $(inputs) $(ranges) {logdir} {histdir} {PWD} {rsync}"
output_destination : '{logdir}'
log : '{condor}/{logbase}.condor'
accounting_group : "group_sphenix.mdc2"
accounting_group_user : "sphnxpro"
priority : '3800'
priority : '3900'



(Diffs for the remaining changed files were not rendered on this page.)
