Add detector cutting to all unit tests by default. Clean up resulting problems due to assumptions about looping over local detectors. Add extra debug plot support to the offset template.
tskisner committed Dec 13, 2023
1 parent 7708062 commit 7318a9b
Showing 57 changed files with 846 additions and 308 deletions.
21 changes: 11 additions & 10 deletions src/libtoast/src/toast_tod_filter.cpp
@@ -81,6 +81,7 @@ void toast::filter_polynomial(int64_t order, size_t n, uint8_t * flags,
last = &full_templates[(iorder - 1) * scanlen];
lastlast = &full_templates[(iorder - 2) * scanlen];
double orderinv = 1. / iorder;

#pragma omp simd
for (size_t i = 0; i < scanlen; ++i) {
const double x = xstart + i * dx;
@@ -130,6 +131,16 @@ void toast::filter_polynomial(int64_t order, size_t n, uint8_t * flags,
singular_values.data(), rcond_limit,
&rank, WORK.data(), LWORK, &info);

if (info != 0) {
auto log = toast::Logger::get();
std::ostringstream o;
o << "DGELLS: " << ngood << "/" << scanlen << " good samples, order " <<
norder;
o << " failed with info " << info;
log.error(o.str().c_str(), TOAST_HERE());
throw std::runtime_error(o.str().c_str());
}

for (int iorder = 0; iorder < norder; ++iorder) {
double * temp = &full_templates[iorder * scanlen];
for (int isignal = 0; isignal < nsignal; ++isignal) {
@@ -143,16 +154,6 @@ void toast::filter_polynomial(int64_t order, size_t n, uint8_t * flags,
}
}
}

if (info != 0) {
auto log = toast::Logger::get();
std::ostringstream o;
o << "DGELLS: " << ngood << "/" << scanlen << " good samples, order " <<
norder;
o << " failed with info " << info;
log.error(o.str().c_str(), TOAST_HERE());
throw std::runtime_error(o.str().c_str());
}
}
}

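The hunk above moves the check on the least-squares solver's info return value ahead of the loop that applies the fitted templates, so a failed fit raises before any coefficients touch the data. A minimal sketch of the same ordering in Python, with numpy.linalg.lstsq standing in for the LAPACK call (function and variable names are illustrative, not the library API):

    import numpy as np

    def filter_polynomial_scan(templates, signals, rcond=1e-3):
        # templates: (norder, scanlen) evaluated basis functions
        # signals:   (nsignal, scanlen) detector data, filtered in place
        # Solve the least-squares problem and validate it first ...
        try:
            coeff, _, rank, _ = np.linalg.lstsq(templates.T, signals.T, rcond=rcond)
        except np.linalg.LinAlgError as err:
            raise RuntimeError(f"polynomial template fit failed: {err}")
        if rank < templates.shape[0]:
            raise RuntimeError(f"template fit is rank deficient ({rank})")
        # ... and only then subtract the best-fit templates from each signal.
        signals -= (templates.T @ coeff).T
        return signals
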
17 changes: 9 additions & 8 deletions src/toast/_libtoast/ops_filterbin.cpp
@@ -273,7 +273,6 @@ void expand_matrix(py::array_t <double> compressed_matrix,
}
}


void build_template_covariance(std::vector <int64_t> & starts,
std::vector <int64_t> & stops,
std::vector <py::array_t <double,
@@ -312,7 +311,6 @@ void build_template_covariance(std::vector <int64_t> & starts,
}
}


// void build_template_covariance(
// py::buffer starts,
// py::buffer stops,
@@ -340,12 +338,12 @@ void build_template_covariance(std::vector <int64_t> & starts,
// stops, "stops", 1, temp_shape, {n_template}
// );

// // Template covariance
// // Template covariance
// double * raw_template_cov = extract_buffer <double> (
// template_covariance,
// "template_covariance",
// 2,
// temp_shape,
// template_covariance,
// "template_covariance",
// 2,
// temp_shape,
// {n_template, n_template}
// );

@@ -377,7 +375,10 @@ void build_template_covariance(std::vector <int64_t> & starts,
// val = 1;
// }
// for (int64_t isample = istart; isample < istop; ++isample) {
// //std::cerr << "DBG: (" << row << "," << col << "," << isample << ") trow[" << isample << "-" << row_start << "]=" << raw_row[isample - row_start] << " tcol[" << isample << "-" << col_start << "]=" << raw_col[isample - col_start] << " g=" << raw_good[isample] << std::endl;
// //std::cerr << "DBG: (" << row << "," << col << "," << isample << ")
// trow[" << isample << "-" << row_start << "]=" << raw_row[isample - row_start] << "
// tcol[" << isample << "-" << col_start << "]=" << raw_col[isample - col_start] << "
// g=" << raw_good[isample] << std::endl;
// val += raw_row[isample - row_start]
// * raw_col[isample - col_start]
// * raw_good[isample];
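
The commented-out reference implementation above spells out what build_template_covariance accumulates: for each pair of templates, the sum over their overlapping samples of template_row * template_col * good. A rough numpy sketch of that accumulation, omitting the normalization and the special-casing of empty diagonal entries visible in the commented code (names are illustrative):

    import numpy as np

    def build_template_covariance(starts, stops, templates, good):
        # templates[i] covers samples [starts[i], stops[i]); good is the
        # full-length good-sample mask (0 or 1 per sample).
        n = len(templates)
        cov = np.zeros((n, n))
        for i in range(n):
            for j in range(i, n):
                lo = max(starts[i], starts[j])
                hi = min(stops[i], stops[j])
                if hi <= lo:
                    continue
                ti = templates[i][lo - starts[i]:hi - starts[i]]
                tj = templates[j][lo - starts[j]:hi - starts[j]]
                cov[i, j] = cov[j, i] = np.sum(ti * tj * good[lo:hi])
        return cov
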
16 changes: 10 additions & 6 deletions src/toast/_libtoast/tod_filter.cpp
@@ -7,7 +7,9 @@


void sum_detectors(py::array_t <int64_t,
py::array::c_style | py::array::forcecast> detectors,
py::array::c_style | py::array::forcecast> det_indx,
py::array_t <int64_t,
py::array::c_style | py::array::forcecast> flag_indx,
py::array_t <unsigned char,
py::array::c_style | py::array::forcecast> shared_flags,
unsigned char shared_flag_mask,
@@ -20,14 +22,15 @@ void sum_detectors(py::array_t <int64_t,
py::array::c_style | py::array::forcecast> sum_data,
py::array_t <int64_t,
py::array::c_style | py::array::forcecast> hits) {
auto fast_detectors = detectors.unchecked <1>();
auto fast_detindx = det_indx.unchecked <1>();
auto fast_flagindx = flag_indx.unchecked <1>();
auto fast_shared_flags = shared_flags.unchecked <1>();
auto fast_det_data = det_data.unchecked <2>();
auto fast_det_flags = det_flags.unchecked <2>();
auto fast_sum_data = sum_data.mutable_unchecked <1>();
auto fast_hits = hits.mutable_unchecked <1>();

size_t ndet = fast_detectors.shape(0);
size_t ndet = fast_detindx.shape(0);
size_t nsample = fast_det_data.shape(1);

size_t buflen = 10000;
@@ -38,11 +41,12 @@ void sum_detectors(py::array_t <int64_t,
size_t sample_start = ibuf * buflen;
size_t sample_stop = std::min(sample_start + buflen, nsample);
for (size_t idet = 0; idet < ndet; ++idet) {
int64_t row = fast_detectors(idet);
int64_t datarow = fast_detindx(idet);
int64_t flagrow = fast_flagindx(idet);
for (size_t sample = sample_start; sample < sample_stop; ++sample) {
if ((fast_shared_flags(sample) & shared_flag_mask) != 0) continue;
if ((fast_det_flags(row, sample) & det_flag_mask) != 0) continue;
fast_sum_data(sample) += fast_det_data(row, sample);
if ((fast_det_flags(flagrow, sample) & det_flag_mask) != 0) continue;
fast_sum_data(sample) += fast_det_data(datarow, sample);
fast_hits(sample)++;
}
}
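
sum_detectors now takes separate index arrays for the data and flag buffers, since the two buffers can order or subset their detector rows independently once detectors are cut. A numpy sketch of what the loop computes (intent only, not the pybind11 signature):

    import numpy as np

    def sum_detectors(det_indx, flag_indx, shared_flags, shared_flag_mask,
                      det_data, det_flags, det_flag_mask, sum_data, hits):
        # Samples cut by the shared (telescope) flags are skipped for all detectors.
        shared_good = (shared_flags & shared_flag_mask) == 0
        for datarow, flagrow in zip(det_indx, flag_indx):
            # A detector's data row and flag row may differ, hence the two indices.
            good = shared_good & ((det_flags[flagrow] & det_flag_mask) == 0)
            sum_data[good] += det_data[datarow][good]
            hits[good] += 1
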
2 changes: 1 addition & 1 deletion src/toast/noise_sim.py
@@ -137,7 +137,7 @@ def NET(self, det):
def _detector_weight(self, det):
if self._NET[det] == 0:
nunit = self._NET[det].unit
return 0.0 * (1.0 / (nunit ** 2) / u.Hz)
return 0.0 * (1.0 / (nunit**2) / u.Hz)
else:
wt = 1.0 / (self._NET[det] ** 2) / self._rate[det]
return wt.decompose()
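
The expression being reformatted here is the inverse-variance detector weight, w = 1 / (NET**2 * rate), which decomposes to units of 1 / K**2. A quick check with hypothetical numbers:

    import astropy.units as u

    net = 300.0 * u.uK * u.s**0.5   # hypothetical detector NET
    rate = 100.0 * u.Hz             # hypothetical sample rate
    wt = 1.0 / (net**2) / rate      # same form as _detector_weight above
    print(wt.decompose())           # ~1.1e5 1 / K2
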
13 changes: 7 additions & 6 deletions src/toast/observation.py
@@ -102,14 +102,14 @@ def set_default_values(values=None):

# composite masks for convenience
defaults["shared_mask_nonscience"] = (
defaults["shared_mask_invalid"] |
defaults["shared_mask_unstable_scanrate"] |
defaults["shared_mask_irregular"]
defaults["shared_mask_invalid"]
| defaults["shared_mask_unstable_scanrate"]
| defaults["shared_mask_irregular"]
)
defaults["det_mask_nonscience"] = (
defaults["det_mask_invalid"] |
defaults["det_mask_processing"] |
defaults["det_mask_sso"]
defaults["det_mask_invalid"]
| defaults["det_mask_processing"]
| defaults["det_mask_sso"]
)

if values is not None:
@@ -443,6 +443,7 @@ def select_local_detectors(
for det in self.local_detectors:
if (det in sel_set) and (det in good):
dets.append(det)
# print(f"SELECT mask {int(flagmask)} {selection}: {dets}", flush=True)
return dets

# Detector set distribution
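
The composite defaults above are plain bitwise ORs of the individual masks, and select_local_detectors keeps a detector only if it was both requested and passes the flag mask. A compact sketch of that selection, assuming the observation exposes per-detector flags as a local_detector_flags mapping (requested_detectors is a hypothetical input list):

    flagmask = defaults["det_mask_nonscience"]
    good = {
        det for det, flags in ob.local_detector_flags.items()
        if (flags & flagmask) == 0
    }
    sel_set = set(requested_detectors)
    dets = [det for det in ob.local_detectors if det in sel_set and det in good]
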
14 changes: 12 additions & 2 deletions src/toast/ops/arithmetic.py
@@ -70,8 +70,8 @@ def _exec(self, data, detectors=None, **kwargs):

for ob in data.obs:
# Get the detectors we are using for this observation
dets = ob.select_local_detectors(detectors)
if len(dets) == 0:
local_dets = ob.select_local_detectors(detectors)
if len(local_dets) == 0:
# Nothing to do for this observation
continue
if self.first not in ob.detdata:
@@ -90,6 +90,16 @@ def _exec(self, data, detectors=None, **kwargs):
first_units = ob.detdata[self.first].units
second_units = ob.detdata[self.second].units

# Operate on the intersection of detectors
dets = list(
sorted(
set.intersection(
set(ob.detdata[self.first].detectors),
set(ob.detdata[self.second].detectors),
)
)
)

if self.result == self.first:
result_units = first_units
scale_first = 1.0
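
The intersection added above matters because, with detector cutting enabled, two detdata fields in the same observation can hold different detector lists; operating only on the common detectors avoids indexing a field for a detector it does not contain. A small standalone illustration (detector names are hypothetical):

    first_dets = ["D0A", "D0B", "D1A"]    # detectors present in self.first
    second_dets = ["D0A", "D1A", "D1B"]   # detectors in self.second (D0B was cut)
    dets = sorted(set(first_dets) & set(second_dets))
    print(dets)  # ['D0A', 'D1A'] -- only these are combined
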
115 changes: 84 additions & 31 deletions src/toast/ops/demodulation.py
@@ -192,21 +192,44 @@ def _exec(self, data, detectors=None, **kwargs):
timer = Timer()
timer.start()
for obs in demodulate_obs:
dets = obs.select_local_detectors(detectors, flagmask=self.det_flag_mask)
# Get the detectors which are not cut with per-detector flags
local_dets = obs.select_local_detectors(
detectors, flagmask=self.det_flag_mask
)
# print(f"DEMOD Proc {obs.comm.group_rank}: local {local_dets}")
if obs.comm.comm_group is None:
all_dets = local_dets
# print(f"DEMOD Proc {obs.comm.group_rank}: comm_group is None")
else:
proc_dets = obs.comm.comm_group.gather(local_dets, root=0)
all_dets = None
if obs.comm.comm_group.rank == 0:
# print(f"DEMOD gathered {proc_dets}")
all_dets = set()
for pdets in proc_dets:
for d in pdets:
all_dets.add(d)
all_dets = list(sorted(all_dets))
all_dets = obs.comm.comm_group.bcast(all_dets, root=0)
# print(f"DEMOD Proc {obs.comm.group_rank}: all {all_dets}")

offset = obs.local_index_offset
nsample = obs.n_local_samples

fsample = obs.telescope.focalplane.sample_rate
fmax, hwp_rate = self._get_fmax(obs)
# print(
# f"DEMOD: fsample={fsample}, fmax={fmax}, hwprate={hwp_rate}",
# flush=True,
# )
wkernel = self._get_wkernel(fmax, fsample)
lowpass = Lowpass(wkernel, fmax, fsample, offset, self.nskip, self.window)

# Create a new observation to hold the demodulated and downsampled data

demod_telescope = self._demodulate_telescope(obs)
demod_telescope = self._demodulate_telescope(obs, all_dets)
demod_times = self._demodulate_times(obs)
demod_detsets = self._demodulate_detsets(obs)
demod_detsets = self._demodulate_detsets(obs, all_dets)
demod_sample_sets = self._demodulate_sample_sets(obs)
demod_process_rows = obs.dist.process_rows
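
The gather / union / broadcast above gives every process in the group the full, sorted list of detectors that survive the flag cut on any process. The same pattern reduced to a standalone helper, using a bare mpi4py communicator in place of obs.comm.comm_group:

    from mpi4py import MPI

    def union_of_detectors(comm, local_dets):
        # Returns the sorted union of per-process detector lists on every rank.
        if comm is None:
            return sorted(local_dets)
        gathered = comm.gather(list(local_dets), root=0)
        all_dets = None
        if comm.rank == 0:
            all_dets = sorted({det for dets in gathered for det in dets})
        return comm.bcast(all_dets, root=0)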

@@ -230,7 +253,7 @@ def _exec(self, data, detectors=None, **kwargs):
# Allocate storage

demod_dets = []
for det in dets:
for det in local_dets:
for prefix in self.prefixes:
demod_dets.append(f"{prefix}_{det}")
n_local = demod_obs.n_local_samples
@@ -244,6 +267,7 @@ def _exec(self, data, detectors=None, **kwargs):

self._demodulate_shared_data(obs, demod_obs)

# (f"DEMOD Proc {obs.comm.group_rank} local_dets: {demod_obs.local_detectors}", flush=True)
exists_data = demod_obs.detdata.ensure(
self.det_data,
detectors=demod_dets,
@@ -254,10 +278,12 @@ def _exec(self, data, detectors=None, **kwargs):
self.det_flags, detectors=demod_dets, dtype=np.uint8
)

self._demodulate_flags(obs, demod_obs, dets, wkernel, offset)
self._demodulate_signal(data, obs, demod_obs, dets, lowpass)
self._demodulate_pointing(data, obs, demod_obs, dets, lowpass, offset)
self._demodulate_noise(obs, demod_obs, dets, fsample, hwp_rate, lowpass)
self._demodulate_flags(obs, demod_obs, local_dets, wkernel, offset)
self._demodulate_signal(data, obs, demod_obs, local_dets, lowpass)
self._demodulate_pointing(data, obs, demod_obs, local_dets, lowpass, offset)
self._demodulate_noise(
obs, demod_obs, local_dets, fsample, hwp_rate, lowpass
)

self._demodulate_intervals(obs, demod_obs)

@@ -282,7 +308,9 @@ def _exec(self, data, detectors=None, **kwargs):
def _get_fmax(self, obs):
times = obs.shared[self.times].data
hwp_angle = np.unwrap(obs.shared[self.hwp_angle].data)
hwp_rate = np.mean(np.diff(hwp_angle) / np.diff(times)) / (2 * np.pi) * u.Hz
hwp_rate = np.absolute(
np.mean(np.diff(hwp_angle) / np.diff(times)) / (2 * np.pi) * u.Hz
)
if self.fmax is not None:
fmax = self.fmax
else:
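
The np.absolute() added here keeps the estimated HWP rate positive even when the HWP spins in the negative direction. A synthetic check (a 2 Hz reversed rotation sampled at 100 Hz, values made up for illustration):

    import numpy as np

    times = np.arange(1000) / 100.0                            # 100 Hz sampling
    hwp_angle = (-2.0 * 2.0 * np.pi * times) % (2.0 * np.pi)   # 2 Hz, reversed spin
    rate = np.absolute(
        np.mean(np.diff(np.unwrap(hwp_angle)) / np.diff(times))
    ) / (2.0 * np.pi)
    print(rate)  # ~2.0, positive regardless of spin direction
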
@@ -300,22 +328,30 @@ def _get_wkernel(self, fmax, fsample):
return wkernel

@function_timer
def _demodulate_telescope(self, obs):
def _demodulate_telescope(self, obs, all_dets):
focalplane = obs.telescope.focalplane
det_data = focalplane.detector_data
field_names = det_data.colnames
# Initialize fields to empty lists
fields = dict([(name, []) for name in field_names])
for idet, det in enumerate(det_data["name"]):
fields = {name: list() for name in field_names}
all_set = set(all_dets)
for row, det in enumerate(det_data["name"]):
if det not in all_set:
continue
for field_name in field_names:
# Each detector translates into 3 or 5 new entries
for prefix in self.prefixes:
if field_name == "name":
fields[field_name].append(f"{prefix}_{det}")
else:
fields[field_name].append(det_data[field_name][idet])
fields = [fields[field_name] for field_name in field_names]
demod_det_data = QTable(fields, names=field_names)
fields[field_name].append(det_data[field_name][row])
demod_det_data = QTable(
[fields[field_name] for field_name in field_names], names=field_names
)
my_all = list()
for name in demod_det_data["name"]:
my_all.append(name)

demod_focalplane = Focalplane(
detector_data=demod_det_data,
field_of_view=focalplane.field_of_view,
@@ -359,23 +395,40 @@ def _demodulate_shared_data(self, obs, demod_obs):
return

@function_timer
def _demodulate_detsets(self, obs):
"""Lump all derived detectors into detector sets"""
detsets = obs.all_detector_sets
demod_detsets = []
if detsets is None:
for det in obs.all_detectors:
demod_detset = []
def _demodulate_detsets(self, obs, all_dets):
"""In order to force local detectors to remain on their original
process, we create a detector set for each row of the process
grid.
"""
log = Logger.get()
if obs.comm_col_size == 1:
# One process row
detsets = [all_dets]
else:
local_proc_dets = obs.comm_col.gather(obs.local_detectors, root=0)
detsets = None
if obs.comm_col_rank == 0:
all_set = set(all_dets)
detsets = list()
for iprow, pdets in enumerate(local_proc_dets):
plocal = list()
for d in pdets:
if d in all_set:
plocal.append(d)
if len(plocal) == 0:
msg = f"obs {obs.name}, process row {iprow} has no"
msg += " unflagged detectors. This may cause an error."
log.warning(msg)
detsets.append(plocal)
detsets = obs.comm_col.bcast(detsets, root=0)

demod_detsets = list()
for dset in detsets:
demod_detset = list()
for det in dset:
for prefix in self.prefixes:
demod_detset.append(f"{prefix}_{det}")
demod_detsets.append(demod_detset)
else:
for detset in detsets:
demod_detset = []
for det in detset:
for prefix in self.prefixes:
demod_detset.append(f"{prefix}_{det}")
demod_detsets.append(demod_detset)
demod_detsets.append(demod_detset)
return demod_detsets

@function_timer
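
The detector sets built above pin each physical detector, and therefore its demodulated counterparts, to the process-grid row that already owns it, so redistribution into the new observation cannot move data between rows. A condensed sketch, assuming comm_col is the column communicator whose ranks index the process rows and that the demodulation prefixes are demod0/demod4r/demod4i:

    keep = set(all_dets)
    local_proc_dets = comm_col.gather(list(local_dets), root=0)
    detsets = None
    if comm_col.rank == 0:
        # One detector set per process row, restricted to unflagged detectors.
        detsets = [[d for d in pdets if d in keep] for pdets in local_proc_dets]
    detsets = comm_col.bcast(detsets, root=0)

    prefixes = ("demod0", "demod4r", "demod4i")  # assumed prefixes
    demod_detsets = [
        [f"{prefix}_{det}" for det in dset for prefix in prefixes]
        for dset in detsets
    ]
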
@@ -634,7 +687,7 @@ class StokesWeightsDemod(Operator):
)

weights = Unicode(
defaults.weights, help="Observation detdata key for output weights"
f"{defaults.weights}_demod", help="Observation detdata key for output weights"
)

single_precision = Bool(False, help="If True, use 32bit float in output")
