From c944b5ddcd11822ab4876fd6607b30d2ec5d926a Mon Sep 17 00:00:00 2001
From: Cecilia Ferrari
Date: Wed, 10 Jan 2024 14:13:50 +0100
Subject: [PATCH] adding new block to model s2 loss

---
 flamedisx/lxe_blocks/raw_signals.py         | 28 +++++++-----
 flamedisx/lxe_blocks/reconstruct_signals.py | 36 ++++++++--------
 flamedisx/lxe_blocks/s2_loss.py             | 47 +++++++++++++++++++++
 flamedisx/lxe_sources.py                    |  2 +
 flamedisx/xenon/x1t_sr1.py                  |  6 ++-
 5 files changed, 89 insertions(+), 30 deletions(-)
 create mode 100644 flamedisx/lxe_blocks/s2_loss.py

diff --git a/flamedisx/lxe_blocks/raw_signals.py b/flamedisx/lxe_blocks/raw_signals.py
index c67eeede3..cc2f1cd5d 100644
--- a/flamedisx/lxe_blocks/raw_signals.py
+++ b/flamedisx/lxe_blocks/raw_signals.py
@@ -27,15 +27,14 @@ class MakeFinalSignals(fd.Block):
     signal_name: str

     def _simulate(self, d):
+        d[self.signal_name] = stats.norm.rvs(
+            loc=(d[self.quanta_name + 's_detected']
+                 * self.gimme_numpy(self.quanta_name + '_gain_mean')),
+            scale=(d[self.quanta_name + 's_detected']**0.5
+                   * self.gimme_numpy(self.quanta_name + '_gain_std')))
         if self.quanta_name == 'electron':
-            d[self.signal_name] = stats.poisson.rvs(d[self.quanta_name + 's_detected']
-                                                    * self.gimme_numpy(self.quanta_name + '_gain_mean'))
-        else:
-            d[self.signal_name] = stats.norm.rvs(
-                loc=(d[self.quanta_name + 's_detected']
-                     * self.gimme_numpy(self.quanta_name + '_gain_mean')),
-                scale=(d[self.quanta_name + 's_detected']**0.5
-                       * self.gimme_numpy(self.quanta_name + '_gain_std')))
+            # Binomial n must be a non-negative integer, so round the
+            # Gaussian-smeared signal before thinning it
+            d[self.signal_name] = stats.binom.rvs(
+                n=np.clip(np.round(d[self.signal_name]), 0, None).astype(int),
+                p=self.gimme_numpy(self.quanta_name + '_loss_mean'))

     def _annotate(self, d):
         m = self.gimme_numpy(self.quanta_name + '_gain_mean')
@@ -70,14 +69,19 @@ def _compute(self,

         # add offset to std to avoid NaNs from norm.pdf if std = 0
         if self.quanta_name == 'electron':
-            s_observed = tf.clip_by_value(s_observed, 1e-15, tf.float32.max)
-            result = tfp.distributions.Poisson(
-                rate=mean + 1e-10).prob(s_observed)
+            mean_electron_loss = self.gimme(self.quanta_name + '_loss_mean',
+                                            data_tensor=data_tensor,
+                                            ptensor=ptensor)[:, o, o]
+            # Approximate the Gaussian gain followed by binomial survival
+            # (cf. _simulate) with a Normal whose mean is scaled by the
+            # survival probability
+            result = tfp.distributions.Normal(
+                loc=mean * mean_electron_loss, scale=std + 1e-10
+            ).prob(s_observed)
         else:
             result = tfp.distributions.Normal(
                 loc=mean, scale=std + 1e-10
             ).prob(s_observed)

+        ''' TODO: confirm this acceptance chunk can be removed
         # Add detection/selection efficiency
         result *= self.gimme(SIGNAL_NAMES[self.quanta_name] + '_acceptance',
@@ -132,6 +136,8 @@ def electron_gain_mean(z, *, g2=20):

     electron_gain_std = 5.

+    electron_loss_mean = 1.
+
     def _compute(self, data_tensor, ptensor,
                  electrons_detected, s2_raw):
         return super()._compute(
diff --git a/flamedisx/lxe_blocks/reconstruct_signals.py b/flamedisx/lxe_blocks/reconstruct_signals.py
index 7255baea5..c4d025402 100644
--- a/flamedisx/lxe_blocks/reconstruct_signals.py
+++ b/flamedisx/lxe_blocks/reconstruct_signals.py
@@ -14,7 +14,7 @@ class ReconstructSignals(fd.Block):
     """Common code for ReconstructS1 and ReconstructS2"""

     model_attributes = ('check_acceptances',)
-    non_integer_dimensions = ('s1_raw', 's2_raw',)
+    non_integer_dimensions = ('s1_raw', 's2_raw_after_loss',)

     # Whether to check acceptances are positive at the observed events.
     # This is recommended, but you'll have to turn it off if your
@@ -186,10 +186,10 @@ def _compute(self, data_tensor, ptensor,

 @export
 class ReconstructS2(ReconstructSignals):
-    raw_signal_name = 's2_raw'
+    raw_signal_name = 's2_raw_after_loss'
     signal_name = 's2'

-    dimensions = ('s2_raw', 's2')
+    dimensions = ('s2_raw_after_loss', 's2')
     special_model_functions = (
         'reconstruction_bias_s2_simulate',
         'reconstruction_smear_s2_simulate',
@@ -200,7 +200,7 @@ class ReconstructS2(ReconstructSignals):
         ('s2_acceptance',)
         + special_model_functions)

-    max_dim_size = {'s2_raw': 240}
+    max_dim_size = {'s2_raw_after_loss': 240}

     s2_smear_load = 3e-3

     def s2_acceptance(self, s2, s2_min=2, s2_max=6000):
@@ -208,45 +208,45 @@ def s2_acceptance(self, s2, s2_min=2, s2_max=6000):
                         tf.zeros_like(s2, dtype=fd.float_type()),
                         tf.ones_like(s2, dtype=fd.float_type()))

-    # Getting from s2_raw -> s2
-    def reconstruction_bias_s2_simulate(self, s2_raw):
+    # Getting from s2_raw_after_loss -> s2
+    def reconstruction_bias_s2_simulate(self, s2_raw_after_loss):
         """ Dummy method for pax s2 reconstruction bias mean.
         Overwrite it in source specific class. See x1t_sr1.py for example.
         """
-        return tf.ones_like(s2_raw, dtype=fd.float_type())
+        return tf.ones_like(s2_raw_after_loss, dtype=fd.float_type())

-    def reconstruction_smear_s2_simulate(self, s2_raw):
+    def reconstruction_smear_s2_simulate(self, s2_raw_after_loss):
         """ Dummy method for pax s2 reconstruction bias spread.
         Overwrite it in source specific class. See x1t_sr1.py for example.

         Not quite a Dirac delta, which would make this block an identity
         matrix, but computationally intractable (or at least not trivially
         tractable). We therefore need to smear
-        this Dirac delta out by a small loading term of 0.001. A larger s2_raw
+        this Dirac delta out by a small loading term of 0.001. A larger s2_raw_after_loss
         max_dim_size would need a smaller loading term.
         """
-        return tf.zeros_like(s2_raw, dtype=fd.float_type()) + self.s2_smear_load
+        return tf.zeros_like(s2_raw_after_loss, dtype=fd.float_type()) + self.s2_smear_load

-    # Getting from s2 -> s2_raw
-    def reconstruction_bias_s2_annotate(self, s2_raw):
+    # Getting from s2 -> s2_raw_after_loss
+    def reconstruction_bias_s2_annotate(self, s2_raw_after_loss):
         """ Dummy method for pax s2 reconstruction bias mean.
         Overwrite it in source specific class. See x1t_sr1.py for example.
         """
-        return tf.ones_like(s2_raw, dtype=fd.float_type())
+        return tf.ones_like(s2_raw_after_loss, dtype=fd.float_type())

-    def reconstruction_smear_s2_annotate(self, s2_raw):
+    def reconstruction_smear_s2_annotate(self, s2_raw_after_loss):
         """ Dummy method for pax s2 reconstruction bias spread.
         Overwrite it in source specific class. See x1t_sr1.py for example.

         Not quite a Dirac delta, which would make this block an identity
         matrix, but computationally intractable (or at least not trivially
         tractable). We therefore need to smear
-        this Dirac delta out by a small loading term of 0.001. A larger s2_raw
+        this Dirac delta out by a small loading term of 0.001. A larger s2_raw_after_loss
         max_dim_size would need a smaller loading term.
""" - return tf.zeros_like(s2_raw, dtype=fd.float_type())+self.s2_smear_load + return tf.zeros_like(s2_raw_after_loss, dtype=fd.float_type())+self.s2_smear_load def _compute(self, data_tensor, ptensor, - s2_raw, s2): + s2_raw_after_loss, s2): return super()._compute( - s_raw=s2_raw, + s_raw=s2_raw_after_loss, s_observed=s2, data_tensor=data_tensor, ptensor=ptensor) diff --git a/flamedisx/lxe_blocks/s2_loss.py b/flamedisx/lxe_blocks/s2_loss.py new file mode 100644 index 000000000..13cf0a8e4 --- /dev/null +++ b/flamedisx/lxe_blocks/s2_loss.py @@ -0,0 +1,47 @@ +import numpy as np +from scipy import stats +import tensorflow as tf +import tensorflow_probability as tfp + +import flamedisx as fd +export, __all__ = fd.exporter() +o = tf.newaxis + + +@export +class MakeS2AfterLoss(fd.Block): + dimensions = ('s2_raw', 's2_raw_after_loss') + + model_functions = ('s2_survival_p',) + + max_dim_size = {'s2_raw': 240} + + s2_survival_p = 1 + + def _compute(self, data_tensor, ptensor, + s2_raw, s2_raw_after_loss): + s2_survival_probability = self.gimme('s2_survival_p', + data_tensor=data_tensor, ptensor=ptensor)[:, o, o] + + # s2_raw_after_loss distributed as Binom(s2_raw, p=s2_survival_probability) + result = tfp.distributions.Binomial( + total_count=s2_raw, + probs=tf.cast(s2_survival_probability, dtype=fd.float_type()) + ).prob(s2_raw_after_loss) + + return result + + def _simulate(self, d): + d['s2_raw_after_loss'] = stats.binom.rvs( + n=d['s2_raw'], + p=self.gimme_numpy('s2_survival_p')) + + def _annotate(self, d): + # TODO: this assumes the spread from the double PE effect is subdominant + s2_survival_probability = self.gimme_numpy('s2_survival_p') + for suffix, intify in (('min', np.floor), + ('max', np.ceil), + ('mle', lambda x: x)): + d['s2_raw_' + suffix] = \ + intify(d['s2_raw_after_loss_' + suffix].values + / (1 + s2_survival_p)) diff --git a/flamedisx/lxe_sources.py b/flamedisx/lxe_sources.py index b8bc1ee25..91f46b6d8 100644 --- a/flamedisx/lxe_sources.py +++ b/flamedisx/lxe_sources.py @@ -16,6 +16,7 @@ class ERSource(fd.BlockModelSource): fd.ReconstructS1, fd.DetectElectrons, fd.MakeS2, + fd.MakeS2AfterLoss, fd.ReconstructS2) @staticmethod @@ -50,6 +51,7 @@ class NRSource(fd.BlockModelSource): fd.ReconstructS1, fd.DetectElectrons, fd.MakeS2, + fd.MakeS2AfterLoss, fd.ReconstructS2) final_dimensions = ('s1', 's2') diff --git a/flamedisx/xenon/x1t_sr1.py b/flamedisx/xenon/x1t_sr1.py index 173570eb1..91e219ed0 100644 --- a/flamedisx/xenon/x1t_sr1.py +++ b/flamedisx/xenon/x1t_sr1.py @@ -31,6 +31,7 @@ DEFAULT_G2_TOTAL = DEFAULT_G2 / (1.-DEFAULT_AREA_FRACTION_TOP) DEFAULT_SINGLE_ELECTRON_GAIN = DEFAULT_G2_TOTAL / DEFAULT_EXTRACTION_EFFICIENCY DEFAULT_SINGLE_ELECTRON_WIDTH = 0.25 * DEFAULT_SINGLE_ELECTRON_GAIN +DEFAULT_S2_SURVIVAL_PROBABILITY = 1 # Official numbers from BBF DEFAULT_S1_RECONSTRUCTION_BIAS_PIVOT = 0.5948841302444277 @@ -514,7 +515,10 @@ def electron_gain_std(s2_relative_ly, # 0 * light yield is to fix the shape return single_electron_width + 0. * s2_relative_ly - + @staticmethod + def s2_survival_p(z,*, s2_survival_prob=DEFAULT_S2_SURVIVAL_PROBABILITY): + # 0 * light yield is to fix the shape + return s2_survival_prob + 0. * z @staticmethod