Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Experimental CB in WS2024 configuration #377

Closed
wants to merge 15 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions flamedisx/likelihood.py
Original file line number Diff line number Diff line change
Expand Up @@ -284,7 +284,7 @@ def set_data(self,
continue

# Copy ensures annotations don't clobber
source.set_data(deepcopy(data[dname]), data_is_annotated)
source.set_data(deepcopy(data[dname]), data_is_annotated,ignore_priors=True)

# Update batch info
dset_index = self.dsetnames.index(dname)
Expand Down Expand Up @@ -355,6 +355,10 @@ def simulate(self, fix_truth=None, **params):
mu = rm * s.mu_before_efficiencies(
**self._filter_source_kwargs(params, sname))
# Simulate this many events from source
if type(self.sources[sname]).__name__!='TemplateSource':
#only do for non-template sources
mu=mu/self.mu_estimators[sname](**self._filter_source_kwargs(params, sname))

n_to_sim = np.random.poisson(mu)
if n_to_sim == 0:
continue
Expand Down Expand Up @@ -532,7 +536,6 @@ def _log_likelihood(self,
self.log_constraint(**kwargs),
0.)


# Autodifferentiation. This is why we use tensorflow:
grad = tf.gradients(ll, grad_par_stack)[0]
if second_order:
Expand Down
23 changes: 16 additions & 7 deletions flamedisx/nest/lxe_blocks/energy_spectrum.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
class EnergySpectrum(fd.FirstBlock):
dimensions = ('energy',)
model_attributes = (
'energies',
'energies','max_dim_size',
'radius', 'z_top', 'z_bottom', 'z_topDrift',
'drift_velocity',
't_start', 't_stop',
Expand All @@ -32,6 +32,14 @@ class EnergySpectrum(fd.FirstBlock):
# Just a dummy 0-10 keV spectrum
energies = tf.cast(tf.linspace(0., 10., 1000),
dtype=fd.float_type())
default_size=100
max_dim_size : dict
def setup(self):
"""
        Sets up a manageable max dim size
"""
assert isinstance(self.max_dim_size,dict)
self.max_dim_size={'energy':self.max_dim_size['energy']}

drift_map_dt = None
def derive_drift_time(self, data):
Expand All @@ -58,7 +66,7 @@ def derive_observed_xy(self, data):
y_obs = self.drift_map_x(np.array([data['r'], data['z']]).T) * sinTheta

return x_obs, y_obs

def domain(self, data_tensor):
assert isinstance(self.energies, tf.Tensor) # see WIMPsource for why

Expand Down Expand Up @@ -283,15 +291,16 @@ def _compute(self, data_tensor, ptensor, *, energy):
def mu_before_efficiencies(self, **params):
return np.sum(fd.np_to_tf(self.rates_vs_energy))


# Relics from before dim size was an attribute — maybe this can be changed?
@export
class FixedShapeEnergySpectrumNR(FixedShapeEnergySpectrum):
max_dim_size = {'energy': 150}


def setup(self):
self.max_dim_size=self.max_dim_size
@export
class FixedShapeEnergySpectrumER(FixedShapeEnergySpectrum):
max_dim_size = {'energy': 100}
def setup(self):
self.max_dim_size=self.max_dim_size



@export
Expand Down
110 changes: 77 additions & 33 deletions flamedisx/nest/lxe_blocks/quanta_splitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@ def _compute(self,
ions_produced,
# Dependency domain and value
energy, rate_vs_energy):

def compute_single_energy(args, approx=False):
# Compute the block for a single energy.
# Set approx to True for an approximate computation at higher energies
Expand All @@ -62,7 +61,12 @@ def compute_single_energy(args, approx=False):

# Calculate the ion domain tensor for this energy
_ions_produced = ions_produced_add + ions_min

            # every event in the batch shares E, and therefore the ions domain
_ions_produced_1D=_ions_produced[0,0,0,:]
#create p_ni/p_nq domain for ER/NR
nq_2D=tf.repeat(unique_quanta[:,o],tf.shape(_ions_produced_1D)[0],axis=1)
ni_2D=tf.repeat(_ions_produced_1D[o,:],tf.shape(unique_quanta)[0],axis=0)

if self.is_ER:
nel_mean = self.gimme('mean_yield_electron', data_tensor=data_tensor, ptensor=ptensor,
bonus_arg=energy)
Expand All @@ -72,19 +76,25 @@ def compute_single_energy(args, approx=False):
bonus_arg=nq_mean)

if approx:
p_nq = tfp.distributions.Normal(loc=nq_mean,
scale=tf.sqrt(nq_mean * fano) + 1e-10).prob(nq)
p_nq_1D = tfp.distributions.Normal(loc=nq_mean,
scale=tf.sqrt(nq_mean * fano) + 1e-10).prob(unique_quanta)
else:
normal_dist_nq = tfp.distributions.Normal(loc=nq_mean,
scale=tf.sqrt(nq_mean * fano) + 1e-10)
p_nq = normal_dist_nq.cdf(nq + 0.5) - normal_dist_nq.cdf(nq - 0.5)
scale=tf.sqrt(nq_mean * fano) + 1e-10)
p_nq_1D=normal_dist_nq.cdf(unique_quanta + 0.5) - normal_dist_nq.cdf(unique_quanta - 0.5)
                    # restore p_nq from unique_nq -> n_events x n_electrons x n_photons (does not need n_ions)
p_nq=tf.gather_nd(params=p_nq_1D,indices=index_nq[:,o],batch_dims=0)
p_nq=tf.reshape(p_nq,[tf.shape(nq)[0],tf.shape(nq)[1],tf.shape(nq)[2]])


ex_ratio = self.gimme('exciton_ratio', data_tensor=data_tensor, ptensor=ptensor,
bonus_arg=energy)
alpha = 1. / (1. + ex_ratio)

p_ni = tfp.distributions.Binomial(
total_count=nq, probs=alpha).prob(_ions_produced)
p_ni_2D=tfp.distributions.Binomial(total_count=nq_2D, probs=alpha).prob(ni_2D)
#restore p_ni from unique_nq x n_ions -> unique_nel x n_electrons x n_photons x n_ions
p_ni=tf.gather_nd(params=p_ni_2D,indices=index_nq[:,o],batch_dims=0)
p_ni=tf.reshape(tf.reshape(p_ni,[-1]),[tf.shape(nq)[0],tf.shape(nq)[1],tf.shape(nq)[2],tf.shape(nq)[3]])

else:
yields = self.gimme('mean_yields', data_tensor=data_tensor, ptensor=ptensor,
Expand All @@ -98,37 +108,47 @@ def compute_single_energy(args, approx=False):
bonus_arg=nq_mean)
ni_fano = yield_fano[0]
nex_fano = yield_fano[1]


#p_ni does not need to have an altered dimensionality, see tensordot.
if approx:
p_ni = tfp.distributions.Normal(loc=nq_mean*alpha,
scale=tf.sqrt(nq_mean*alpha*ni_fano) + 1e-10).prob(_ions_produced)
p_ni_1D = tfp.distributions.Normal(loc=nq_mean*alpha,
scale=tf.sqrt(nq_mean*alpha*ni_fano) + 1e-10).prob(_ions_produced_1D)

p_nq = tfp.distributions.Normal(loc=nq_mean*alpha*ex_ratio,
p_nq_2D = tfp.distributions.Normal(loc=nq_mean*alpha*ex_ratio,
scale=tf.sqrt(nq_mean*alpha*ex_ratio*nex_fano) + 1e-10).prob(
nq - _ions_produced)
nq_2D - ni_2D)
else:
normal_dist_ni = tfp.distributions.Normal(loc=nq_mean*alpha,
scale=tf.sqrt(nq_mean*alpha*ni_fano) + 1e-10)
p_ni = normal_dist_ni.cdf(_ions_produced + 0.5) - \
normal_dist_ni.cdf(_ions_produced - 0.5)
p_ni_1D = normal_dist_ni.cdf(_ions_produced_1D + 0.5) - \
normal_dist_ni.cdf(_ions_produced_1D - 0.5)

normal_dist_nq = tfp.distributions.Normal(loc=nq_mean*alpha*ex_ratio,
scale=tf.sqrt(nq_mean*alpha*ex_ratio*nex_fano) + 1e-10)
p_nq = normal_dist_nq.cdf(nq - _ions_produced + 0.5) \
- normal_dist_nq.cdf(nq - _ions_produced - 0.5)
p_nq_2D = normal_dist_nq.cdf(nq_2D - ni_2D + 0.5) \
- normal_dist_nq.cdf(nq_2D - ni_2D - 0.5)

# restore p_nq from unique_nq x n_ions -> unique_nel x n_electrons x n_photons x n_ions
p_nq=tf.gather_nd(params=p_nq_2D,indices=index_nq[:,o],batch_dims=0)
p_nq=tf.reshape(tf.reshape(p_nq,[-1]),[tf.shape(nq)[0],tf.shape(nq)[1],tf.shape(nq)[2],tf.shape(nq)[3]])



nel_2D=tf.repeat(unique_nel[:,o],tf.shape(_ions_produced_1D)[0],axis=1)
ni_nel_2D=tf.repeat(_ions_produced_1D[o,:],tf.shape(unique_nel)[0],axis=0)

recomb_p = self.gimme('recomb_prob', data_tensor=data_tensor, ptensor=ptensor,
bonus_arg=(nel_mean, nq_mean, ex_ratio))
skew = self.gimme('skewness', data_tensor=data_tensor, ptensor=ptensor,
bonus_arg=nq_mean)
var = self.gimme('variance', data_tensor=data_tensor, ptensor=ptensor,
bonus_arg=(nel_mean, nq_mean, recomb_p, _ions_produced))
bonus_arg=(nel_mean, nq_mean, recomb_p, ni_nel_2D))
width_corr = self.gimme('width_correction', data_tensor=data_tensor, ptensor=ptensor,
bonus_arg=skew)
mu_corr = self.gimme('mu_correction', data_tensor=data_tensor, ptensor=ptensor,
bonus_arg=(skew, var, width_corr))

mean = (tf.ones_like(_ions_produced, dtype=fd.float_type()) - recomb_p) * _ions_produced - mu_corr
mean = (tf.ones_like(ni_nel_2D, dtype=fd.float_type()) - recomb_p) * ni_nel_2D - mu_corr
std_dev = tf.sqrt(var) / width_corr

if self.is_ER:
Expand All @@ -137,19 +157,28 @@ def compute_single_energy(args, approx=False):
owens_t_terms = 5

if approx:
p_nel = fd.tfp_files.SkewGaussian(loc=mean, scale=std_dev,
skewness=skew,
owens_t_terms=owens_t_terms).prob(electrons_produced)
p_nel_1D = fd.tfp_files.SkewGaussian(loc=mean, scale=std_dev,
skewness=skew,
owens_t_terms=owens_t_terms).prob(nel_2D)
else:
p_nel = fd.tfp_files.TruncatedSkewGaussianCC(loc=mean, scale=std_dev,
skewness=skew,
limit=_ions_produced,
owens_t_terms=owens_t_terms).prob(electrons_produced)

p_mult = p_nq * p_ni * p_nel

# Contract over ions_produced
p_final = tf.reduce_sum(p_mult, 3)
p_nel_1D = fd.tfp_files.TruncatedSkewGaussianCC(loc=mean, scale=std_dev,
skewness=skew,
limit=ni_nel_2D,
owens_t_terms=owens_t_terms).prob(nel_2D)


#Restore p_nel unique_nel x nions-> unique_nel x n_electrons x n_photons x n_ions
p_nel=tf.gather_nd(params=p_nel_1D,indices=index_nel[:,o],batch_dims=0)
p_nel=tf.reshape(tf.reshape(p_nel,[-1]),[tf.shape(nq)[0],tf.shape(nq)[1],tf.shape(nq)[3]])
p_nel=tf.repeat(p_nel[:,:,o,:],tf.shape(nq)[2],axis=2)

#modified contractions remove need for costly repeats in ions dimension.
if self.is_ER:
p_mult = p_ni * p_nel
p_final = tf.reduce_sum(p_mult, 3)*p_nq
else:
p_mult = p_nq*p_nel
p_final = tf.tensordot(p_mult,p_ni_1D,axes=[[3],[0]])

r_final = p_final * rate_vs_energy

Expand All @@ -169,6 +198,12 @@ def compute_single_energy_approx(args):
return compute_single_energy(args, approx=True)

nq = electrons_produced + photons_produced
#remove degenerate dimensions
#nevtxnelxnph->nq
unique_quanta,index_nq=tf.unique(tf.reshape(nq[:,:,:,0],[-1]))
#nevtxnel->nel'
unique_nel,index_nel=tf.unique(tf.reshape(electrons_produced[:,:,0,0],[-1]))


ions_min_initial = self.source._fetch('ions_produced_min', data_tensor=data_tensor)[:, 0, o]
ions_min_initial = tf.repeat(ions_min_initial, tf.shape(ions_produced)[1], axis=1)
Expand All @@ -179,6 +214,7 @@ def compute_single_energy_approx(args):
# for the lowest energy
ions_produced_add = ions_produced - ions_min_initial


# Energy above which we use the approximate computation
if self.is_ER:
cutoff_energy = 5.
Expand Down Expand Up @@ -249,8 +285,16 @@ def _simulate(self, d):
d['ions_produced'] = np.where(ni_temp < 0,
ni_temp * 0,
ni_temp)

nex_temp = np.round(stats.norm.rvs(nq*alpha*ex_ratio, np.sqrt(nq*alpha*ex_ratio*nex_fano))).astype(int)

mean_nex=nq*alpha*ex_ratio
width_nex=mean_nex*nex_fano
mean_nex=np.where(mean_nex < 0,
mean_nex * 0,
mean_nex)
width_nex=np.where(width_nex < 0,
width_nex * 0,
width_nex)
nex_temp = np.round(stats.norm.rvs(mean_nex, np.sqrt(width_nex))).astype(int)
# Don't let number of excitons go negative
nex = np.where(nex_temp < 0,
nex_temp * 0,
Expand Down
Loading
Loading