diff --git a/frb/galaxies/param_UV_duste.py b/frb/galaxies/param_UV_duste.py deleted file mode 100644 index 1c6ed710..00000000 --- a/frb/galaxies/param_UV_duste.py +++ /dev/null @@ -1,375 +0,0 @@ -# Imports - -import matplotlib.pyplot as plt -import numpy as np -import os -from pandas import read_csv, read_table -from prospect.models import priors, SedModel -from prospect.models.sedmodel import PolySedModel -from prospect.models.templates import TemplateLibrary -from prospect.sources import CSPSpecBasis -from sedpy.observate import load_filters -from prospect.utils.obsutils import fix_obs -from scipy.stats import truncnorm - -##################################################################### - -# Adding mass-metallicity relation - -class MassMet(priors.Prior): - """ - A Gaussian prior designed to approximate the Gallazzi et al. 2005 - stellar mass--stellar metallicity relationship. - """ - - prior_params = ['mass_mini', 'mass_maxi', 'z_mini', 'z_maxi'] - distribution = truncnorm - massmet = np.loadtxt('gallazzi_05_massmet.txt') - def __len__(self): - """ Hack to work with Prospector 0.3 - """ - return 2 - - def scale(self,mass): - upper_84 = np.interp(mass, self.massmet[:,0], self.massmet[:,3]) - lower_16 = np.interp(mass, self.massmet[:,0], self.massmet[:,2]) - return (upper_84-lower_16) - - def loc(self,mass): - return np.interp(mass, self.massmet[:,0], self.massmet[:,1]) - - def get_args(self,mass): - a = (self.params['z_mini'] - self.loc(mass)) / self.scale(mass) - b = (self.params['z_maxi'] - self.loc(mass)) / self.scale(mass) - return [a, b] - - @property - def range(self): - return ((self.params['mass_mini'], self.params['mass_maxi']),\ - (self.params['z_mini'], self.params['z_maxi'])) - - def bounds(self, **kwargs): - if len(kwargs) > 0: - self.update(**kwargs) - return self.range - - def __call__(self, x, **kwargs): - """Compute the value of the probability density function at x and - return the ln of that. - - :params x: - x[0] = mass, x[1] = metallicity. Used to calculate the prior - - :param kwargs: optional - All extra keyword arguments are used to update the `prior_params`. - - :returns lnp: - The natural log of the prior probability at x, scalar or ndarray of - same length as the prior object. - """ - if len(kwargs) > 0: - self.update(**kwargs) - p = np.atleast_2d(np.zeros_like(x)) - a, b = self.get_args(x[...,0]) - p[...,1] = self.distribution.pdf(x[...,1], a, b, loc=self.loc(x[...,0]), scale=self.scale(x[...,0])) - with np.errstate(invalid='ignore'): - p[...,1] = np.log(p[...,1]) - return p - - def sample(self, nsample=None, **kwargs): - """Draw a sample from the prior distribution. - - :param nsample: (optional) - Unused - """ - if len(kwargs) > 0: - self.update(**kwargs) - mass = np.random.uniform(low=self.params['mass_mini'],high=self.params['mass_maxi'],size=nsample) - a, b = self.get_args(mass) - met = self.distribution.rvs(a, b, loc=self.loc(mass), scale=self.scale(mass), size=nsample) - - return np.array([mass, met]) - - def unit_transform(self, x, **kwargs): - """Go from a value of the CDF (between 0 and 1) to the corresponding - parameter value. - - :param x: - A scalar or vector of same length as the Prior with values between - zero and one corresponding to the value of the CDF. - - :returns theta: - The parameter value corresponding to the value of the CDF given by - `x`. 
- """ - if len(kwargs) > 0: - self.update(**kwargs) - mass = x[0]*(self.params['mass_maxi'] - self.params['mass_mini']) + self.params['mass_mini'] - a, b = self.get_args(mass) - met = self.distribution.ppf(x[1], a, b, loc=self.loc(mass), scale=self.scale(mass)) - return np.array([mass,met]) - -def massmet_to_mass(massmet=None, **extras): - return 10**massmet[0] -def massmet_to_logzol(massmet=None,**extras): - return massmet[1] - -##################################################################################################### - -# Adding dust1 and gas_logz functions - -def dust2_to_dust1(dust2=None, **kwargs): - return dust2 - -def gas_logz(gas_logz=None, **kwargs): - return gas_logz - -###################################################################################################### - -# Run parameters - -run_params = {'verbose':True, - 'debug':False, - 'outfile':'FRB_190608_full_UV_newspec2', - 'output_pickles': False, - # Optimization parameters - 'do_powell': False, - 'ftol':3e-16, 'maxfev': 5000, - 'do_levenburg': True, - 'nmin': 5, - # dynesty Fitter parameters - 'nested_bound': 'multi', # bounding method - 'nested_sample': 'rwalk', # sampling method - 'nested_nlive_init': 500, # higher number increases speed of run - 'nested_nlive_batch': 500, # higher number increases speed of run - 'nested_bootstrap': 0, - 'nested_dlogz_init': 0.05, - 'nested_weight_kwargs': {"pfrac": 1.0}, - 'nested_stop_kwargs': {"post_thresh": 0.1}, - # Obs data parameters - 'objid':190608, - 'logify_spectrum':False, - 'normalize_spectrum':False, - 'luminosity_distance': None, # in Mpc - # Model parameters - 'add_neb': True, - 'add_duste': True, - # SPS parameters - 'zcontinuous': 1, - } - -# -------------- -# OBS -# -------------- -def load_spec_ascii(filename): - f = read_table(filename, delimiter = '|', header=0, names=['NA', 'wave', 'flux', 'sig', 'NA2'], - usecols =['wave', 'flux', 'sig']) - wave = np.array(f['wave']) - flux = np.array(f['flux']) - err = np.array(f['sig']) - return wave, flux, err - -def load_spec_csv(filename): - f = read_csv(filename, header=None, names=['wave', 'flux', 'err'], delimiter=',') - wave = np.array(f['wave']) - flux = np.array(f['flux']) - err = np.array(f['err']) - return wave, flux, err - -# Convert from flux per Ang to flux per Hz -def convert(flux, wave, micro=True): - """ - Convert from flux in per Angstrom to - flux in per Hz (Jy). Note: wavelength - vector must be in Ang. 
- """ - new_flux = 3.34e4*(wave**2)*flux - if micro == True: - new_flux = new_flux*1e6 - return new_flux - -def chop_wave(wave, w1, w2): - cond_1 = np.where(w1 > wave) - cond_2 = np.where(w2 < wave) - return cond_1, cond_2 - -def load_obs(objid=run_params["objid"], phottable=None, - luminosity_distance=0, snr=10, **kwargs): - - sdss = ['sdss_{}0'.format(b) for b in ['u', 'g', 'r', 'i', 'z']] - hst = ['wfc3_ir_f160w', 'wfc3_uvis_f300x'] - wise = ['wise_w1', 'wise_w2', 'wise_w3', 'wise_w4'] - - filternames = sdss + hst + wise - - # these are apparent magnitudes - - M_AB = np.array([18.9867385, 18.0239936, 17.5538606, 17.21585955, 17.087944049999997, 16.693-0.02, 19.765-0.30, 14.372155755376001+2.699, 13.834173935194169+3.339, 10.762158747454539+5.174, 8.647572481509055+6.620]) - - - # uncertainties in apparent magnitudes - M_AB_unc = np.array([0.08660249, 0.01794288, 0.01485198, 0.01709021, 0.04910865, 0.001, 0.014, 0.03, 0.04, 0.115, 0.415]) - - - # convert from apparent magnitude to flux in maggies - mags = 10**(-0.4*M_AB) - - # Find lower limits of mag uncertainty - mag_down = [x-y for (x,y) in zip(M_AB, M_AB_unc)] - - # convert mag uncertainties to flux uncertainties - flux_down = [10**(-0.4*x) for x in mag_down] - flux_uncertainty = [y-x for (x,y) in zip(mags, flux_down)] - - # Build output dictionary. - obs = {} - # This is a list of sedpy filter objects. See the - # sedpy.observate.load_filters command for more details on its syntax. - obs['filters'] = load_filters(filternames) - # This is a list of maggies, converted from mags. It should have the same - # order as `filters` above. - obs["phot_wave"] = np.array([f.wave_effective for f in load_filters(filternames)]) - obs['maggies'] = np.array(mags) - obs['maggies_unc'] = np.array(flux_uncertainty) - # Here we mask out any NaNs or infs - obs['phot_mask'] = np.isfinite(np.squeeze(mags)) - - # Adding in spectrum - spec_wave, spec_fd, spec_fd_unc = load_spec_csv('FRB190608_corrected_spec.csv') - -# spec_fd = convert(spec_fd, spec_wave, micro=True) -# spec_fd_unc = convert(spec_fd_unc, spec_wave, micro=True) - - -# c1, c2 = chop_wave(spec_wave, 5575, 5581) - -# spec_wave = np.append(spec_wave[c1], spec_wave[c2]) -# spec_fd = np.append(spec_fd[c1], spec_fd[c2]) -# spec_fd_unc = np.append(spec_fd_unc[c1], spec_fd_unc[c2]) - - obs['wavelength'] = spec_wave - obs['spectrum'] = spec_fd * 1e-10 - obs['unc'] = spec_fd_unc * 1e-10 - - # Add unessential bonus info. 
This will be stored in output - obs['objid'] = objid - - # Adding in mask of bad spectrum range - obs['mask'] = ([]) - for i in range(len(obs['wavelength'])): - obs['mask'].append(True) - for j in range(1662, 1665): - obs['mask'][j] = not obs['mask'][j] - obs = fix_obs(obs) - - return obs - -# -------------- -# SPS Object -# -------------- - -def load_sps(zcontinuous=1, compute_vega_mags=False, **extras): - sps = CSPSpecBasis(zcontinuous=zcontinuous, - compute_vega_mags=compute_vega_mags) - return sps - - -# ----------------- -# Gaussian Process -# ------------------ - -def load_gp(**extras): - return None, None - - -# Load model function - -def load_model(object_redshift=0.11778, add_duste=True, opt_spec=True, - add_dust1 = True, massmet = True, agn = True, - add_neb=True, luminosity_distance=None, **extras): - model_params = TemplateLibrary["parametric_sfh"] - - #fixed values - model_params["imf_type"]["init"] = 1 # Chabrier - model_params["dust_type"]["init"] = 1 # Milky Way extinction law - model_params["sfh"]["init"] = 4 # non delayed-tau - model_params["logzsol"]["isfree"] = True - model_params["tau"]["isfree"] = True - model_params["dust2"]["isfree"] = True - - - # adjust priors - model_params["tau"]["prior"] = priors.LogUniform(mini=0.1, maxi=10) - model_params["mass"]["prior"] = priors.LogUniform(mini=1e8, maxi=1e12) - model_params["tage"]["prior"] = priors.TopHat(mini=0.0, maxi=12.196) - model_params["logzsol"]["prior"] = priors.TopHat(mini=-0.5,maxi=0.19) - model_params["dust2"]["prior"] = priors.TopHat(mini=0.0,maxi=2.0) - #model_params["zred"]["prior"] = priors.TopHat(mini=1.0, maxi=1.5) - - # add AGN - if agn: - model_params.update(TemplateLibrary["agn"]) - model_params['agn_tau']['isfree'] = True # optical depth - model_params['agn_tau']['prior'] = priors.LogUniform(mini=10.0, maxi=90.0) - model_params['fagn']['isfree'] = True - model_params['fagn']['prior'] = priors.LogUniform(mini=1e-5, maxi=2) - model_params['add_dust_agn'] = {'N':1, 'init':True, 'isfree':False, "units":" ", 'prior':None} - - # Add dust1 ONLY if you have a star-forming galaxy - if add_dust1: - model_params['dust1'] = {'N':1, 'init':0.5, 'isfree':False, - 'depends_on': dust2_to_dust1} - # Setting redshift - if object_redshift is not None: - - model_params["zred"]['isfree'] = False - model_params["zred"]['init'] = object_redshift - - # Add nebular emission parameters and turn nebular emission on - # Add gas_loz and gas_logu as free parameters if you have a spectrum - if add_neb: - model_params.update(TemplateLibrary["nebular"]) - model_params['gas_logu'] = {'N':1, 'init': -2, 'isfree':True, - 'prior': priors.TopHat(mini=-4, maxi=-1), 'units': 'Q_H/N_H'} - model_params['gas_logz'] = {'N':1, 'init': 0.0, 'units': 'log Z/Z_\\odot', 'depends_on': gas_logz, - 'isfree':True, 'prior': priors.TopHat(mini=-2.0, maxi=0.5)} - - # Add dust emission in FIR - if add_duste: - model_params.update(TemplateLibrary["dust_emission"]) - model_params["duste_gamma"]["isfree"] = True - model_params["duste_gamma"]["prior"] = priors.LogUniform(mini=0.001, maxi=0.15) - - model_params["duste_qpah"]["isfree"] = True - model_params["duste_qpah"]["prior"] = priors.TopHat(mini=0.5, maxi=7.0) - - model_params["duste_umin"]["isfree"] = True - model_params["duste_umin"]["prior"] = priors.TopHat(mini=0.1,maxi=25) - - # Adding massmet param - if massmet: - model_params['massmet'] = {"name": "massmet", "N": 2, "isfree": True, "init": [8.0, 0.0], - "prior": MassMet(z_mini=-0.5, z_maxi=0.10, mass_mini=9, mass_maxi=11)} - 
model_params['mass']['isfree']=False - model_params['mass']['depends_on']= massmet_to_mass - model_params['logzsol']['isfree'] =False - model_params['logzsol']['depends_on']=massmet_to_logzol - - if opt_spec: - model_params.update(TemplateLibrary["optimize_speccal"]) - # fit for normalization of spectrum - model_params['spec_norm'] = {'N': 1,'init': 1.0,'isfree': True,'prior': - priors.Normal(sigma=0.2, mean=1.0), 'units': 'f_true/f_obs'} - # Increase the polynomial size to 12 - model_params['polyorder'] = {'N': 1, 'init': 10,'isfree': False} - - run_params["opt_spec"] = True - - # Now instantiate the model using this new dictionary of parameter specifications - model = PolySedModel(model_params) - - elif opt_spec == False: - model = SedModel(model_params) - run_params["opt_spec"] = False - - return model diff --git a/frb/localize/200430_askap2first_offsets_unc.dat b/frb/localize/200430_askap2first_offsets_unc.dat deleted file mode 100644 index 48542952..00000000 --- a/frb/localize/200430_askap2first_offsets_unc.dat +++ /dev/null @@ -1,4 +0,0 @@ --2.2129000015922764 0.7064604947307017 -0.7464166229475457 -0.16104403286219238 0.9671092413987832 2.433276200515735 -2.1137805311094215 -0.17650300636159535 -9.55000000000291 3.9199999999982538 3.2700000000040745 4.730000000003201 4.169999999998254 -0.7300000000032014 2.7600000000020373 2.1199999999953434 -1.6361312769663217 0.41812046405079645 0.8950173119439376 0.38914486586843167 0.5695621886802164 1.0150548307427962 0.784233266473326 0.18181926860224296 -2.20704643148833 0.5651576983558986 1.1231929905523428 0.5179557024691571 0.7794676609458037 1.4621582170580165 1.056067114017585 0.20009633398033716 diff --git a/frb/localize/err_vs_offset_0.pdf b/frb/localize/err_vs_offset_0.pdf deleted file mode 100644 index 8406af12..00000000 Binary files a/frb/localize/err_vs_offset_0.pdf and /dev/null differ diff --git a/frb/localize/jointposition.py b/frb/localize/jointposition.py deleted file mode 100644 index c49dba8a..00000000 --- a/frb/localize/jointposition.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -import os, sys -import numpy as np -from astropy.coordinates import SkyCoord -from astropy import units as u -from matplotlib import pyplot as plt -from matplotlib.colors import BoundaryNorm -from matplotlib.ticker import MaxNLocator - -class Localisation: - def __init__(self, rastring, decstring, uncertaintymaj, uncertaintymin, uncertaintypadeg): - self.pos = SkyCoord(rastring + " " + decstring) - self.uncertaintymaj = uncertaintymaj - self.uncertaintymin = uncertaintymin - self.uncertaintypadeg = uncertaintypadeg - - def chisq(self, position): - offset = position.separation(self.pos).arcsecond - pa = position.position_angle(self.pos).degree - deltaparad = (pa - self.uncertaintypadeg)*np.pi/180 - uncertainty = np.sqrt((self.uncertaintymaj*np.cos(deltaparad))**2 + (self.uncertaintymin*np.sin(deltaparad))**2) - #print(self.pos, position, offset, uncertainty) - return (offset / uncertainty)**2 - -# The map centre is the VLBI localisation -mapcentre = SkyCoord("05h08m03.5077s +26d03m38.504s") -constraints = [] -constraints.append(Localisation("05h08m03.475s", "+26d03m38.44s", 1.25, 0.67, 140)) -constraints.append(Localisation("05h08m03.665s", "+26d03m39.50s", 1.55, 0.91, 42)) -constraints.append(Localisation("05h08m03.540s", "+26d03m39.05s", 1.14, 0.45, 0)) - -gridsize = 50 -gridspacing = 0.1 #arcsec -chisqgrid = np.zeros(gridsize*gridsize).reshape(gridsize, gridsize) -x = np.zeros(gridsize*gridsize).reshape(gridsize, gridsize) -y = 
np.zeros(gridsize*gridsize).reshape(gridsize, gridsize)
-
-for xidx in range(gridsize):
-    xoffset = (xidx - gridsize//2)*gridspacing
-    print("Working on row",xidx+1,"/",gridsize)
-    for yidx in range(gridsize):
-        yoffset = (yidx - gridsize//2)*gridspacing
-        x[xidx, yidx] = xoffset
-        y[xidx, yidx] = yoffset
-        sc = SkyCoord(mapcentre.ra + xoffset*u.arcsec, mapcentre.dec + yoffset*u.arcsec)
-        for c in constraints:
-            chisqgrid[xidx][yidx] += c.chisq(sc)
-pgrid = np.exp(-0.5*chisqgrid)  # likelihood is proportional to exp(-chi^2/2)
-pgrid /= np.sum(pgrid)
-ex = [x[0,0], x[gridsize-1,gridsize-1], y[0,0], y[gridsize-1,gridsize-1]]
-
-print("Minimum chi squared is ", np.min(chisqgrid))
-m = np.min(chisqgrid)
-
-cmap = plt.get_cmap('PiYG')
-#levels = MaxNLocator(nbins=3).tick_values(1, 10)
-levels = [m, m+2.3, m+6.17, m+11.8]
-norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
-cf = plt.contourf(x, y, chisqgrid, levels=levels, cmap=cmap)
-plt.savefig("chisqplot.png")
-
-plt.clf()
-n = 1000
-t = np.linspace(0, pgrid.max(), n)
-integral = ((pgrid >= t[:, None, None]) * pgrid).sum(axis=(1,2))
-from scipy import interpolate
-f = interpolate.interp1d(integral, t)
-t_contours = f(np.array([0.95, 0.68]))
-
-plt.imshow(pgrid, origin='lower', extent=ex, cmap="gray")
-plt.contour(pgrid, t_contours, extent=ex)
-plt.savefig("probplot.png")
-
diff --git a/frb/localize/weighted_multi_image_fit_updated.py b/frb/localize/weighted_multi_image_fit_updated.py
deleted file mode 100644
index 4e78ec85..00000000
--- a/frb/localize/weighted_multi_image_fit_updated.py
+++ /dev/null
@@ -1,286 +0,0 @@
-###### README #####
-
-# author: Clancy W. James
-# email: clancy.james@curtin.edu.au
-
-# Purpose: perform a single statistical test:
-# does fitting a mean reduce the variance? It does this using weights according to the estimated errors.
-
-# Run using:
-# python weighted_multi_image_fit_updated.py infile1 infile2 ...
-# e.g. python weighted_multi_image_fit_updated.py 190102.dat 190608.dat 190711.dat
-# Infiles have: one column per source, rows are ra_offset, dec_offset, ra_err, dec_err
-# Please use a simple text file with numbers and whitespace separation
-
-# Output: summary statistics of the fits
-
-import numpy as np
-import sys
-import os
-import scipy.stats as stats
-from matplotlib import pyplot as plt
-
-
-def load_data(infile):
-    print("Loading data infile ",infile,"...")
-    if os.path.isfile(infile):
-        try:
-            data=np.loadtxt(infile) # actual data read statement
-        except:
-            print("ERROR: error reading infile ",infile)
-            exit()
-    else:
-        print("ERROR: Could not locate input file ",infile)
-        exit()
-    dims=data.shape
-
-    if len(dims) != 2 or dims[0] != 4:
-        print("ERROR: Please ensure data is in correct format")
-        print("Four rows: ra offset, dec offset, ra err, dec err")
-        print("and one column per source.")
-        exit()
-    print("    Success! Found ",dims[1]," sources")
Found ",dims[1]," sources") - return data - - -# Main program, one big module -def main(): - - print("\n\n\n\n") - print(" #########################################################") - print(" ## Clancy's slightly less dodgy error analysis program ##") - print(" #########################################################\n\n") - - - - ########### Checking all is OK ######### - nargs=len(sys.argv) - if nargs == 1: - print("ERROR: Please enter input file type as first argument") - exit() - - nfiles=nargs-1 - - print("Reading in data from ",nfiles," input files") - data=[] - ndtot=0 - ndoff=0 - for i in np.arange(nfiles): - data.append(load_data(sys.argv[i+1])) - - - print("\n\n\n\n ########## H0: no offsets ########") - # This part of the routine tests the H0 that the measured positions and errors - # are consistent with 0,0 at the stated level of uncertainty - # it simply does this by calculating the variance in the offsets, and in - # the stated errors. - - ### estimates global sigmas ### - # estimated variance based on offsets - var=0 - var_dec=0 - var_ra=0 - nsrc=np.zeros([nfiles]) - var_w=0 - - ######## loops through sources ####### - # artificial scale - for i,frb in enumerate(data): - nsrc[i]=(frb.shape[1]) - # weight each point with 1/err**2 - # we weight the measured offsets by 1/err. - # Under H0, this makes them come from a standard normal distribution - wr=frb[0,:]/frb[2,:] - wd=frb[1,:]/frb[3,:] - - # we now calculate the variances using the std normal deviations - vr = np.sum(wr**2) - vd = np.sum(wd**2) - # add these to cumulative sums over all the FRB sets - - var += vr+vd - var_ra += vr - var_dec += vd - - # I do not know what this is for - var_w += np.sum((1./frb[2,:])**2)+np.sum((1./frb[3,:])**2) - - - n=frb[0,:].size - meanr=np.sum(frb[0,:])/n - meand=np.sum(frb[1,:])/n - offr=np.abs(frb[0,:]-meanr) - offd=np.abs(frb[1,:]-meand) - - plt.figure() - plt.xlabel('Offet from unweighted mean') - plt.ylabel('Error on point') - plt.plot(offr,frb[2,:],color='red',linestyle='',marker='x',label='ra') - plt.plot(offd,frb[3,:],color='blue',linestyle='',marker='o',label='dec') - plt.legend() - plt.tight_layout() - plt.savefig('err_vs_offset_'+str(i)+'.pdf') - plt.close() - - # Under H0, var, var_ra, and var_dec should come from a chi2 distribution with - # nsrc_tot degrees of freedom - - # calculate the degrees of freedom for var_ra (nsrc) and var (2*this due to ra and dec) - nsrc_tot=np.sum(nsrc) - ndat_tot=nsrc_tot*2 - - # Under H0, the following should be ~1. This helps estimate how much larger/smaller - # the true number must be - sig_ra=(var_ra/nsrc_tot)**0.5 - sig_dec=(var_dec/nsrc_tot)**0.5 - sig=(var/nsrc_tot/2.)**0.5 - - ############ Testing that variance between ra and dec is equal IF H0 is true ########### - # We now perform an f-test for equality of variance between ra and dec - # the goal is to work out what the errors would have to be in oder to satisfy H0 - f_stat=var_ra/var_dec - #NOTE: use nsrc_tot, not nsrc_tot-1, since we *know* the means in this example - it is 0,0 (H0) - cdf=stats.f.cdf(f_stat,nsrc_tot,nsrc_tot) - # performs a two-sided test for the rtio being too small or too big. - p_val=min(cdf,1.-cdf)*2. 
-    print("Under H0, errors in ra,dec are {0:6.2f} {1:6.2f} greater than quoted in file".format(sig_ra,sig_dec))
-    print("Assuming they are intrinsically equal (two-sided p-value of {0:6.2f}), this factor is {1:6.2f}".format(p_val,sig))
-
-
-    print("\n\n\n\n ########## H1: offsets ########")
-    ### now calculates mean offsets for each source ###
-
-    mean_ra=np.zeros([nfiles])
-    mean_dec=np.zeros([nfiles])
-    mean_ra_err=np.zeros([nfiles])
-    mean_dec_err=np.zeros([nfiles])
-    var_ra_mean=0
-    var_dec_mean=0
-    var_mean=0
-    print("Calculating offsets (assuming correct errors)...")
-    print("Source ra_off (u/w scat err) [w scat err] [[input error]] dec_off (u/w scat err) [w scat err] [[input error]]:")
-    for i,frb in enumerate(data):
-
-        # This returns an estimate for the weighted mean and an error in that mean
-        # i.e. it is reduced by the sqrt N factor
-        # previously this was bugged, and did not use the optimal weighting (1/err^2)
-        mean_ra[i],mean_ra_err[i]=get_weighted_mean(frb[0,:],frb[2,:])
-        mean_dec[i],mean_dec_err[i]=get_weighted_mean(frb[1,:],frb[3,:])
-
-        # number of degrees of freedom in one dimension
-        npts=frb[0,:].size
-        ndf=npts-1. #for each coordinate
-        reldf=ndf/npts
-
-        #### standard calculations ######
-        # we now calculate the offsets given our estimate for the mean offset
-        dwr=(frb[0,:]-mean_ra[i])
-        dwd=(frb[1,:]-mean_dec[i])
-
-        # calculates unweighted scatter error
-        # first lines calc the std dev of the scatter
-        # next the error in the mean
-        sigr=(np.sum(dwr**2)/ndf)**0.5
-        sigd=(np.sum(dwd**2)/ndf)**0.5
-        sigr /= npts**0.5
-        sigd /= npts**0.5
-
-        # if the errors are correct, then the below should be a standard normal deviate
-        # i.e. any deviation from 0 is purely due to random variation
-        wr=dwr/frb[2,:]
-        wd=dwd/frb[3,:]
-
-        ###### calculations for chi2 tests only #######
-        # calculates new variance under H0: no true offset
-        # will come from a chi2 distribution with (2*) n-nfrb degrees of freedom
-        vr = np.sum(wr**2)
-        vd = np.sum(wd**2)
-        # we do this for ra and dec separately, and together
-        var_mean += vr+vd
-        var_ra_mean += vr
-        var_dec_mean += vd
-
-        ######### Calculations for error in mean ########
-        # reduces this for the weighting, i.e. so it's agnostic to the values of the weights
-        # this is identical to the std dev of the weighted mean, but re-normalised by the
-        # sum of the weights, such that it is only the *relative* weights that count
-        # the base offsets are also derived from data, not the input values of std
-        # total formula is \sqrt{ (\sum_i w (xi-xbar)^2 )/((M-1)/M * \sum_i w_i) }
-        # here the numerator is simply vr and vd
-        # this gives the standard deviation of the sample. The error in the mean
-        # will be sqrt(n) less than this, i.e. the factor (M-1)/M becomes M-1
-        wvr = vr/(ndf*np.sum(frb[2,:]**-2))
-        wvd = vd/(ndf*np.sum(frb[3,:]**-2))
-
-        errr = wvr**0.5
-        errd = wvd**0.5
-        print(" {0:d} {1:8.2f} ({2:6.2f}) [{3:6.2f}] [[{4:6.2f}]] {5:14.2f} ({6:6.2f}) [{7:6.2f}] [[{8:6.2f}]]".format(i,mean_ra[i],sigr,errr,mean_ra_err[i],mean_dec[i],sigd,errd,mean_dec_err[i]))
-        #mean_dec[i],mean_ra_err[i],mean_dec_err[i],errr,errd))
-        #print(" The chi2 from subsequent fits is ",errr,errd)
-
-    print("In the above, 'scatter error' is derived by ignoring the quoted values of")
-    print("the errors, and simply looking at the scatter of points about the mean.")
-    print("Input error is estimated by assuming that the quoted errors in the input file are correct.")
-    print("The scatter error is more robust.")
-    print("The input error is more rigorously correct IF the inputs are sensible.")
-    print("In general, take the input error unless the scatter error is significantly different.")
-
-    all_means=np.concatenate((mean_ra,mean_dec))
-    all_errs=np.concatenate((mean_ra_err,mean_dec_err))
-    sigma_offsets=(np.sum(all_means**2/all_errs)/np.sum(1./all_errs))**0.5
-    #print("Standard deviation of offsets about 0 is {0:6.2f}".format(sigma_offsets))
-
-    # performs an f-test for reduction in variance between 0,0 model and offsets in ra,dec
-    # NOTE: if setting ra OR dec offset to zero, then the result will be different
-    print("\n\n\n\n#### Testing for rejection of H0 ####")
-    # test for reduction in variance
-    # calculates f-stat for Chow test
-    ndf1=nsrc_tot*2 # degrees of freedom for total number of points
-    ndf2=2*nsrc_tot-2*nfiles # df including means
-    f_stat=( (var-var_mean)/ndf1 ) / ( var_mean / ndf2 )
-    cdf=stats.f.cdf(f_stat,ndf1,ndf2)
-    # reject if F is large
-    print("Fitting offsets reduced variance by a factor of {0:6.2f}".format(var/var_mean))
-    print("Probability under the no-offset hypothesis that fitting")
-    print(" offsets would lead to at least this much reduction ")
-    print(" in variance is {0:5.2f}".format(1.-cdf))
-
-
-    print("\n\n\n\n ##### Is the new offset consistent with the errors? \n")
\n") - ndf_1d=ndf2/2 # for ra or dec only - p_both=1.-stats.chi2.cdf(var_mean,ndf2) - p_ra=1.-stats.chi2.cdf(var_ra_mean,ndf_1d) - p_dec=1.-stats.chi2.cdf(var_dec_mean,ndf_1d) - print("Coord Variance ndf p \n") - print("Both: {0:10.2f} {1:4d} {2:7.2E}".format(var_mean,int(ndf2),p_both)) - print(" Ra: {0:10.2f} {1:4d} {2:7.2E}".format(var_ra_mean,int(ndf_1d),p_ra)) - print(" Dec: {0:10.2f} {1:4d} {2:7.2E}".format(var_dec_mean,int(ndf_1d),p_dec)) - print("(If p-values are low, then random errors will be larger than given in inputs.)") - print("(If p-values are high, then random errors are consistent with inputs.)") - - - - # we now estimate the error on the mean using observed differences - print("\n\n\n ##### Estimating new mean from observed offsets ####\n") - - crit_90=stats.f.ppf(0.9,ndf1,ndf2) - var_mean_90=(var/ndf1) / (1./ndf1 + crit_90/ndf2) - std_dev_offsets=((var-var_mean_90)/var_w)**0.5 - - print("\n\n\n\b### Estimating characteristic offset scale which could have been detected #####") - print("At a 90% confidence level, the critical reduction in variance would be {0:6.2f}".format(var/var_mean_90)) - print(" This corresponds to an average std deviation in offsets of {0:8.2f}".format(std_dev_offsets)) - print(" CAUTION: this is only an estimate, not a hard statistical statement") - -# calculates weighted means and error on them from list of observations and errors -def get_weighted_mean(obs,err): - # we use the weight as 1/err**2 - mean=np.sum(obs/err**2)/np.sum(1./err**2) - # the std dev in this mean is - err=(np.sum(1./err**2))**-0.5 - #err=obs.size**0.5/np.sum(1./err**2) - return mean,err - - - -main() diff --git a/frb/tests/files/traceplot.png b/frb/tests/files/traceplot.png deleted file mode 100644 index d032e3f5..00000000 Binary files a/frb/tests/files/traceplot.png and /dev/null differ diff --git a/frb/tests/files/tst.png b/frb/tests/files/tst.png deleted file mode 100644 index d5e2fb57..00000000 Binary files a/frb/tests/files/tst.png and /dev/null differ diff --git a/frb/tests/outfile.txt b/frb/tests/outfile.txt deleted file mode 100644 index 3fff641a..00000000 --- a/frb/tests/outfile.txt +++ /dev/null @@ -1,11 +0,0 @@ -,objname,prefix,objid -0,20180907B,FRB,55549 -1,2019hm,AT,33406 -2,2019quh,AT,45438 -3,2019qui,AT,45439 -4,2022nmq,SN,111147 -5,2022rqy,AT,114017 -6,2022uku,AT,115907 -7,2023abby,AT,142713 -8,2023abue,AT,143188 -9,2023ld,AT,123073