
added first draft of logging UT-CHG#155
lcgraham committed May 17, 2016
1 parent 27a9e22 commit 03c43ed
Showing 6 changed files with 42 additions and 39 deletions.
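Note: the logging.info() calls introduced below are silent under the default configuration, because the root logger's level is WARNING. A minimal driver-side sketch (hypothetical, not part of this commit) that makes them visible:

import logging

# INFO-level records are dropped unless the root logger is configured;
# basicConfig attaches a stream handler and lowers the threshold.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(levelname)s: %(message)s")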
17 changes: 9 additions & 8 deletions bet/calculateP/simpleFunP.py
@@ -8,7 +8,7 @@
from bet.Comm import comm, MPI
import numpy as np
import bet.calculateP.voronoiHistogram as vHist
import collections
import collections, logging
import bet.util as util
import bet.sample as samp

@@ -190,9 +190,9 @@ def normal_normal(data_set, Q_ref, M, std, num_d_emulate=1E6):
covariance = std**2

d_distr_samples = np.zeros((M, len(Q_ref)))
print "d_distr_samples.shape", d_distr_samples.shape
print "Q_ref.shape", Q_ref.shape
print "std.shape", std.shape
logging.info("d_distr_samples.shape", d_distr_samples.shape)
logging.info("Q_ref.shape", Q_ref.shape)
logging.info("std.shape", std.shape)

if comm.rank == 0:
for i in range(len(Q_ref)):
@@ -422,14 +422,15 @@ def uniform_hyperrectangle_binsize(data_set, Q_ref, bin_size,
else:
if not len(center_pts_per_edge) == dim:
center_pts_per_edge = np.ones((dim,))
print 'Warning: center_pts_per_edge dimension mismatch.'
print 'Using 1 in each dimension.'
msg = 'center_pts_per_edge dimension mismatch. '
msg += 'Using 1 in each dimension.'
logging.warning(msg)
if np.any(np.less(center_pts_per_edge, 0)):
print 'Warning: center_pts_per_edge must be greater than 0'
logging.warning('center_pts_per_edge must be greater than 0')
if not isinstance(bin_size, collections.Iterable):
bin_size = bin_size*np.ones((dim,))
if np.any(np.less(bin_size, 0)):
print 'Warning: center_pts_per_edge must be greater than 0'
logging.warning('bin_size must be greater than 0')

sur_domain = np.array([np.min(data, 0), np.max(data, 0)]).transpose()

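A note on the calls above: unlike the print statement, logging.info() does not join extra positional arguments with spaces; it merges them into the message with the % operator, so a print-style call like logging.info("Q_ref.shape", Q_ref.shape) makes the handler report a formatting error instead of the message. The corrected lines use lazy %-style placeholders, e.g. (stand-in array, for this note only):

import logging
import numpy as np

logging.basicConfig(level=logging.INFO)
Q_ref = np.zeros((5, 2))  # toy value for illustration

# The format string carries a placeholder; logging fills it in lazily,
# only when the record is actually emitted.
logging.info("Q_ref.shape %s", Q_ref.shape)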
9 changes: 5 additions & 4 deletions bet/calculateP/voronoiHistogram.py
@@ -7,6 +7,7 @@
volumes of these cells.
"""

import logging
import numpy as np
import bet.util as util
import bet.sample as samp
@@ -50,7 +51,7 @@ def center_and_layer1_points_binsize(center_pts_per_edge, center, r_size,
if np.any(np.greater(r_size, rect_width)):
msg = "The hyperrectangle defined by this size extends outside the "
msg += "original domain."
print msg
logging.warning(msg)

# determine the locations of the points for the 1st bounding layer
layer1_left = rect_domain[:, 0]-rect_width/(2*center_pts_per_edge)
@@ -101,7 +102,7 @@ def center_and_layer1_points(center_pts_per_edge, center, r_ratio, sur_domain):
if np.all(np.greater(r_ratio, 1)):
msg = "The hyperrectangle defined by this ratio is larger than the"
msg += " original domain."
print msg
logging.warning(msg)

# determine r_size from the width of the surrounding domain
r_size = r_ratio*(sur_domain[:, 1]-sur_domain[:, 0])
@@ -141,11 +142,11 @@ def edges_regular(center_pts_per_edge, rect_domain, sur_domain):
if np.any(np.greater_equal(sur_domain[:, 0], rect_domain[:, 0])):
msg = "The hyperrectangle defined by this size is larger than the"
msg += " original domain."
print msg
logging.warning(msg)
elif np.any(np.less_equal(sur_domain[:, 1], rect_domain[:, 1])):
msg = "The hyperrectangle defined by this size is larger than the"
msg += " original domain."
print msg
logging.warning(msg)

rect_edges = list()
rect_and_sur_edges = list()
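These warnings all go to the root logger. For library code a per-module logger is a common refinement (an alternative sketch, not what this commit does), since it lets callers silence or redirect one module at a time:

import logging

# A named logger, e.g. "bet.calculateP.voronoiHistogram" when placed in
# that module; callers can then adjust it without touching other modules.
logger = logging.getLogger(__name__)
logger.warning("The hyperrectangle defined by this size extends outside "
               "the original domain.")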
7 changes: 4 additions & 3 deletions bet/postProcess/postTools.py
@@ -7,6 +7,7 @@
import numpy as np
import scipy.io as sio
import bet.sample as sample
import logging

class dim_not_matching(Exception):
"""
@@ -380,9 +381,9 @@ def compare_yield(sort_ind, sample_quality, run_param, column_headings=None):
"""
if column_headings == None:
column_headings = "Run parameters"
print "Sample Set No., Quality, "+ str(column_headings)
logging.info("Sample Set No., Quality, "+ str(column_headings))
for i in reversed(sort_ind):
print i, sample_quality[i], np.round(run_param[i], 3)
logging.info("%s %s %s", i, sample_quality[i], np.round(run_param[i], 3))

def in_high_prob(data, rho_D, maximum, sample_nos=None):
"""
@@ -411,7 +412,7 @@ def in_high_prob(data, rho_D, maximum, sample_nos=None):
else:
rD = rho_D(data[sample_nos, :])
adjusted_total_prob = int(sum(rD)/maximum)
print "Samples in box "+str(adjusted_total_prob)
logging.info("Samples in box "+str(adjusted_total_prob))
return adjusted_total_prob

def in_high_prob_multi(results_list, rho_D, maximum, sample_nos_list=None):
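With print replaced by logging, the "Samples in box" report from in_high_prob() is now gated by the logger's level. A usage sketch (the density below is a toy stand-in):

import logging
import numpy as np
import bet.postProcess.postTools as postTools

logging.basicConfig(level=logging.INFO)  # required to see the report

data = np.random.rand(10, 2)
rho_D = lambda d: np.ones(d.shape[0])  # toy density, illustration only
num_in_box = postTools.in_high_prob(data, rho_D, maximum=1.0)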
6 changes: 3 additions & 3 deletions bet/sample.py
@@ -8,7 +8,7 @@
:class:`bet.sample.dim_not_matching`
"""

import os, warnings
import os, logging
import numpy as np
import scipy.spatial as spatial
import scipy.io as sio
@@ -82,7 +82,7 @@ def load_sample_set(file_name, sample_set_name=None):
if sample_set_name+"_dim" in mdat.keys():
loaded_set = sample_set(np.squeeze(mdat[sample_set_name+"_dim"]))
else:
warnings.warn("No sample_set named {} with _dim in file".\
logging.info("No sample_set named {} with _dim in file".\
format(sample_set_name))
return None

@@ -911,7 +911,7 @@ def __init__(self, input_sample_set, output_sample_set,
if output_sample_set is not None:
self.check_nums()
else:
warnings.warn("No output_sample_set")
logging.info("No output_sample_set")

def check_nums(self):
"""
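Replacing warnings.warn() with logging changes two behaviors worth noting: warnings are deduplicated per call site by default and respond to the -W flag and warnings filters, while logging emits every occurrence and filters by level and logger name. If both mechanisms stay in use, the standard library can also route warnings through logging (a sketch, not part of this commit):

import logging
import warnings

logging.basicConfig(level=logging.WARNING)
# Remaining warnings.warn() calls are redirected to the "py.warnings"
# logger, so all diagnostics share one set of handlers.
logging.captureWarnings(True)
warnings.warn("No output_sample_set")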
18 changes: 9 additions & 9 deletions bet/sampling/adaptiveSampling.py
@@ -16,7 +16,7 @@
import scipy.io as sio
import bet.sampling.basicSampling as bsam
import bet.util as util
import math, os, glob
import math, os, glob, logging
from bet.Comm import comm
import bet.sample as sample

@@ -274,7 +274,7 @@ def generalized_chains(self, input_obj, t_set, kern,
min_ratio = t_set.min_ratio

if not hot_start:
print "COLD START"
logging.info("COLD START")
step_ratio = t_set.init_ratio*np.ones(self.num_chains_pproc)

# Initiative first batch of N samples (maybe taken from latin
Expand Down Expand Up @@ -302,14 +302,14 @@ def generalized_chains(self, input_obj, t_set, kern,
# LOAD FILES
if hot_start == 1: # HOT START FROM PARTIAL RUN
if comm.rank == 0:
print "HOT START from partial run"
logging.info("HOT START from partial run")
# Find and open save files
save_dir = os.path.dirname(savefile)
base_name = os.path.basename(savefile)
mdat_files = glob.glob(os.path.join(save_dir,
"proc*_{}".format(base_name)))
if len(mdat_files) == 0:
print "HOT START using serial file"
logging.info("HOT START using serial file")
mdat = sio.loadmat(savefile)
disc = sample.load_discretization(savefile)
kern_old = np.squeeze(mdat['kern_old'])
@@ -331,7 +331,7 @@ def generalized_chains(self, input_obj, t_set, kern,
all_step_ratios = np.reshape(all_step_ratios,
(self.num_chains, -1), 'F')
elif hot_start == 1 and len(mdat_files) == comm.size:
print "HOT START using parallel files (same nproc)"
logging.info("HOT START using parallel files (same nproc)")
# if the number of processors is the same then set mdat to
# be the one with the matching processor number (doesn't
# really matter)
@@ -340,7 +340,7 @@ def generalized_chains(self, input_obj, t_set, kern,
kern_old = np.squeeze(mdat['kern_old'])
all_step_ratios = np.squeeze(mdat['step_ratios'])
elif hot_start == 1 and len(mdat_files) != comm.size:
print "HOT START using parallel files (diff nproc)"
logging.info("HOT START using parallel files (diff nproc)")
# Determine how many processors the previous data used
# otherwise gather the data from mdat and then scatter
# among the processors and update mdat
@@ -387,7 +387,7 @@ def generalized_chains(self, input_obj, t_set, kern,
kern_old = np.concatenate(kern_old)
if hot_start == 2: # HOT START FROM COMPLETED RUN:
if comm.rank == 0:
print "HOT START from completed run"
logging.info("HOT START from completed run")
mdat = sio.loadmat(savefile)
disc = sample.load_discretization(savefile)
kern_old = np.squeeze(mdat['kern_old'])
@@ -459,10 +459,10 @@ def generalized_chains(self, input_obj, t_set, kern,
if self.chain_length < 4:
pass
elif comm.rank == 0 and (batch+1)%(self.chain_length/4) == 0:
print "Current chain length: "+\
logging.info("Current chain length: "+\
str(batch+1)+"/"+str(self.chain_length)
get_values_local())
disc._output_sample_set.append_values_local(output_new_values)
all_step_ratios = np.concatenate((all_step_ratios, step_ratio))
mdat['step_ratios'] = all_step_ratios
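The hot-start messages above are guarded by comm.rank == 0, but under MPI any unguarded logging call runs once per process. One way to keep interleaved output attributable is to bake the rank into each process's log format (hypothetical setup, assuming the bet.Comm communicator):

import logging
from bet.Comm import comm

# Each MPI process tags its records with its own rank.
logging.basicConfig(level=logging.INFO,
                    format="[rank {}] %(levelname)s: %(message)s".format(comm.rank))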
24 changes: 12 additions & 12 deletions bet/sensitivity/chooseQoIs.py
@@ -283,7 +283,7 @@ def chooseOptQoIs_verbose(input_set, qoiIndices=None, num_qois_return=None,
if comm.rank == 0:
qoi_combs = np.array(list(combinations(list(qoiIndices),
num_qois_return)))
print 'Possible sets of QoIs : ', qoi_combs.shape[0]
logging.info('Possible sets of QoIs : %s', qoi_combs.shape[0])
qoi_combs = np.array_split(qoi_combs, comm.size)
else:
qoi_combs = None
@@ -396,10 +396,10 @@ def find_unique_vecs(input_set, inner_prod_tol, qoiIndices=None,
G = G/np.tile(norm_G, (input_dim, 1, 1)).transpose(1, 2, 0)

if comm.rank == 0:
print '*** find_unique_vecs ***'
print 'num_zerovec : ', len(indz), 'of (', G.shape[1],\
') original QoIs'
print 'Possible QoIs : ', len(qoiIndices) - len(indz)
logging.info('*** find_unique_vecs ***')
logging.info('num_zerovec : %s of (%s) original QoIs',
len(indz), G.shape[1])
logging.info('Possible QoIs : %s', len(qoiIndices) - len(indz))
qoiIndices = list(set(qoiIndices) - set(indz))

# Find all num_qois choose 2 pairs of QoIs
@@ -423,7 +423,7 @@

unique_vecs = np.array(list(set(qoiIndices) - set(repeat_vec)))
if comm.rank == 0:
print 'Unique QoIs : ', unique_vecs.shape[0]
logging.info('Unique QoIs : %s', unique_vecs.shape[0])

return unique_vecs

@@ -552,10 +552,10 @@ def find_good_sets(input_set, good_sets_prev, unique_indices,
good_sets_new = np.append(good_sets_new, each[1:], axis=0)
good_sets = good_sets_new

print 'Possible sets of QoIs of size %i : '%good_sets.shape[1],\
np.sum(count_qois)
print 'Good sets of QoIs of size %i : '%good_sets.shape[1],\
good_sets.shape[0] - 1
logging.info('Possible sets of QoIs of size %i : %i',
good_sets.shape[1], np.sum(count_qois))
logging.info('Good sets of QoIs of size %i : %i',
good_sets.shape[1], good_sets.shape[0] - 1)

comm.Barrier()
best_sets = comm.bcast(best_sets, root=0)
@@ -664,7 +664,7 @@ def chooseOptQoIs_large_verbose(input_set, qoiIndices=None,
unique_indices = find_unique_vecs(input_set, inner_prod_tol, qoiIndices,
remove_zeros)
if comm.rank == 0:
print 'Unique Indices are : ', unique_indices
logging.info('Unique Indices are : %s', unique_indices)

good_sets_curr = util.fix_dimensions_vector_2darray(unique_indices)
best_sets = []
@@ -678,6 +678,6 @@
best_sets.append(best_sets_curr)
optsingvals_list.append(optsingvals_tensor_curr)
if comm.rank == 0:
print best_sets_curr
logging.info(best_sets_curr)

return (best_sets, optsingvals_list)
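Unlike the calls in chooseOptQoIs_verbose() and chooseOptQoIs_large_verbose(), the find_good_sets() messages above do not appear to sit behind a comm.rank guard in the lines shown, so each MPI process would log its own copy. A rank-0 guard in the style of the rest of the commit (a sketch of the pattern, not a change this commit makes):

from bet.Comm import comm
import logging

if comm.rank == 0:
    logging.info('Good sets of QoIs of size %i : %i',
                 good_sets.shape[1], good_sets.shape[0] - 1)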
