From 03c43eda612db45565f5a28849b3c0c16fb18ec6 Mon Sep 17 00:00:00 2001
From: Lindley Graham
Date: Tue, 17 May 2016 13:55:34 -0400
Subject: [PATCH] added first draft of logging #155

---
 bet/calculateP/simpleFunP.py       | 17 +++++++++--------
 bet/calculateP/voronoiHistogram.py |  9 +++++----
 bet/postProcess/postTools.py       |  7 ++++---
 bet/sample.py                      |  6 +++---
 bet/sampling/adaptiveSampling.py   | 18 +++++++++---------
 bet/sensitivity/chooseQoIs.py      | 24 ++++++++++++------------
 6 files changed, 42 insertions(+), 39 deletions(-)

diff --git a/bet/calculateP/simpleFunP.py b/bet/calculateP/simpleFunP.py
index 876c171a..eba0b927 100644
--- a/bet/calculateP/simpleFunP.py
+++ b/bet/calculateP/simpleFunP.py
@@ -8,7 +8,7 @@
 from bet.Comm import comm, MPI
 import numpy as np
 import bet.calculateP.voronoiHistogram as vHist
-import collections
+import collections, logging
 import bet.util as util
 import bet.sample as samp
 
@@ -190,9 +190,9 @@ def normal_normal(data_set, Q_ref, M, std, num_d_emulate=1E6):
     covariance = std**2
 
     d_distr_samples = np.zeros((M, len(Q_ref)))
-    print "d_distr_samples.shape", d_distr_samples.shape
-    print "Q_ref.shape", Q_ref.shape
-    print "std.shape", std.shape
+    logging.info("d_distr_samples.shape %s", d_distr_samples.shape)
+    logging.info("Q_ref.shape %s", Q_ref.shape)
+    logging.info("std.shape %s", std.shape)
 
     if comm.rank == 0:
         for i in range(len(Q_ref)):
@@ -422,14 +422,15 @@ def uniform_hyperrectangle_binsize(data_set, Q_ref, bin_size,
     else:
         if not len(center_pts_per_edge) == dim:
             center_pts_per_edge = np.ones((dim,))
-            print 'Warning: center_pts_per_edge dimension mismatch.'
-            print 'Using 1 in each dimension.'
+            msg = 'center_pts_per_edge dimension mismatch. '
+            msg += 'Using 1 in each dimension.'
+            logging.warning(msg)
         if np.any(np.less(center_pts_per_edge, 0)):
-            print 'Warning: center_pts_per_edge must be greater than 0'
+            logging.warning('center_pts_per_edge must be greater than 0')
     if not isinstance(bin_size, collections.Iterable):
         bin_size = bin_size*np.ones((dim,))
     if np.any(np.less(bin_size, 0)):
-        print 'Warning: center_pts_per_edge must be greater than 0'
+        logging.warning('bin_size must be greater than 0')
 
     sur_domain = np.array([np.min(data, 0), np.max(data, 0)]).transpose()
 
diff --git a/bet/calculateP/voronoiHistogram.py b/bet/calculateP/voronoiHistogram.py
index 2d2f4ce4..d6866c9f 100644
--- a/bet/calculateP/voronoiHistogram.py
+++ b/bet/calculateP/voronoiHistogram.py
@@ -7,6 +7,7 @@
 volumes of these cells.
 
 """
+import logging
 import numpy as np
 import bet.util as util
 import bet.sample as samp
@@ -50,7 +51,7 @@ def center_and_layer1_points_binsize(center_pts_per_edge, center, r_size,
     if np.any(np.greater(r_size, rect_width)):
         msg = "The hyperrectangle defined by this size extends outside the "
         msg += "original domain."
-        print msg
+        logging.warning(msg)
 
     # determine the locations of the points for the 1st bounding layer
     layer1_left = rect_domain[:, 0]-rect_width/(2*center_pts_per_edge)
@@ -101,7 +102,7 @@ def center_and_layer1_points(center_pts_per_edge, center, r_ratio, sur_domain):
     if np.all(np.greater(r_ratio, 1)):
         msg = "The hyperrectangle defined by this ratio is larger than the"
         msg += " original domain."
-        print msg
+        logging.warning(msg)
 
     # determine r_size from the width of the surrounding domain
     r_size = r_ratio*(sur_domain[:, 1]-sur_domain[:, 0])
@@ -141,11 +142,11 @@ def edges_regular(center_pts_per_edge, rect_domain, sur_domain):
     if np.any(np.greater_equal(sur_domain[:, 0], rect_domain[:, 0])):
         msg = "The hyperrectangle defined by this size is larger than the"
         msg += " original domain."
-        print msg
+        logging.warning(msg)
     elif np.any(np.less_equal(sur_domain[:, 1], rect_domain[:, 1])):
         msg = "The hyperrectangle defined by this size is larger than the"
         msg += " original domain."
-        print msg
+        logging.warning(msg)
 
     rect_edges = list()
     rect_and_sur_edges = list()
diff --git a/bet/postProcess/postTools.py b/bet/postProcess/postTools.py
index 4f5318ce..07a71379 100644
--- a/bet/postProcess/postTools.py
+++ b/bet/postProcess/postTools.py
@@ -7,6 +7,7 @@
 import numpy as np
 import scipy.io as sio
 import bet.sample as sample
+import logging
 
 class dim_not_matching(Exception):
     """
@@ -380,9 +381,9 @@ def compare_yield(sort_ind, sample_quality, run_param, column_headings=None):
     """
     if column_headings == None:
         column_headings = "Run parameters"
-    print "Sample Set No., Quality, "+ str(column_headings)
+    logging.info("Sample Set No., Quality, " + str(column_headings))
     for i in reversed(sort_ind):
-        print i, sample_quality[i], np.round(run_param[i], 3)
+        logging.info("%s %s %s", i, sample_quality[i], np.round(run_param[i], 3))
 
 def in_high_prob(data, rho_D, maximum, sample_nos=None):
     """
@@ -411,7 +412,7 @@ def in_high_prob(data, rho_D, maximum, sample_nos=None):
     else:
         rD = rho_D(data[sample_nos, :])
     adjusted_total_prob = int(sum(rD)/maximum)
-    print "Samples in box "+str(adjusted_total_prob)
+    logging.info("Samples in box " + str(adjusted_total_prob))
     return adjusted_total_prob
 
 def in_high_prob_multi(results_list, rho_D, maximum, sample_nos_list=None):
diff --git a/bet/sample.py b/bet/sample.py
index 204176ca..cc6f6a9f 100644
--- a/bet/sample.py
+++ b/bet/sample.py
@@ -8,7 +8,7 @@
     :class:`bet.sample.dim_not_matching`
 
 """
-import os, warnings
+import os, logging
 import numpy as np
 import scipy.spatial as spatial
 import scipy.io as sio
@@ -82,7 +82,7 @@ def load_sample_set(file_name, sample_set_name=None):
     if sample_set_name+"_dim" in mdat.keys():
         loaded_set = sample_set(np.squeeze(mdat[sample_set_name+"_dim"]))
     else:
-        warnings.warn("No sample_set named {} with _dim in file".\
+        logging.info("No sample_set named {} with _dim in file".\
                 format(sample_set_name))
         return None
 
@@ -911,7 +911,7 @@ def __init__(self, input_sample_set, output_sample_set,
         if output_sample_set is not None:
             self.check_nums()
         else:
-            warnings.warn("No output_sample_set")
+            logging.info("No output_sample_set")
 
     def check_nums(self):
         """
diff --git a/bet/sampling/adaptiveSampling.py b/bet/sampling/adaptiveSampling.py
index c9e6ae11..0e941231 100644
--- a/bet/sampling/adaptiveSampling.py
+++ b/bet/sampling/adaptiveSampling.py
@@ -16,7 +16,7 @@
 import scipy.io as sio
 import bet.sampling.basicSampling as bsam
 import bet.util as util
-import math, os, glob
+import math, os, glob, logging
 from bet.Comm import comm
 import bet.sample as sample
 
@@ -274,7 +274,7 @@ def generalized_chains(self, input_obj, t_set, kern,
         min_ratio = t_set.min_ratio
 
         if not hot_start:
-            print "COLD START"
+            logging.info("COLD START")
             step_ratio = t_set.init_ratio*np.ones(self.num_chains_pproc)
 
             # Initiative first batch of N samples (maybe taken from latin
@@ -302,14 +302,14 @@ def generalized_chains(self, input_obj, t_set, kern,
             # LOAD FILES
             if hot_start == 1: # HOT START FROM PARTIAL RUN
                 if comm.rank == 0:
-                    print "HOT START from partial run"
+                    logging.info("HOT START from partial run")
                 # Find and open save files
                 save_dir = os.path.dirname(savefile)
                 base_name = os.path.dirname(savefile)
                 mdat_files = glob.glob(os.path.join(save_dir,
                         "proc*_{}".format(base_name)))
                 if len(mdat_files) == 0:
-                    print "HOT START using serial file"
+                    logging.info("HOT START using serial file")
                     mdat = sio.loadmat(savefile)
                     disc = sample.load_discretization(savefile)
                     kern_old = np.squeeze(mdat['kern_old'])
@@ -331,7 +331,7 @@ def generalized_chains(self, input_obj, t_set, kern,
                     all_step_ratios = np.reshape(all_step_ratios,
                             (self.num_chains, -1), 'F')
                 elif hot_start == 1 and len(mdat_files) == comm.size:
-                    print "HOT START using parallel files (same nproc)"
+                    logging.info("HOT START using parallel files (same nproc)")
                     # if the number of processors is the same then set mdat to
                     # be the one with the matching processor number (doesn't
                     # really matter)
@@ -340,7 +340,7 @@ def generalized_chains(self, input_obj, t_set, kern,
                     kern_old = np.squeeze(mdat['kern_old'])
                     all_step_ratios = np.squeeze(mdat['step_ratios'])
                 elif hot_start == 1 and len(mdat_files) != comm.size:
-                    print "HOT START using parallel files (diff nproc)"
+                    logging.info("HOT START using parallel files (diff nproc)")
                     # Determine how many processors the previous data used
                     # otherwise gather the data from mdat and then scatter
                     # among the processors and update mdat
@@ -387,7 +387,7 @@ def generalized_chains(self, input_obj, t_set, kern,
                 kern_old = np.concatenate(kern_old)
             if hot_start == 2: # HOT START FROM COMPLETED RUN:
                 if comm.rank == 0:
-                    print "HOT START from completed run"
+                    logging.info("HOT START from completed run")
                 mdat = sio.loadmat(savefile)
                 disc = sample.load_discretization(savefile)
                 kern_old = np.squeeze(mdat['kern_old'])
@@ -459,10 +459,10 @@ def generalized_chains(self, input_obj, t_set, kern,
             if self.chain_length < 4:
                 pass
            elif comm.rank == 0 and (batch+1)%(self.chain_length/4) == 0:
-                print "Current chain length: "+\
-                        str(batch+1)+"/"+str(self.chain_length)
+                logging.info("Current chain length: "+\
+                        str(batch+1)+"/"+str(self.chain_length))
             disc._input_sample_set.append_values_local(input_new.\
                     get_values_local())
             disc._output_sample_set.append_values_local(output_new_values)
             all_step_ratios = np.concatenate((all_step_ratios, step_ratio))
             mdat['step_ratios'] = all_step_ratios
diff --git a/bet/sensitivity/chooseQoIs.py b/bet/sensitivity/chooseQoIs.py
index 3acbbb71..09dd5ff4 100644
--- a/bet/sensitivity/chooseQoIs.py
+++ b/bet/sensitivity/chooseQoIs.py
@@ -283,7 +283,7 @@ def chooseOptQoIs_verbose(input_set, qoiIndices=None, num_qois_return=None,
     if comm.rank == 0:
         qoi_combs = np.array(list(combinations(list(qoiIndices),
             num_qois_return)))
-        print 'Possible sets of QoIs : ', qoi_combs.shape[0]
+        logging.info('Possible sets of QoIs : %s', qoi_combs.shape[0])
         qoi_combs = np.array_split(qoi_combs, comm.size)
     else:
         qoi_combs = None
@@ -396,10 +396,10 @@ def find_unique_vecs(input_set, inner_prod_tol, qoiIndices=None,
     G = G/np.tile(norm_G, (input_dim, 1, 1)).transpose(1, 2, 0)
 
     if comm.rank == 0:
-        print '*** find_unique_vecs ***'
-        print 'num_zerovec : ', len(indz), 'of (', G.shape[1],\
-                ') original QoIs'
-        print 'Possible QoIs : ', len(qoiIndices) - len(indz)
+        logging.info('*** find_unique_vecs ***')
+        logging.info('num_zerovec : %s of (%s) original QoIs',
+                len(indz), G.shape[1])
+        logging.info('Possible QoIs : %s', len(qoiIndices) - len(indz))
     qoiIndices = list(set(qoiIndices) - set(indz))
 
     # Find all num_qois choose 2 pairs of QoIs
@@ -423,7 +423,7 @@ def find_unique_vecs(input_set, inner_prod_tol, qoiIndices=None,
     unique_vecs = np.array(list(set(qoiIndices) - set(repeat_vec)))
 
     if comm.rank == 0:
-        print 'Unique QoIs : ', unique_vecs.shape[0]
+        logging.info('Unique QoIs : %s', unique_vecs.shape[0])
 
     return unique_vecs
 
@@ -552,10 +552,10 @@ def find_good_sets(input_set, good_sets_prev, unique_indices,
             good_sets_new = np.append(good_sets_new, each[1:], axis=0)
     good_sets = good_sets_new
 
-    print 'Possible sets of QoIs of size %i : '%good_sets.shape[1],\
-            np.sum(count_qois)
-    print 'Good sets of QoIs of size %i : '%good_sets.shape[1],\
-            good_sets.shape[0] - 1
+    logging.info('Possible sets of QoIs of size %i : %s',
+            good_sets.shape[1], np.sum(count_qois))
+    logging.info('Good sets of QoIs of size %i : %s',
+            good_sets.shape[1], good_sets.shape[0] - 1)
 
     comm.Barrier()
     best_sets = comm.bcast(best_sets, root=0)
@@ -664,7 +664,7 @@ def chooseOptQoIs_large_verbose(input_set, qoiIndices=None,
     unique_indices = find_unique_vecs(input_set, inner_prod_tol, qoiIndices,
         remove_zeros)
     if comm.rank == 0:
-        print 'Unique Indices are : ', unique_indices
+        logging.info('Unique Indices are : %s', unique_indices)
 
     good_sets_curr = util.fix_dimensions_vector_2darray(unique_indices)
     best_sets = []
@@ -678,6 +678,6 @@ def chooseOptQoIs_large_verbose(input_set, qoiIndices=None,
         best_sets.append(best_sets_curr)
         optsingvals_list.append(optsingvals_tensor_curr)
     if comm.rank == 0:
-        print best_sets_curr
+        logging.info(best_sets_curr)
 
     return (best_sets, optsingvals_list)
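
Note (not part of the patch): the converted calls use the stdlib root logger, which emits only WARNING and above until it is configured, so the new logging.info messages stay silent by default. Below is a minimal sketch of the configuration a BET driver script might add; the level and format string are illustrative choices, not BET API.

    # Sketch: surface the INFO-level records this patch introduces.
    import logging

    # Route INFO and above to the console with a timestamped format.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s: %(message)s")

    # The patched modules use lazy %-style formatting, e.g.
    #     logging.info("Q_ref.shape %s", Q_ref.shape)
    # so interpolation only happens if the record is actually emitted.
    logging.info("Current chain length: %s/%s", 10, 40)

Two caveats worth checking before merge: the diff adds no "import logging" hunk to bet/sensitivity/chooseQoIs.py even though that file now calls logging, so unless the module already imports it elsewhere a one-line import is still needed there; and warnings.warn in bet/sample.py is downgraded to logging.info, which hides those messages under the default WARNING threshold.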