diff --git a/Season1/10-11/dp.py b/Season1/10-11/dp.py
index 03040f2..3a45abb 100644
--- a/Season1/10-11/dp.py
+++ b/Season1/10-11/dp.py
@@ -9,6 +9,13 @@
 # our own module
 import load
 
+if tf.__version__.startswith("1."):
+	merge_all_summaries, scalar_summary, histogram_summary = tf.summary.merge_all, tf.summary.scalar, tf.summary.histogram
+
+else:
+	merge_all_summaries, scalar_summary, histogram_summary = tf.merge_all_summaries, tf.scalar_summary, tf.histogram_summary
+
+
 train_samples, train_labels = load._train_samples, load._train_labels
 test_samples, test_labels = load._test_samples, load._test_labels
 
@@ -88,8 +95,8 @@ def define_graph(self):
 				tf.truncated_normal([image_size * image_size, self.num_hidden], stddev=0.1), name='fc1_weights'
 			)
 			fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]), name='fc1_biases')
-			tf.histogram_summary('fc1_weights', fc1_weights)
-			tf.histogram_summary('fc1_biases', fc1_biases)
+			histogram_summary('fc1_weights', fc1_weights)
+			histogram_summary('fc1_biases', fc1_biases)
 
 		# fully connected layer 2 --> output layer
 		with tf.name_scope('fc2'):
@@ -97,8 +104,8 @@ def define_graph(self):
 				tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1), name='fc2_weights'
 			)
 			fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]), name='fc2_biases')
-			tf.histogram_summary('fc2_weights', fc2_weights)
-			tf.histogram_summary('fc2_biases', fc2_biases)
+			histogram_summary('fc2_weights', fc2_weights)
+			histogram_summary('fc2_biases', fc2_biases)
 
 
 		# now define the operations of the graph
@@ -121,7 +128,7 @@ def model(data):
 		self.loss = tf.reduce_mean(
 			tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels)
 		)
-		tf.scalar_summary('Loss', self.loss)
+		scalar_summary('Loss', self.loss)
 
 
 		# Optimizer.
@@ -133,7 +140,7 @@ def model(data):
 		self.train_prediction = tf.nn.softmax(logits, name='train_prediction')
 		self.test_prediction = tf.nn.softmax(model(self.tf_test_samples), name='test_prediction')
 
-		self.merged = tf.merge_all_summaries()
+		self.merged = merge_all_summaries()
 
 	def run(self):
 		'''
diff --git a/Season1/12-15/dp.py b/Season1/12-15/dp.py
index 617a8d3..28a37e6 100644
--- a/Season1/12-15/dp.py
+++ b/Season1/12-15/dp.py
@@ -9,6 +9,13 @@
 # our own module
 import load
 
+if tf.__version__.startswith("1."):
+	image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+	merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+	image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+	merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary
+
 train_samples, train_labels = load._train_samples, load._train_labels
 test_samples, test_labels = load._test_samples, load._test_labels
 
@@ -121,15 +128,15 @@ def define_graph(self):
 				[(image_size // down_scale) * (image_size // down_scale) * self.last_conv_depth, self.num_hidden], stddev=0.1))
 			fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]))
-			self.train_summaries.append(tf.histogram_summary('fc1_weights', fc1_weights))
-			self.train_summaries.append(tf.histogram_summary('fc1_biases', fc1_biases))
+			self.train_summaries.append(histogram_summary('fc1_weights', fc1_weights))
+			self.train_summaries.append(histogram_summary('fc1_biases', fc1_biases))
 
 		# fully connected layer 2 --> output layer
 		with tf.name_scope('fc2'):
 			fc2_weights = tf.Variable(tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1), name='fc2_weights')
 			fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]), name='fc2_biases')
-			self.train_summaries.append(tf.histogram_summary('fc2_weights', fc2_weights))
-			self.train_summaries.append(tf.histogram_summary('fc2_biases', fc2_biases))
+			self.train_summaries.append(histogram_summary('fc2_weights', fc2_weights))
+			self.train_summaries.append(histogram_summary('fc2_biases', fc2_biases))
 
 		# now define the operations of the graph
 		def model(data, train=True):
@@ -154,7 +161,7 @@ def model(data, train=True):
 					filter_map = hidden[-1]
 					filter_map = tf.transpose(filter_map, perm=[2, 0, 1])
 					filter_map = tf.reshape(filter_map, (self.conv1_depth, 32, 32, 1))
-					self.test_summaries.append(tf.image_summary('conv1_relu', tensor=filter_map, max_images=self.conv1_depth))
+					self.test_summaries.append(image_summary('conv1_relu', tensor=filter_map, max_images=self.conv1_depth))
 
 			with tf.name_scope('conv2_model'):
 				with tf.name_scope('convolution'):
@@ -206,7 +213,7 @@ def model(data, train=True):
 			logits = model(self.tf_train_samples)
 		with tf.name_scope('loss'):
 			self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
-			self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+			self.train_summaries.append(scalar_summary('Loss', self.loss))
 
 		# Optimizer.
 		with tf.name_scope('optimizer'):
@@ -218,8 +225,8 @@ def model(data, train=True):
 		with tf.name_scope('test'):
 			self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')
 
-		self.merged_train_summary = tf.merge_summary(self.train_summaries)
-		self.merged_test_summary = tf.merge_summary(self.test_summaries)
+		self.merged_train_summary = merge_summary(self.train_summaries)
+		self.merged_test_summary = merge_summary(self.test_summaries)
 
 	def run(self):
 		'''
diff --git a/Season1/12-15/dp_refined_api.py b/Season1/12-15/dp_refined_api.py
index 0003d8d..dd825ed 100644
--- a/Season1/12-15/dp_refined_api.py
+++ b/Season1/12-15/dp_refined_api.py
@@ -3,6 +3,12 @@
 from sklearn.metrics import confusion_matrix
 import numpy as np
 
+if tf.__version__.startswith("1."):
+	image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+	merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+	image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+	merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary
 
 class Network():
 	def __init__(self, train_batch_size, test_batch_size, pooling_scale):
@@ -68,8 +74,8 @@ def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
 		biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
 		self.fc_weights.append(weights)
 		self.fc_biases.append(biases)
-		self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
-		self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
+		self.train_summaries.append(histogram_summary(str(len(self.fc_weights))+'_weights', weights))
+		self.train_summaries.append(histogram_summary(str(len(self.fc_biases))+'_biases', biases))
 
 	# should make the definition as an exposed API, instead of implemented in the function
 	def define_inputs(self, *, train_samples_shape, train_labels_shape, test_samples_shape):
@@ -131,7 +137,7 @@ def model(data_flow, train=True):
 			logits = model(self.tf_train_samples)
 		with tf.name_scope('loss'):
 			self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
-			self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+			self.train_summaries.append(scalar_summary('Loss', self.loss))
 
 		# Optimizer.
 		with tf.name_scope('optimizer'):
@@ -143,8 +149,8 @@ def model(data_flow, train=True):
 		with tf.name_scope('test'):
 			self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')
 
-		self.merged_train_summary = tf.merge_summary(self.train_summaries)
-		self.merged_test_summary = tf.merge_summary(self.test_summaries)
+		self.merged_train_summary = merge_summary(self.train_summaries)
+		self.merged_test_summary = merge_summary(self.test_summaries)
 
 	def run(self, data_iterator, train_samples, train_labels, test_samples, test_labels):
 		'''
@@ -223,4 +229,4 @@ def visualize_filter_map(self, tensor, *, how_many, display_size, name):
 		print(filter_map.get_shape())
 		filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
 		print(how_many)
-		self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
+		self.test_summaries.append(image_summary(name, tensor=filter_map, max_images=how_many))
diff --git a/Season1/16-19/dp.py b/Season1/16-19/dp.py
index 5744261..8b784fb 100644
--- a/Season1/16-19/dp.py
+++ b/Season1/16-19/dp.py
@@ -3,6 +3,12 @@
 from sklearn.metrics import confusion_matrix
 import numpy as np
 
+if tf.__version__.startswith("1."):
+	image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+	merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+	image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+	merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary
 
 class Network():
 	def __init__(self, train_batch_size, test_batch_size, pooling_scale,
@@ -71,8 +77,8 @@ def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
 		biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
 		self.fc_weights.append(weights)
 		self.fc_biases.append(biases)
-		self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
-		self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
+		self.train_summaries.append(histogram_summary(str(len(self.fc_weights))+'_weights', weights))
+		self.train_summaries.append(histogram_summary(str(len(self.fc_biases))+'_biases', biases))
 
 	def apply_regularization(self, _lambda):
 		# L2 regularization for the fully connected parameters
@@ -149,7 +155,7 @@ def model(data_flow, train=True):
 		with tf.name_scope('loss'):
 			self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
 			self.loss += self.apply_regularization(_lambda=5e-4)
-			self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+			self.train_summaries.append(scalar_summary('Loss', self.loss))
 
 		# learning rate decay
 		global_step = tf.Variable(0)
@@ -184,8 +190,8 @@ def model(data_flow, train=True):
 		with tf.name_scope('test'):
 			self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')
 
-		self.merged_train_summary = tf.merge_summary(self.train_summaries)
-		self.merged_test_summary = tf.merge_summary(self.test_summaries)
+		self.merged_train_summary = merge_summary(self.train_summaries)
+		self.merged_test_summary = merge_summary(self.test_summaries)
 
 	def run(self, data_iterator, train_samples, train_labels, test_samples, test_labels):
 		'''
@@ -262,4 +268,4 @@ def visualize_filter_map(self, tensor, *, how_many, display_size, name):
 		print(filter_map.get_shape())
 		filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
 		print(how_many)
-		self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
+		self.test_summaries.append(image_summary(name, tensor=filter_map, max_images=how_many))
diff --git a/Season1/20/dp.py b/Season1/20/dp.py
index 230be9d..593cdfe 100644
--- a/Season1/20/dp.py
+++ b/Season1/20/dp.py
@@ -3,6 +3,12 @@
 from sklearn.metrics import confusion_matrix
 import numpy as np
 
+if tf.__version__.startswith("1."):
+	image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+	merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+	image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+	merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary
 
 class Network():
 	def __init__(self, train_batch_size, test_batch_size, pooling_scale,
@@ -80,8 +86,8 @@ def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
 		biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
 		self.fc_weights.append(weights)
 		self.fc_biases.append(biases)
-		self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
-		self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
+		self.train_summaries.append(histogram_summary(str(len(self.fc_weights))+'_weights', weights))
+		self.train_summaries.append(histogram_summary(str(len(self.fc_biases))+'_biases', biases))
 
 	def apply_regularization(self, _lambda):
 		# L2 regularization for the fully connected parameters
@@ -158,7 +164,7 @@ def model(data_flow, train=True):
 		with tf.name_scope('loss'):
 			self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
 			self.loss += self.apply_regularization(_lambda=5e-4)
-			self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+			self.train_summaries.append(scalar_summary('Loss', self.loss))
 
 		# learning rate decay
 		global_step = tf.Variable(0)
@@ -198,8 +204,8 @@ def model(data_flow, train=True):
 			self.single_prediction = tf.nn.softmax(model(single_input, train=False), name='single_prediction')
 			tf.add_to_collection("prediction", self.single_prediction)
 
-		self.merged_train_summary = tf.merge_summary(self.train_summaries)
-		self.merged_test_summary = tf.merge_summary(self.test_summaries)
+		self.merged_train_summary = merge_summary(self.train_summaries)
+		self.merged_test_summary = merge_summary(self.test_summaries)
 
 		# placed after the Graph definition: save this computation graph
 		self.saver = tf.train.Saver(tf.all_variables())
@@ -327,7 +333,7 @@ def visualize_filter_map(self, tensor, *, how_many, display_size, name):
 		#print(filter_map.get_shape())
 		filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
 		#print(how_many)
-		self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
+		self.test_summaries.append(image_summary(name, tensor=filter_map, max_images=how_many))
 
 	def print_confusion_matrix(self, confusionMatrix):
 		print('Confusion Matrix:')
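
Note on the pattern: every file in this patch gains the same version shim, binding module-level aliases either to the pre-1.0 summary functions (tf.scalar_summary and friends) or to the tf.summary namespace introduced in TensorFlow 1.0, so the rest of each file calls the aliases and never repeats the version check. A minimal self-contained sketch of the idea follows; the alias names mirror the patch, while the loss tensor is an illustrative stand-in, not code from the repo:

    import tensorflow as tf

    # Resolve the version-appropriate summary functions once, at import time.
    if tf.__version__.startswith("1."):
        scalar_summary = tf.summary.scalar            # TensorFlow 1.x API
        merge_all_summaries = tf.summary.merge_all
    else:
        scalar_summary = tf.scalar_summary            # TensorFlow 0.x API
        merge_all_summaries = tf.merge_all_summaries

    loss = tf.constant(0.5, name='loss')  # stand-in for a real loss tensor
    scalar_summary('Loss', loss)          # identical call under either version
    merged = merge_all_summaries()        # one op that emits all summaries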