Fix TensorFlow API usage to be compatible with v0.x and v1.x #36

Open
wants to merge 6 commits into master
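All of the changed files apply the same idea: choose the summary functions once at import time, based on the installed TensorFlow version, and have the rest of the script call the resulting aliases. A condensed sketch of that pattern, using the alias names that appear in the diffs below (TensorFlow 1.0 moved these ops under tf.summary.* and removed the old top-level names):

```python
import tensorflow as tf

# Bind version-appropriate summary functions to a single set of names.
if tf.__version__.startswith("1."):
    scalar_summary = tf.summary.scalar
    histogram_summary = tf.summary.histogram
    image_summary = tf.summary.image
    merge_summary = tf.summary.merge
    merge_all_summaries = tf.summary.merge_all
else:
    scalar_summary = tf.scalar_summary
    histogram_summary = tf.histogram_summary
    image_summary = tf.image_summary
    merge_summary = tf.merge_summary
    merge_all_summaries = tf.merge_all_summaries
```

The aliases are then called exactly like the originals, e.g. scalar_summary('Loss', self.loss) in place of tf.scalar_summary('Loss', self.loss).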
19 changes: 13 additions & 6 deletions Season1/10-11/dp.py
@@ -9,6 +9,13 @@
# our own module
import load

+if tf.__version__.startswith("1."):
+    merge_all_summaries, scalar_summary, histogram_summary = tf.summary.merge_all, tf.summary.scalar, tf.summary.histogram
+else:
+    merge_all_summaries, scalar_summary, histogram_summary = tf.merge_all_summaries, tf.scalar_summary, tf.histogram_summary


train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels

@@ -88,17 +95,17 @@ def define_graph(self):
tf.truncated_normal([image_size * image_size, self.num_hidden], stddev=0.1), name='fc1_weights'
)
fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]), name='fc1_biases')
- tf.histogram_summary('fc1_weights', fc1_weights)
- tf.histogram_summary('fc1_biases', fc1_biases)
+ histogram_summary('fc1_weights', fc1_weights)
+ histogram_summary('fc1_biases', fc1_biases)

# fully connected layer 2 --> output layer
with tf.name_scope('fc2'):
fc2_weights = tf.Variable(
tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1), name='fc2_weights'
)
fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]), name='fc2_biases')
- tf.histogram_summary('fc2_weights', fc2_weights)
- tf.histogram_summary('fc2_biases', fc2_biases)
+ histogram_summary('fc2_weights', fc2_weights)
+ histogram_summary('fc2_biases', fc2_biases)


# Now define the operations of the graph
@@ -121,7 +128,7 @@ def model(data):
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels)
)
- tf.scalar_summary('Loss', self.loss)
+ scalar_summary('Loss', self.loss)


# Optimizer.
@@ -133,7 +140,7 @@ def model(data):
self.train_prediction = tf.nn.softmax(logits, name='train_prediction')
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples), name='test_prediction')

- self.merged = tf.merge_all_summaries()
+ self.merged = merge_all_summaries()

def run(self):
'''
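A possible variation on the version check above (illustrative only, not what this PR does) is to detect the presence of the new module instead of parsing the version string, so that any TensorFlow build exposing tf.summary takes the new path:

```python
import tensorflow as tf

# Illustrative alternative: feature detection instead of version-string parsing.
if hasattr(tf, "summary") and hasattr(tf.summary, "merge_all"):
    merge_all_summaries = tf.summary.merge_all
    scalar_summary = tf.summary.scalar
    histogram_summary = tf.summary.histogram
else:
    merge_all_summaries = tf.merge_all_summaries
    scalar_summary = tf.scalar_summary
    histogram_summary = tf.histogram_summary
```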
23 changes: 15 additions & 8 deletions Season1/12-15/dp.py
@@ -9,6 +9,13 @@
# our own module
import load

+if tf.__version__.startswith("1."):
+    image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+    merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+    image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+    merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary

train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels

@@ -121,15 +128,15 @@ def define_graph(self):
[(image_size // down_scale) * (image_size // down_scale) * self.last_conv_depth, self.num_hidden], stddev=0.1))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]))

- self.train_summaries.append(tf.histogram_summary('fc1_weights', fc1_weights))
- self.train_summaries.append(tf.histogram_summary('fc1_biases', fc1_biases))
+ self.train_summaries.append(histogram_summary('fc1_weights', fc1_weights))
+ self.train_summaries.append(histogram_summary('fc1_biases', fc1_biases))

# fully connected layer 2 --> output layer
with tf.name_scope('fc2'):
fc2_weights = tf.Variable(tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1), name='fc2_weights')
fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]), name='fc2_biases')
- self.train_summaries.append(tf.histogram_summary('fc2_weights', fc2_weights))
- self.train_summaries.append(tf.histogram_summary('fc2_biases', fc2_biases))
+ self.train_summaries.append(histogram_summary('fc2_weights', fc2_weights))
+ self.train_summaries.append(histogram_summary('fc2_biases', fc2_biases))

# Now define the operations of the graph
def model(data, train=True):
@@ -154,7 +161,7 @@ def model(data, train=True):
filter_map = hidden[-1]
filter_map = tf.transpose(filter_map, perm=[2, 0, 1])
filter_map = tf.reshape(filter_map, (self.conv1_depth, 32, 32, 1))
- self.test_summaries.append(tf.image_summary('conv1_relu', tensor=filter_map, max_images=self.conv1_depth))
+ self.test_summaries.append(image_summary('conv1_relu', tensor=filter_map, max_images=self.conv1_depth))

with tf.name_scope('conv2_model'):
with tf.name_scope('convolution'):
@@ -206,7 +213,7 @@ def model(data, train=True):
logits = model(self.tf_train_samples)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
- self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+ self.train_summaries.append(scalar_summary('Loss', self.loss))

# Optimizer.
with tf.name_scope('optimizer'):
@@ -218,8 +225,8 @@ def model(data, train=True):
with tf.name_scope('test'):
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')

- self.merged_train_summary = tf.merge_summary(self.train_summaries)
- self.merged_test_summary = tf.merge_summary(self.test_summaries)
+ self.merged_train_summary = merge_summary(self.train_summaries)
+ self.merged_test_summary = merge_summary(self.test_summaries)

def run(self):
'''
18 changes: 12 additions & 6 deletions Season1/12-15/dp_refined_api.py
@@ -3,6 +3,12 @@
from sklearn.metrics import confusion_matrix
import numpy as np

+if tf.__version__.startswith("1."):
+    image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+    merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+    image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+    merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary

class Network():
def __init__(self, train_batch_size, test_batch_size, pooling_scale):
@@ -68,8 +74,8 @@ def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
self.fc_weights.append(weights)
self.fc_biases.append(biases)
- self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
- self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
+ self.train_summaries.append(histogram_summary(str(len(self.fc_weights))+'_weights', weights))
+ self.train_summaries.append(histogram_summary(str(len(self.fc_biases))+'_biases', biases))

# the input definition should be exposed as an API, rather than implemented inside the function
def define_inputs(self, *, train_samples_shape, train_labels_shape, test_samples_shape):
@@ -131,7 +137,7 @@ def model(data_flow, train=True):
logits = model(self.tf_train_samples)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
- self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+ self.train_summaries.append(scalar_summary('Loss', self.loss))

# Optimizer.
with tf.name_scope('optimizer'):
@@ -143,8 +149,8 @@ def model(data_flow, train=True):
with tf.name_scope('test'):
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')

- self.merged_train_summary = tf.merge_summary(self.train_summaries)
- self.merged_test_summary = tf.merge_summary(self.test_summaries)
+ self.merged_train_summary = merge_summary(self.train_summaries)
+ self.merged_test_summary = merge_summary(self.test_summaries)

def run(self, data_iterator, train_samples, train_labels, test_samples, test_labels):
'''
@@ -223,4 +229,4 @@ def visualize_filter_map(self, tensor, *, how_many, display_size, name):
print(filter_map.get_shape())
filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
print(how_many)
- self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
+ self.test_summaries.append(image_summary(name, tensor=filter_map, max_images=how_many))
18 changes: 12 additions & 6 deletions Season1/16-19/dp.py
@@ -3,6 +3,12 @@
from sklearn.metrics import confusion_matrix
import numpy as np

+if tf.__version__.startswith("1."):
+    image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+    merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+    image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+    merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary

class Network():
def __init__(self, train_batch_size, test_batch_size, pooling_scale,
@@ -71,8 +77,8 @@ def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
self.fc_weights.append(weights)
self.fc_biases.append(biases)
- self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
- self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
+ self.train_summaries.append(histogram_summary(str(len(self.fc_weights))+'_weights', weights))
+ self.train_summaries.append(histogram_summary(str(len(self.fc_biases))+'_biases', biases))

def apply_regularization(self, _lambda):
# L2 regularization for the fully connected parameters
@@ -149,7 +155,7 @@ def model(data_flow, train=True):
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
self.loss += self.apply_regularization(_lambda=5e-4)
- self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+ self.train_summaries.append(scalar_summary('Loss', self.loss))

# learning rate decay
global_step = tf.Variable(0)
@@ -184,8 +190,8 @@ def model(data_flow, train=True):
with tf.name_scope('test'):
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')

- self.merged_train_summary = tf.merge_summary(self.train_summaries)
- self.merged_test_summary = tf.merge_summary(self.test_summaries)
+ self.merged_train_summary = merge_summary(self.train_summaries)
+ self.merged_test_summary = merge_summary(self.test_summaries)

def run(self, data_iterator, train_samples, train_labels, test_samples, test_labels):
'''
@@ -262,4 +268,4 @@ def visualize_filter_map(self, tensor, *, how_many, display_size, name):
print(filter_map.get_shape())
filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
print(how_many)
- self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
+ self.test_summaries.append(image_summary(name, tensor=filter_map, max_images=how_many))
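One detail worth noting when image_summary is aliased to tf.summary.image: the 1.x function names its limit argument max_outputs, whereas the 0.x tf.image_summary used max_images, which is the keyword the call sites in these files still pass. A hedged sketch of a thin wrapper that would keep those call sites unchanged (the keyword rename follows the 1.x summary API; the wrapper itself is illustrative and not part of this diff):

```python
import tensorflow as tf

if tf.__version__.startswith("1."):
    def image_summary(name, tensor, max_images=3):
        # Accept the old max_images keyword and forward it as max_outputs,
        # so calls like image_summary(name, tensor=..., max_images=...) keep working.
        return tf.summary.image(name, tensor, max_outputs=max_images)
else:
    image_summary = tf.image_summary
```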
18 changes: 12 additions & 6 deletions Season1/20/dp.py
@@ -3,6 +3,12 @@
from sklearn.metrics import confusion_matrix
import numpy as np

+if tf.__version__.startswith("1."):
+    image_summary, scalar_summary = tf.summary.image, tf.summary.scalar
+    merge_summary, histogram_summary = tf.summary.merge, tf.summary.histogram
+else:
+    image_summary, scalar_summary = tf.image_summary, tf.scalar_summary
+    merge_summary, histogram_summary = tf.merge_summary, tf.histogram_summary

class Network():
def __init__(self, train_batch_size, test_batch_size, pooling_scale,
@@ -80,8 +86,8 @@ def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
self.fc_weights.append(weights)
self.fc_biases.append(biases)
- self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
- self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
+ self.train_summaries.append(histogram_summary(str(len(self.fc_weights))+'_weights', weights))
+ self.train_summaries.append(histogram_summary(str(len(self.fc_biases))+'_biases', biases))

def apply_regularization(self, _lambda):
# L2 regularization for the fully connected parameters
@@ -158,7 +164,7 @@ def model(data_flow, train=True):
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
self.loss += self.apply_regularization(_lambda=5e-4)
- self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
+ self.train_summaries.append(scalar_summary('Loss', self.loss))

# learning rate decay
global_step = tf.Variable(0)
@@ -198,8 +204,8 @@ def model(data_flow, train=True):
self.single_prediction = tf.nn.softmax(model(single_input, train=False), name='single_prediction')
tf.add_to_collection("prediction", self.single_prediction)

- self.merged_train_summary = tf.merge_summary(self.train_summaries)
- self.merged_test_summary = tf.merge_summary(self.test_summaries)
+ self.merged_train_summary = merge_summary(self.train_summaries)
+ self.merged_test_summary = merge_summary(self.test_summaries)

# Placed after the Graph definition: save this computation graph
self.saver = tf.train.Saver(tf.all_variables())
@@ -327,7 +333,7 @@ def visualize_filter_map(self, tensor, *, how_many, display_size, name):
#print(filter_map.get_shape())
filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
#print(how_many)
- self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
+ self.test_summaries.append(image_summary(name, tensor=filter_map, max_images=how_many))

def print_confusion_matrix(self, confusionMatrix):
print('Confusion Matrix:')
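A related compatibility point left untouched by these diffs: the loss in each file is still computed as tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels) with positional arguments, and TensorFlow 1.x expects this function to be called with named arguments. Keywords are accepted by both API generations, so a version-agnostic form of the expression (sketched here as it would appear inside define_graph) could be:

```python
# Keyword arguments work with both the 0.x and 1.x signatures.
self.loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=self.tf_train_labels
    )
)
```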