-
Notifications
You must be signed in to change notification settings - Fork 66
/
save.py
41 lines (29 loc) · 1.06 KB
/
save.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import tensorflow as tf
import json
from model import model_fn
"""The purpose of this script is to export a savedmodel."""
CONFIG = 'config.json'
OUTPUT_FOLDER = 'export/run00'
GPU_TO_USE = '0'
WIDTH, HEIGHT = None, None
# size of an input image,
# set (None, None) if you want inference
# for images of variable size
tf.logging.set_verbosity('INFO')
params = json.load(open(CONFIG))
model_params = params['model_params']
config = tf.ConfigProto()
config.gpu_options.visible_device_list = GPU_TO_USE
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(
model_dir=model_params['model_dir'],
session_config=config
)
estimator = tf.estimator.Estimator(model_fn, params=model_params, config=run_config)
def serving_input_receiver_fn():
    """Build the input receiver used by `export_savedmodel`.

    The exported model accepts a uint8 image batch under the placeholder
    name 'image_tensor' and feeds the model float images scaled to [0, 1].
    """
    raw_images = tf.placeholder(
        dtype=tf.uint8,
        shape=[None, HEIGHT, WIDTH, 3],
        name='image_tensor'
    )
    scaled_images = tf.to_float(raw_images)*(1.0/255.0)
    features = {'images': scaled_images}
    receiver_tensors = {'images': raw_images}
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
# Export the trained estimator as a SavedModel; export_savedmodel writes a
# timestamped subdirectory under OUTPUT_FOLDER containing graph and weights.
estimator.export_savedmodel(
    OUTPUT_FOLDER, serving_input_receiver_fn
)