
Commit

WIP. Fix dropout transformer
dboyliao committed Jun 11, 2019
1 parent ddca306 commit 9bb77c5
Showing 8 changed files with 69 additions and 11 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -29,7 +29,7 @@
  ]},
  install_requires=[
    'Jinja2',
    'tensorflow',
    'tensorflow==1.13.1',
    'idx2numpy',
    'attrs',
    'click',
41 changes: 41 additions & 0 deletions tests/test_transformer/test_dropout/conftest.py
@@ -1,7 +1,10 @@
from random import sample

import numpy as np
import pytest

import tensorflow as tf
from utensor_cgen.frontend.tensorflow import GraphDefParser
from utensor_cgen.utils import random_str


@@ -28,3 +31,41 @@ def dropout_graph_tuple2():
    rate = tf.placeholder(dtype=tf.float32, name='rate')
    drop = tf.nn.dropout(x, rate=rate, name=random_str(10))
  return graph.as_graph_def(), [drop.op.name]


@pytest.fixture(name='vgg_ugraph')
def gen_vgg_graph():
  graph = tf.Graph()
  with graph.as_default():
    x = tf.placeholder(dtype=tf.float32, shape=[None, 2048, 2048, 3], name='input_x')
    rate = tf.placeholder(dtype=tf.float32, name='rate')
    in_feat = x
    num_layers = sample([3, 4, 5], 1)[0]
    for i in range(1, num_layers+1):
      ksize = sample([2, 3, 5], 1)[0]
      in_channel = in_feat.shape.as_list()[-1]
      out_channel = sample([3, 5, 10], 1)[0]
      stride = sample([1, 2], 1)[0]
      kernel = tf.constant(
        np.random.rand(ksize, ksize, in_channel, out_channel),
        dtype=tf.float32,
        name='kernel_{}'.format(i)
      )
      in_feat = tf.nn.conv2d(
        in_feat,
        kernel,
        strides=[1, stride, stride, 1],
        padding='VALID',
        name='feat_map_{}'.format(i)
      )
      in_feat = tf.nn.relu(in_feat, name='relu_{}'.format(i))
      in_feat = tf.nn.max_pool(
        in_feat,
        ksize=[1, ksize, ksize, 1],
        strides=[1, stride, stride, 1],
        name='pool_{}'.format(i),
        padding='SAME',
      )
      if i != num_layers:
        in_feat = tf.nn.dropout(in_feat, rate=rate, name='dropout_{}'.format(i))
  return GraphDefParser.parse(graph.as_graph_def(), output_nodes=[in_feat.op.name])
@@ -42,3 +42,12 @@ def test_dropout_trans_2(dropout_graph_tuple2):
  new_ugraph = trans.transform(ugraph)
  assert len(new_ugraph.ops_info) == 1
  assert 'x' in new_ugraph.ops_info


def test_dropout_vgg(vgg_ugraph):
  trans = DropoutTransformer()
  from utensor_cgen.ir.misc.graph_viz import viz_graph
  viz_graph(trans.pattern_ugraph, 'dropout_pattern')
  new_ugraph = trans.transform(vgg_ugraph)
  for op_name in new_ugraph.ops_info:
    assert not op_name.startswith('dropout')
2 changes: 1 addition & 1 deletion utensor_cgen/backend/operators.py
@@ -21,7 +21,7 @@ class OperatorFactory():
  def createOperatorSnippet(self, op_info, **kwargs):
    op_type = op_info.op_type
    if op_type not in self._operators:
      err_msg = "unsupported op type in uTensor: {}".format(op_type)
      err_msg = "unsupported op type in uTensor: {op.name}, {op.op_type}".format(op=op_info)
      raise ValueError(err_msg)

    op = self._operators[op_type](op_info, **kwargs) # Create desired object
4 changes: 3 additions & 1 deletion utensor_cgen/ir/base.py
@@ -122,7 +122,7 @@ def move_into(self, ugraph):
"""
self._ugraph = ugraph

@attr.s(cmp=False)
@attr.s(cmp=False, repr=False)
class OperationInfo(IRBase, _NoShallowCopyMixin):
"""
name : str
@@ -281,6 +281,8 @@ def __getitem__(self, tensor_idx):
      )
    return self.output_tensors[tensor_idx]

  def __repr__(self):
    return str((self.name, self.op_type))

class MetaOperationInfo(OperationInfo):

3 changes: 2 additions & 1 deletion utensor_cgen/logger.py
@@ -1,12 +1,13 @@
#-*- coding: utf8 -*-
import logging
import os
import sys

__all__ = ['logger']


logger = logging.getLogger(name='utensor-cli')
logger.setLevel(logging.INFO)
logger.setLevel(os.environ.get('UTENSOR_LOG_LEVEL', logging.INFO))
_fmt = logging.Formatter(fmt='[%(levelname)s %(filename)s %(funcName)s @ %(lineno)s] %(message)s')
_handler = logging.StreamHandler(sys.stdout)
_handler.formatter = _fmt
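A usage sketch for the new environment-variable lookup (an assumption based on the hunk above; it relies on logging.Logger.setLevel accepting level names such as 'DEBUG', and the variable must be set before utensor_cgen.logger is first imported):

import os
os.environ['UTENSOR_LOG_LEVEL'] = 'DEBUG'  # hypothetical value, set before the import below

from utensor_cgen.logger import logger
logger.debug('debug output is now visible')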
3 changes: 2 additions & 1 deletion utensor_cgen/matcher/_matcher_impl.py
@@ -8,6 +8,7 @@

from utensor_cgen.ir import (MetaOperationInfo, OperationInfo, uTensorGraph,
                             uTensorGraphView)
from utensor_cgen.logger import logger
from utensor_cgen.matcher._morphism import Morphism
from utensor_cgen.utils import (ops_bfs_queue, prune_graph, random_str,
                                topologic_order_graph)
@@ -462,4 +463,4 @@ def is_done(self):
    a state is done, if
    1. the patrn_bfs_queue is empty
    """
    return not self.patrn_bfs_queue
    return not (self.patrn_bfs_queue and self.sub_bfs_queue)
16 changes: 10 additions & 6 deletions utensor_cgen/transformer/ns_transformer.py
@@ -31,8 +31,6 @@ def transform(self, ugraph):
      elif op_type == 'BiasAdd':
        op_info = ugraph.ops_info[node_name]
        op_info.op_type = 'Add'


    return ugraph


@@ -59,12 +57,18 @@ class DropoutTransformer(Transformer):
  def pattern_ugraph(self):
    graph = tf.Graph()
    with graph.as_default():
      dummy_x = tf.constant(np.random.rand(10), dtype=tf.float32, name='dummy_x')
      dummy_rate = tf.constant(0.5, dtype=tf.float32, name='dummy_rate')
      dummy_x = tf.constant(np.random.rand(10, 10), dtype=tf.float32, name='dummy_x')
      dummy_rate = tf.placeholder(dtype=tf.float32, name='dummy_rate')
      dropout = tf.nn.dropout(dummy_x, rate=dummy_rate, name='dropout')
    patrn_ugraph = GraphDefParser.parse(graph.as_graph_def(), output_nodes=[dropout.op.name])
    patrn_ugraph['dropout/truediv'].replace_with_null_input_tensor(0) # replace dummy_x
    patrn_ugraph['dropout/sub'].replace_with_null_input_tensor(1) # replce dummy_rate
    from utensor_cgen.ir.misc.graph_viz import viz_graph
    viz_graph(patrn_ugraph, 'ori_pattrn')
    # replace dummy_x
    patrn_ugraph['dropout/truediv'].replace_with_null_input_tensor(0)
    # # replace dummy_rate
    patrn_ugraph['dropout/sub'].replace_with_null_input_tensor(1)
    # # replace other tensors
    patrn_ugraph['dropout/random_uniform/RandomUniform'].replace_with_null_input_tensor(0)
    patrn_ugraph = prune_graph(patrn_ugraph)
    topologic_order_graph(patrn_ugraph)
    return patrn_ugraph
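For context, a minimal sketch of how this pattern graph might be exercised end to end. The toy graph and the node names below are made up for illustration; GraphDefParser.parse and DropoutTransformer().transform are taken from the hunks above, while the import path for DropoutTransformer is assumed from the file name utensor_cgen/transformer/ns_transformer.py:

import tensorflow as tf

from utensor_cgen.frontend.tensorflow import GraphDefParser
from utensor_cgen.transformer.ns_transformer import DropoutTransformer  # assumed import path

graph = tf.Graph()
with graph.as_default():
  x = tf.placeholder(tf.float32, shape=[None, 10], name='x')
  rate = tf.placeholder(tf.float32, name='rate')
  out = tf.nn.dropout(x, rate=rate, name='dropout_1')

# parse the GraphDef, then strip the matched dropout subgraph
ugraph = GraphDefParser.parse(graph.as_graph_def(), output_nodes=[out.op.name])
new_ugraph = DropoutTransformer().transform(ugraph)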
