Skip to content

Commit

Permalink
[Python] Refactor native bindings and loader to improve code coverage (
Browse files Browse the repository at this point in the history
  • Loading branch information
VivekPanyam authored Apr 11, 2020
1 parent 997b6ca commit e9ed202
Show file tree
Hide file tree
Showing 4 changed files with 147 additions and 13 deletions.
39 changes: 38 additions & 1 deletion source/neuropod/bindings/neuropod_native.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,24 @@ namespace neuropod
namespace
{

// A mapping between numpy types and Neuropod types
// TODO(vip): Share this with config_utils.cc
// A mapping between numpy types and Neuropod types
// Keys are numpy dtype names as produced by `str(arr.dtype)` on the Python side;
// values are the corresponding Neuropod TensorType enum members.
// TODO(vip): Share this with config_utils.cc
const std::unordered_map<std::string, TensorType> type_mapping = {
    // Floating point and string tensors
    {"float32", FLOAT_TENSOR},
    {"float64", DOUBLE_TENSOR},
    {"string", STRING_TENSOR},

    // Signed integer tensors
    {"int8", INT8_TENSOR},
    {"int16", INT16_TENSOR},
    {"int32", INT32_TENSOR},
    {"int64", INT64_TENSOR},

    // Unsigned integer tensors
    {"uint8", UINT8_TENSOR},
    {"uint16", UINT16_TENSOR},
    {"uint32", UINT32_TENSOR},
    {"uint64", UINT64_TENSOR},
};

py::dict infer(Neuropod &neuropod, py::dict &inputs_dict)
{
// Convert from a py::dict of numpy arrays to an unordered_map of `NeuropodTensor`s
Expand Down Expand Up @@ -127,7 +145,26 @@ PYBIND11_MODULE(neuropod_native, m)
.def(py::init([](const std::string & path,
const std::vector<BackendLoadSpec> &default_backend_overrides,
py::kwargs kwargs) { return make_neuropod(kwargs, path, default_backend_overrides); }))
.def("infer", &infer);
.def("infer", &infer)
.def("get_inputs", &Neuropod::get_inputs)
.def("get_outputs", &Neuropod::get_outputs)
.def("get_name", &Neuropod::get_name)
.def("get_platform", &Neuropod::get_platform);

py::class_<TensorSpec>(m, "TensorSpec")
.def_readonly("name", &TensorSpec::name)
.def_readonly("type", &TensorSpec::type)
.def_readonly("dims", &TensorSpec::dims);

py::class_<Dimension>(m, "Dimension")
.def_readonly("value", &Dimension::value)
.def_readonly("symbol", &Dimension::symbol);

auto type_enum = py::enum_<TensorType>(m, "TensorType");
for (const auto &item : type_mapping)
{
type_enum = type_enum.value(item.first.c_str(), item.second);
}

py::class_<BackendLoadSpec>(m, "BackendLoadSpec")
.def(py::init<const std::string &, const std::string &, const std::string &>());
Expand Down
108 changes: 99 additions & 9 deletions source/neuropod/python/loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@

from neuropod.registry import _REGISTERED_BACKENDS
from neuropod.utils.dtype_utils import maybe_convert_bindings_types
from neuropod.backends.neuropod_executor import NeuropodExecutor

# Add the script's directory to the PATH so we can find the worker binary
os.environ["PATH"] += ":" + os.path.dirname(os.path.realpath(__file__))
Expand Down Expand Up @@ -63,7 +62,25 @@ def load_installed_backends():
load_installed_backends()


class NativeNeuropodExecutor(NeuropodExecutor):
def _convert_native_shape_to_list(dims):
"""
Takes a list of `neuropod_native.Dimension` objects and converts to a list of python types
"""
out = []
for dim in dims:
if dim.value == -2:
# It's a symbol
out.append(dim.symbol)
elif dim.value == -1:
# Any shape is okay
out.append(None)
else:
out.append(dim.value)

return out


class NativeNeuropodExecutor:
"""
Executes a Neuropod using the native bindings
"""
Expand All @@ -72,21 +89,94 @@ def __init__(self, neuropod_path, **kwargs):
"""
Load a Neuropod using the native bindings
:param neuropod_path: The path to a python neuropod package
:param neuropod_path: The path to a neuropod package
"""
super(NativeNeuropodExecutor, self).__init__(neuropod_path)

# Load the model
from neuropod.neuropod_native import Neuropod as NeuropodNative

self.model = NeuropodNative(
neuropod_path, _REGISTERED_BACKENDS, use_ope=True, **kwargs
)

def forward(self, inputs):
@property
def name(self):
    """The name of the loaded neuropod, as reported by the native bindings."""
    return self.model.get_name()

@property
def platform(self):
    """The platform of the backend of the loaded neuropod."""
    return self.model.get_platform()

@property
def inputs(self):
    """
    The input spec of the loaded neuropod as a list of dicts describing the
    expected input format.
    Ex: [{"name": "x", "dtype": "float32", "shape": [None,]}]
    """
    return [
        {
            "name": spec.name,
            "dtype": spec.type.name,
            "shape": _convert_native_shape_to_list(spec.dims),
        }
        for spec in self.model.get_inputs()
    ]

@property
def outputs(self):
    """
    The output spec of the loaded neuropod as a list of dicts describing the
    format of the model's output.
    Ex: [{"name": "z", "dtype": "float32", "shape": [None,]}]
    """
    return [
        {
            "name": spec.name,
            "dtype": spec.type.name,
            "shape": _convert_native_shape_to_list(spec.dims),
        }
        for spec in self.model.get_outputs()
    ]

def infer(self, inputs):
    """
    Run inference using the specified inputs.

    :param inputs: A dict mapping input names to values. This must match the input
                   spec in the neuropod config for the loaded model.
                   Ex: {'x1': np.array([5]), 'x2': np.array([6])}

                   *Note:* all the keys in this dict must be strings and all the
                   values must be numpy arrays

    :returns: A dict mapping output names to values. This is checked to ensure that it
              matches the spec in the neuropod config for the loaded model. All the keys
              in this dict are strings and all the values are numpy arrays.
    """
    # Coerce dtypes the native bindings can't accept directly, then run the model
    converted = maybe_convert_bindings_types(inputs)
    return self.model.infer(converted)

def __enter__(self):
    """Enter the context manager; returns this executor unchanged."""
    # Needed in order to be used as a contextmanager
    return self

def __exit__(self, *args):
    """Exit the context manager. No cleanup is performed here (intentional no-op)."""
    # Needed in order to be used as a contextmanager
    pass


def load_neuropod(neuropod_path, _always_use_native=True, **kwargs):
"""
Expand All @@ -98,12 +188,12 @@ def load_neuropod(neuropod_path, _always_use_native=True, **kwargs):
to `None` will attempt to run this model on CPU.
:param load_custom_ops: Whether or not to load custom ops included in the model.
"""
# If we were given a zipfile, extract it to a temp dir and use it
neuropod_path = zip_loader.extract_neuropod_if_necessary(neuropod_path)

if _always_use_native:
return NativeNeuropodExecutor(neuropod_path, **kwargs)

# If we were given a zipfile, extract it to a temp dir and use it
neuropod_path = zip_loader.extract_neuropod_if_necessary(neuropod_path)

# Figure out what type of neuropod this is
neuropod_config = config_utils.read_neuropod_config(neuropod_path)
platform = neuropod_config["platform"]
Expand Down
11 changes: 9 additions & 2 deletions source/neuropod/python/tests/test_randomify.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
#
import numpy as np
import os
import six
import shutil
import unittest
from tempfile import mkdtemp
Expand Down Expand Up @@ -61,14 +62,20 @@ def test_some_inputs(self):
self.assertGreater(result["out_string_vector"].shape[0], 0)

def test_invalid_input_name(self):
with self.assertRaises(ValueError):
with six.assertRaisesRegex(
self, (ValueError, RuntimeError), "are not found in the input spec"
):
neuropod = load_neuropod(TestSpecValidation.neuropod_path)
neuropod.infer(
{"bogus": np.asarray([[1.1, 2.2], [0, 1], [2, 3]], dtype=np.float32)}
)

def test_invalid_shape(self):
with self.assertRaises(ValueError):
with six.assertRaisesRegex(
self,
(ValueError, RuntimeError),
"in the input spec is expected to have 2 dimensions, but had 1",
):
neuropod = load_neuropod(TestSpecValidation.neuropod_path)
neuropod.infer({"in_float32_matrix": np.asarray([3], dtype=np.float32)})

Expand Down
2 changes: 1 addition & 1 deletion source/neuropod/python/tests/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def check_addition_model(neuropod_path):
Validate that the inputs and outputs of the loaded neuropod match
the problem spec
"""
with load_neuropod(neuropod_path, _always_use_native=False) as neuropod:
with load_neuropod(neuropod_path) as neuropod:
target = get_addition_model_spec()

# Validate that the specs match
Expand Down

0 comments on commit e9ed202

Please sign in to comment.