More linting: indents and quote marks
jezsadler committed Nov 3, 2023
1 parent 0719017 commit 5154edf
Showing 10 changed files with 86 additions and 80 deletions.
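The diff is mechanical: it applies two lint rules throughout. Closing parentheses of multi-line calls are dedented to match the statement that opens the call, and single-quoted strings become double-quoted, with a trailing comma added when an argument list is split across lines. A minimal before/after sketch of the convention (the function names, variable, and message below are illustrative, not taken from the diff):

    # Hypothetical example, not from the commit.
    def check_single_before(values):
        # Before linting: single quotes, closing parenthesis over-indented.
        if len(values) != 1:
            raise ValueError(
                f'Expected exactly one value, got {len(values)}.'
                )

    def check_single_after(values):
        # After linting: double quotes, closing parenthesis aligned with the raise.
        if len(values) != 1:
            raise ValueError(
                f"Expected exactly one value, got {len(values)}."
            )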
6 changes: 3 additions & 3 deletions src/omlt/gbt/gbt_formulation.py
@@ -200,11 +200,11 @@ def _branching_y(tree_id, branch_node_id):
if len(branch_value) != 1:
raise ValueError(
f"The given tree_id and branch_node_id do not uniquely identify a branch value."
-                )
+            )
if len(feature_id) != 1:
raise ValueError(
f"The given tree_id and branch_node_id do not uniquely identify a feature."
-                )
+            )
feature_id = feature_id[0]
branch_value = branch_value[0]
(branch_y_idx,) = np.where(
@@ -213,7 +213,7 @@ def _branching_y(tree_id, branch_node_id):
if len(branch_y_idx) != 1:
raise ValueError(
f"The given tree_id and branch_node_id do not uniquely identify a branch index."
-                )
+            )
return block.y[feature_id, branch_y_idx[0]]

def _sum_of_z_l(tree_id, start_node_id):
4 changes: 2 additions & 2 deletions src/omlt/gbt/model.py
@@ -59,7 +59,7 @@ def _model_num_inputs(model):
if len(graph.input) != 1:
raise ValueError(
f"Model graph input field is multi-valued {graph.input}. A single value is required."
-            )
+        )
return _tensor_size(graph.input[0])


@@ -69,7 +69,7 @@ def _model_num_outputs(model):
if len(graph.output) != 1:
raise ValueError(
f"Model graph output field is multi-valued {graph.output}. A single value is required."
-            )
+        )
return _tensor_size(graph.output[0])


67 changes: 33 additions & 34 deletions src/omlt/io/onnx_parser.py
@@ -76,7 +76,7 @@ def parse_network(self, graph, scaling_object, input_bounds):
if dim_value is None:
raise ValueError(
f'All dimensions in graph "{graph.name}" input tensor have 0 value.'
-                    )
+                )
assert network_input is None
network_input = InputLayer(size)
self._node_map[input.name] = network_input
@@ -120,7 +120,7 @@ def parse_network(self, graph, scaling_object, input_bounds):
else:
raise ValueError(
f'Nodes must have inputs or have op_type "Constant". Node "{node.name}" has no inputs and op_type "{node.op_type}".'
-                    )
+                )

# traverse graph
self._node_stack = list(inputs)
@@ -179,11 +179,11 @@ def _consume_dense_nodes(self, node, next_nodes):
if node.op_type != "MatMul":
raise ValueError(
f"{node.name} is a {node.op_type} node, only MatMul nodes can be used as starting points for consumption."
-                )
+            )
if len(node.input) != 2:
raise ValueError(
f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."
-                )
+            )

[in_0, in_1] = list(node.input)
input_layer, transformer = self._node_input_and_transformer(in_0)
@@ -192,7 +192,7 @@ def _consume_dense_nodes(self, node, next_nodes):
if len(next_nodes) != 1:
raise ValueError(
f"Next nodes must have length 1, {next_nodes} has length {len(next_nodes)}"
-                )
+            )

# expect 'Add' node ahead
type_, node, maybe_next_nodes = self._nodes[next_nodes[0]]
@@ -201,7 +201,7 @@ def _consume_dense_nodes(self, node, next_nodes):
if node.op_type != "Add":
raise ValueError(
f"The first node to be consumed, {node.name}, is a {node.op_type} node. Only Add nodes are supported."
-                )
+            )

# extract biases
next_nodes = maybe_next_nodes
@@ -219,12 +219,11 @@ def _consume_dense_nodes(self, node, next_nodes):
if node_weights.shape[1] != node_biases.shape[0]:
raise ValueError(
f"Node weights has {node_weights.shape[1]} columns; node biases has {node_biases.shape[0]} rows. These must be equal."
-                )
+            )
if len(node.output) != 1:
raise ValueError(
f"Node output is {node.output} but should be a single value."
-                )
-
+            )

input_output_size = _get_input_output_size(input_layer, transformer)

@@ -257,11 +256,11 @@ def _consume_gemm_dense_nodes(self, node, next_nodes):
if node.op_type != "Gemm":
raise ValueError(
f"{node.name} is a {node.op_type} node, only Gemm nodes can be used as starting points for consumption."
-                )
+            )
if len(node.input) != 3:
raise ValueError(
f"{node.name} input has {len(node.input)} dimensions, only nodes with 3 input dimensions can be used as starting points for consumption."
-                )
+            )

attr = _collect_attributes(node)
alpha = attr["alpha"]
@@ -312,11 +311,11 @@ def _consume_conv_nodes(self, node, next_nodes):
if node.op_type != "Conv":
raise ValueError(
f"{node.name} is a {node.op_type} node, only Conv nodes can be used as starting points for consumption."
-                )
-        if len(node.input) not in [2,3]:
+            )
+        if len(node.input) not in [2, 3]:
raise ValueError(
f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption."
-                )
+            )

if len(node.input) == 2:
[in_0, in_1] = list(node.input)
@@ -340,39 +339,39 @@ def _consume_conv_nodes(self, node, next_nodes):
if attr["kernel_shape"] != kernel_shape:
raise ValueError(
f"Kernel shape attribute {attr['kernel_shape']} does not match initialized kernel shape {kernel_shape}."
-                )
+            )
if len(kernel_shape) != len(strides):
raise ValueError(
f"Initialized kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal."
-                )
+            )
if len(input_output_size) != len(kernel_shape) + 1:
raise ValueError(
f"Input/output size ({input_output_size}) must have one more dimension than initialized kernel shape ({kernel_shape})."
-                )
+            )

# Check input, output have correct dimensions
if biases.shape != (out_channels,):
raise ValueError(
f"Biases shape {biases.shape} must match output weights channels {(out_channels,)}."
-                )
+            )
if in_channels != input_output_size[0]:
raise ValueError(
f"Input/output size ({input_output_size}) first dimension must match input weights channels ({in_channels})."
-                )
+            )

# Other attributes are not supported
if "dilations" in attr and attr["dilations"] != [1, 1]:
raise ValueError(
f"{node} has non-identity dilations ({attr['dilations']}). This is not supported."
-                )
+            )
if attr["group"] != 1:
raise ValueError(
f"{node} has multiple groups ({attr['group']}). This is not supported."
-                )
+            )
if "pads" in attr and np.any(attr["pads"]):
raise ValueError(
f"{node} has non-zero pads ({attr['pads']}). This is not supported."
-                )
+            )

# generate new nodes for the node output
padding = 0
@@ -395,7 +394,7 @@ def _consume_conv_nodes(self, node, next_nodes):
if len(input_output_size) != 3:
raise ValueError(
f"Expected a 2D image with channels, got {input_output_size}."
-                )
+            )

conv_layer = ConvLayer2D(
input_output_size,
@@ -415,11 +414,11 @@ def _consume_reshape_nodes(self, node, next_nodes):
if node.op_type != "Reshape":
raise ValueError(
f"{node.name} is a {node.op_type} node, only Reshape nodes can be used as starting points for consumption."
-                )
+            )
if len(node.input) != 2:
raise ValueError(
f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."
-                )
+            )
[in_0, in_1] = list(node.input)
input_layer = self._node_map[in_0]
new_shape = self._constants[in_1]
@@ -436,18 +435,18 @@ def _consume_pool_nodes(self, node, next_nodes):
if node.op_type not in _POOLING_OP_TYPES:
raise ValueError(
f"{node.name} is a {node.op_type} node, only MaxPool nodes can be used as starting points for consumption."
-                )
+            )
pool_func_name = "max"

# ONNX network should not contain indices output from MaxPool - not supported by OMLT
if len(node.output) != 1:
raise ValueError(
f"The ONNX contains indices output from MaxPool. This is not supported by OMLT."
-                )
+            )
if len(node.input) != 1:
raise ValueError(
f"{node.name} input has {len(node.input)} dimensions, only nodes with 1 input dimension can be used as starting points for consumption."
-                )
+            )

input_layer, transformer = self._node_input_and_transformer(node.input[0])
input_output_size = _get_input_output_size(input_layer, transformer)
@@ -459,7 +458,7 @@ def _consume_pool_nodes(self, node, next_nodes):
if input_output_size[0] != 1:
raise ValueError(
f"{node.name} has {input_output_size[0]} batches, only a single batch is supported."
-                )
+            )
input_output_size = input_output_size[1:]

in_channels = input_output_size[0]
@@ -474,23 +473,23 @@ def _consume_pool_nodes(self, node, next_nodes):
if "dilations" in attr and attr["dilations"] != [1, 1]:
raise ValueError(
f"{node.name} has non-identity dilations ({attr['dilations']}). This is not supported."
-                )
+            )
if "pads" in attr and np.any(attr["pads"]):
raise ValueError(
f"{node.name} has non-zero pads ({attr['pads']}). This is not supported."
-                )
+            )
if ("auto_pad" in attr) and (attr["auto_pad"] != "NOTSET"):
raise ValueError(
f"{node.name} has autopad set ({attr['auto_pad']}). This is not supported."
-                )
+            )
if len(kernel_shape) != len(strides):
raise ValueError(
f"Kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal."
-                )
+            )
if len(input_output_size) != len(kernel_shape) + 1:
raise ValueError(
f"Input/output size ({input_output_size}) must have one more dimension than kernel shape ({kernel_shape})."
-                )
+            )

output_shape_wrapper = math.floor
if "ceil_mode" in attr and attr["ceil_mode"] == 1:
14 changes: 7 additions & 7 deletions src/omlt/neuralnet/layer.py
@@ -23,14 +23,14 @@ class Layer:
def __init__(
self, input_size, output_size, *, activation=None, input_index_mapper=None
):
-        if not isinstance(input_size, (list,tuple)):
+        if not isinstance(input_size, (list, tuple)):
raise TypeError(
f"input_size must be a list or tuple, {type(input_size)} was provided."
-                )
-        if not isinstance(output_size, (list,tuple)):
+            )
+        if not isinstance(output_size, (list, tuple)):
raise TypeError(
f"output_size must be a list or tuple, {type(output_size)} was provided."
-                )
+            )
self.__input_size = list(input_size)
self.__output_size = list(output_size)
self.activation = activation
@@ -108,7 +108,7 @@ def eval_single_layer(self, x):
if x_reshaped.shape != tuple(self.input_size):
raise ValueError(
f"Layer requires an input size {self.input_size}, but the input tensor had size {x_reshaped.shape}."
)
)
y = self._eval(x_reshaped)
return self._apply_activation(y)

@@ -321,7 +321,7 @@ def _eval(self, x):
if len(self.output_size) != 3:
raise ValueError(
f"Output should have 3 dimensions but instead has {len(self.output_size)}"
)
)
[depth, rows, cols] = list(self.output_size)
for out_d in range(depth):
for out_r in range(rows):
@@ -380,7 +380,7 @@ def __init__(
if pool_func_name not in PoolingLayer2D._POOL_FUNCTIONS:
raise ValueError(
f"Allowable pool functions are {PoolingLayer2D._POOL_FUNCTIONS}, {pool_func_name} was provided."
)
)
self._pool_func_name = pool_func_name
self._kernel_shape = kernel_shape
self._kernel_depth = kernel_depth
4 changes: 2 additions & 2 deletions src/omlt/neuralnet/layers/full_space.py
@@ -59,7 +59,7 @@ def full_space_conv2d_layer(net_block, net, layer_block, layer):
if succ_layer.activation != "linear":
raise ValueError(
f"Activation is applied after convolution layer, but the successor max pooling layer {succ_layer} has an activation function also."
)
)
succ_layer.activation = layer.activation
layer.activation = "linear"

@@ -122,7 +122,7 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer):
if input_layer.activation != "linear":
raise ValueError(
"Non-increasing activation functions on the preceding convolutional layer are not supported."
)
)
# TODO - add support for non-increasing activation functions on preceding convolutional layer

# note kernel indexes are the same set of values for any output index, so wlog get kernel indexes for (0, 0, 0)
2 changes: 1 addition & 1 deletion src/omlt/neuralnet/layers/partition_based.py
@@ -47,7 +47,7 @@ def partition_based_dense_relu_layer(net_block, net, layer_block, layer, split_f
if len(prev_layers) == 0:
raise ValueError(
f"Layer {layer} is not an input layer, but has no predecessors."
)
)
elif len(prev_layers) > 1:
raise ValueError(f"Layer {layer} has multiple predecessors.")
prev_layer = prev_layers[0]
2 changes: 1 addition & 1 deletion src/omlt/neuralnet/layers/reduced_space.py
@@ -14,7 +14,7 @@ def reduced_space_dense_layer(net_block, net, layer_block, layer, activation):
if len(prev_layers) == 0:
raise ValueError(
f"Layer {layer} is not an input layer, but has no predecessors."
)
)
elif len(prev_layers) > 1:
raise ValueError(f"Layer {layer} has multiple predecessors.")
prev_layer = prev_layers[0]
56 changes: 31 additions & 25 deletions tests/io/test_onnx_parser.py
@@ -150,36 +150,41 @@ def test_consume_wrong_node_type(datadir):

with pytest.raises(ValueError) as excinfo:
parser._consume_dense_nodes(
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1],
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2],
+        )
expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MatMul nodes can be used as starting points for consumption."
assert str(excinfo.value) == expected_msg_dense

with pytest.raises(ValueError) as excinfo:
parser._consume_gemm_dense_nodes(
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1],
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2],
+        )
expected_msg_gemm = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Gemm nodes can be used as starting points for consumption."
assert str(excinfo.value) == expected_msg_gemm

with pytest.raises(ValueError) as excinfo:
parser._consume_conv_nodes(
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1],
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2],
+        )
expected_msg_conv = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Conv nodes can be used as starting points for consumption."
assert str(excinfo.value) == expected_msg_conv

with pytest.raises(ValueError) as excinfo:
parser._consume_reshape_nodes(
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1],
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2],
+        )
expected_msg_reshape = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Reshape nodes can be used as starting points for consumption."
assert str(excinfo.value) == expected_msg_reshape

with pytest.raises(ValueError) as excinfo:
parser._consume_pool_nodes(
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1],
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2],
+        )
expected_msg_pool = """StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MaxPool nodes can be used as starting points for consumption."""
assert str(excinfo.value) == expected_msg_pool

@@ -190,11 +195,12 @@ def test_consume_dense_wrong_dims(datadir):
parser = NetworkParser()
parser.parse_network(model.graph, None, None)

-    parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/MatMul'][1].input.append('abcd')
+    parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][1].input.append("abcd")
with pytest.raises(ValueError) as excinfo:
parser._consume_dense_nodes(
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/MatMul'][1],
-            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/MatMul'][2])
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][1],
+            parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][2],
+        )
expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/MatMul input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."
assert str(excinfo.value) == expected_msg_dense

@@ -204,11 +210,11 @@ def test_consume_gemm_wrong_dims(datadir):
model = onnx.load(datadir.file("gemm.onnx"))
parser = NetworkParser()
parser.parse_network(model.graph, None, None)
-    parser._nodes['Gemm_0'][1].input.append('abcd')
+    parser._nodes["Gemm_0"][1].input.append("abcd")
with pytest.raises(ValueError) as excinfo:
parser._consume_gemm_dense_nodes(
-            parser._nodes['Gemm_0'][1], parser._nodes['Gemm_0'][2]
-            )
+            parser._nodes["Gemm_0"][1], parser._nodes["Gemm_0"][2]
+        )
expected_msg_gemm = "Gemm_0 input has 4 dimensions, only nodes with 3 input dimensions can be used as starting points for consumption."
assert str(excinfo.value) == expected_msg_gemm

@@ -218,11 +224,11 @@ def test_consume_conv_wrong_dims(datadir):
model = onnx.load(datadir.file("convx1_gemmx1.onnx"))
parser = NetworkParser()
parser.parse_network(model.graph, None, None)
-    parser._nodes['Conv_0'][1].input.append('abcd')
+    parser._nodes["Conv_0"][1].input.append("abcd")
with pytest.raises(ValueError) as excinfo:
parser._consume_conv_nodes(
-            parser._nodes['Conv_0'][1], parser._nodes['Conv_0'][2]
-            )
+            parser._nodes["Conv_0"][1], parser._nodes["Conv_0"][2]
+        )
expected_msg_conv = "Conv_0 input has 4 dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption."
assert str(excinfo.value) == expected_msg_conv

@@ -232,11 +238,11 @@ def test_consume_reshape_wrong_dims(datadir):
model = onnx.load(datadir.file("convx1_gemmx1.onnx"))
parser = NetworkParser()
parser.parse_network(model.graph, None, None)
-    parser._nodes['Reshape_2'][1].input.append('abcd')
+    parser._nodes["Reshape_2"][1].input.append("abcd")
with pytest.raises(ValueError) as excinfo:
parser._consume_reshape_nodes(
-            parser._nodes['Reshape_2'][1], parser._nodes['Reshape_2'][2]
-            )
+            parser._nodes["Reshape_2"][1], parser._nodes["Reshape_2"][2]
+        )
expected_msg_reshape = """Reshape_2 input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."""
assert str(excinfo.value) == expected_msg_reshape

@@ -246,10 +252,10 @@ def test_consume_maxpool_wrong_dims(datadir):
model = onnx.load(datadir.file("maxpool_2d.onnx"))
parser = NetworkParser()
parser.parse_network(model.graph, None, None)
-    parser._nodes['node1'][1].input.append('abcd')
+    parser._nodes["node1"][1].input.append("abcd")
with pytest.raises(ValueError) as excinfo:
parser._consume_pool_nodes(
-            parser._nodes['node1'][1], parser._nodes['node1'][2]
-            )
+            parser._nodes["node1"][1], parser._nodes["node1"][2]
+        )
expected_msg_maxpool = """node1 input has 2 dimensions, only nodes with 1 input dimension can be used as starting points for consumption."""
assert str(excinfo.value) == expected_msg_maxpool
1 change: 1 addition & 0 deletions tests/neuralnet/test_network_definition.py
@@ -172,6 +172,7 @@ def _test_add_invalid_edge(direction):
expected_msg = f"Outbound layer {dense_layer_1} not found in network."
assert str(excinfo.value) == expected_msg

+
def test_add_invalid_edge():
_test_add_invalid_edge("in")
_test_add_invalid_edge("out")
10 changes: 5 additions & 5 deletions tests/neuralnet/test_nn_formulation.py
@@ -548,7 +548,7 @@ def test_partition_based_unbounded_below():
with pytest.raises(ValueError) as excinfo:
partition_based_dense_relu_layer(
m.neural_net_block, net, m.neural_net_block, test_layer, split_func
-            )
+        )
expected_msg = "Expression is unbounded below."
assert str(excinfo.value) == expected_msg

@@ -570,7 +570,7 @@ def test_partition_based_unbounded_above():
with pytest.raises(ValueError) as excinfo:
partition_based_dense_relu_layer(
m.neural_net_block, net, m.neural_net_block, test_layer, split_func
-            )
+        )
expected_msg = "Expression is unbounded above."
assert str(excinfo.value) == expected_msg

@@ -590,7 +590,7 @@ def test_partition_based_bias_unbounded_below():
with pytest.raises(ValueError) as excinfo:
partition_based_dense_relu_layer(
m.neural_net_block, net, m.neural_net_block, test_layer, split_func
-            )
+        )
expected_msg = "Expression is unbounded below."
assert str(excinfo.value) == expected_msg

@@ -610,7 +610,7 @@ def test_partition_based_bias_unbounded_above():
with pytest.raises(ValueError) as excinfo:
partition_based_dense_relu_layer(
m.neural_net_block, net, m.neural_net_block, test_layer, split_func
-            )
+        )
expected_msg = "Expression is unbounded above."
assert str(excinfo.value) == expected_msg

@@ -733,7 +733,7 @@ def test_maxpool2d_bad_input_activation():
with pytest.raises(ValueError) as excinfo:
full_space_maxpool2d_layer(
m.neural_net_block, net, m.neural_net_block, maxpool_layer_1
-            )
+        )
expected_msg = """Non-increasing activation functions on the preceding convolutional layer are not supported."""
assert str(excinfo.value) == expected_msg
