Tidied up Ethos-U delegate binaries (pytorch#1046)
Summary:
Fixed pytorch#677.
Simplified the binary format, made it more robust and easier to validate, and moved more of the processing to AoT.
Made runtime input/output support more general (an arbitrary number of EValue inputs and outputs).
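For reference, the binary stream this change produces is a flat sequence of named blocks: a 16-byte NUL-padded name, a 16-byte length field (a little-endian int32 payload length followed by twelve zero bytes), and the payload padded out to a 16-byte boundary, with the stream opened by an empty vela_bin_stream block and closed by an empty vela_end_stream block. A minimal host-side sketch of emitting one such block, assuming only the layout documented in the arm_backend.py diff below (pack_vela_block is an illustrative helper, not part of this PR):

import struct

def pack_vela_block(name: str, payload: bytes) -> bytes:
    # 16-byte NUL-padded block name (truncated to 15 characters).
    block_name = name.encode("utf8")[:15]
    block_name += b"\x00" * (16 - len(block_name))
    # 16-byte length field: unpadded payload length plus twelve zero bytes.
    block_length = struct.pack("<iiii", len(payload), 0, 0, 0)
    # Payload padded to the next 16-byte boundary.
    padded = payload + b"\x00" * (15 - (len(payload) - 1) % 16)
    return block_name + block_length + padded

A full stream is then just the concatenation of such blocks, from vela_bin_stream through vela_end_stream.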

Pull Request resolved: pytorch#1046

Reviewed By: SS-JIA

Differential Revision: D50599184

Pulled By: digantdesai

fbshipit-source-id: 908d8598745b0b4b99c4001cc42c6a74c251e563
robell authored and facebook-github-bot committed Oct 24, 2023
1 parent d8e9b26 commit cc1a8bd
Showing 7 changed files with 284 additions and 224 deletions.
5 changes: 4 additions & 1 deletion backends/arm/CMakeLists.txt
@@ -20,7 +20,10 @@ set(THIRD_PARTY_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/third-party")
set(DRIVER_ETHOSU_INCLUDE_DIR "${THIRD_PARTY_ROOT}/ethos-u-core-driver/include")
include_directories( ${DRIVER_ETHOSU_INCLUDE_DIR} )

set(_arm_baremetal_sources backends/arm/runtime/ArmBackendEthosU.cpp)
set(_arm_baremetal_sources
backends/arm/runtime/ArmBackendEthosU.cpp
backends/arm/runtime/VelaBinStream.cpp
)
list(TRANSFORM _arm_baremetal_sources PREPEND "${EXECUTORCH_ROOT}/")

add_library(
98 changes: 52 additions & 46 deletions backends/arm/arm_backend.py
@@ -145,8 +145,27 @@ def dbg_tosa_dump(tosa_fb, path):
f.write(js)


# Output to Vela with current file-based compilation
# WARNING: if this changes, the runtime reader also needs to change
# Pack either input or output tensor block, compose the related arrays into
# per-io structs to simplify runtime use.
def vela_bin_pack_io(prefix, data):
ios = struct.pack("<i", len(data[prefix + "_shape"]))
for i in range(len(data[prefix + "_shape"])):
io_shape = data[prefix + "_shape"][i]
io_elem_size = data[prefix + "_elem_size"][i]
io_offset = data[prefix + "_offset"][i]
io_region = data[prefix + "_region"][i]
assert len(io_shape) <= 4
inp_pad = io_shape.tolist() + [0] * (4 - len(io_shape))
io_struct = struct.pack(
"<iiiiiii", *inp_pad, io_elem_size, io_offset, io_region
)
ios += io_struct
return ios


# Output via Vela to binary stream for ArmBackendEthosU
# WARNING: Do not change this without changing VelaBinStream.cpp as that
# function consumes this format and the two need to align.
def vela_compile(tosa_fb):
with tempfile.TemporaryDirectory() as tmpdir:
tosaname = "out.tosa"
@@ -162,65 +181,52 @@ def vela_compile(tosa_fb):

np_path = os.path.join(tmpdir, "output", "out_sg0_vela.npz")
blocks = b""

with np.load(np_path, allow_pickle=False) as data:
# Construct our modified output_blocks with data in a form easily
# digested on the device side
bin_blocks = {"vela_bin_stream": b""}

# copy command data through unmodified
bin_blocks["cmd_data"] = data["cmd_data"].tobytes()

# copy weight data through unmodified
bin_blocks["weight_data"] = data["weight_data"].tobytes()

# Add a block for scratch, inputs and outputs; scratch shape is a 1 element
# array giving us size in bytes so extract this and add a block of 0's.
# Currently we preallocated this on the host to provide SRAM for computation.
if len(data["scratch_shape"][0]) != 1:
raise RuntimeError("Expected scratch to be single array")
block_length = data["scratch_shape"][0].item()
bin_blocks["scratch_data"] = b"\x00" * block_length

# Capture inputs and outputs
bin_blocks["inputs"] = vela_bin_pack_io("input", data)
bin_blocks["outputs"] = vela_bin_pack_io("output", data)

bin_blocks["vela_end_stream"] = b""

# Emit the NPZ regions as:
# - 16 byte block name null terminated string (padded to 16 if name shorter)
# - 4 bytes of int32 block length and 12 bytes of 0's
# - block data (padded to 16 byte alignment at end)
# Repeat for all blocks
for key in data.keys():
for key in bin_blocks.keys():
block_name = bytes(key, "utf8")[:15]
block_name = block_name + b"\x00" * (16 - len(block_name))

block_data = b""
if key in ("input_shape", "output_shape"):
inputs = data[key]
# Encode a struct of int len; and one or more int x,y,z,w shape;
input_struct = struct.pack("<i", len(inputs))
for inp in inputs:
assert len(inp) <= 4
inp_pad = inp.tolist() + [0] * (4 - len(inp))
input_struct = input_struct + struct.pack("<iiii", *inp_pad)
block_data = input_struct
elif key in ("input_offset", "output_offset"):
inputs = data[key]
if key == "output_offset" and len(inputs) > 1:
raise RuntimeError(
"Currently only support one output in Vela ArmBackend"
)
offset_struct = struct.pack("<i", len(inputs))
for inp in inputs:
offset_struct = offset_struct + struct.pack("<i", inp)
block_data = offset_struct
else:
block_data = data[key].tobytes()
# We need the acual unpadded block lengths for hw setup
block_length = len(block_data).to_bytes(16, "little")
# pad block data to multiple of 16 bytes
block_length = struct.pack("<iiii", len(bin_blocks[key]), 0, 0, 0)

# Pad block data to multiple of 16 bytes
block_data = bin_blocks[key]
block_data = block_data + b"\x00" * (15 - (len(block_data) - 1) % 16)

block = block_name + block_length + block_data
blocks = blocks + block

# Add a block for scratch, inputs and outputs
# scratch shape is a 1 element array giving us size in bytes
block_name = bytes("scratch_data", "utf8")[:15]
block_name = block_name + b"\x00" * (16 - len(block_name))
block_length = data["scratch_shape"][0].item()
block_length = block_length + (15 - (block_length - 1) % 16)
block_data = b"\x00" * block_length
block_length = block_length.to_bytes(16, "little")
block = block_name + block_length + block_data
blocks = blocks + block
# TODO are these already in scratch shape? look to be
# input_shape * input_elem_size
# output_shape * output_elem_size
# input_offset and output_offset specify the location these arrays are written from base of scratch

# return 16 byte VELA bin header + blocks + footer
header = bytes("vela_bin_stream", "utf-8") + b"\x00"
footer = bytes("vela_end_stream", "utf-8") + b"\x00"
return header + blocks + footer
return blocks


def dbg_fail(node, tosa_fb, path):
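The inputs and outputs blocks written by vela_bin_pack_io above are an int32 count followed by one record per tensor: four int32 shape dimensions (zero-padded), the element size, the offset into the scratch area, and the memory region, all little-endian. A host-side sketch of reading one of those blocks back, assuming only that layout (parse_vela_io is an illustrative helper; the real consumer is the C++ runtime in the next file):

import struct

def parse_vela_io(block_data: bytes):
    (count,) = struct.unpack_from("<i", block_data, 0)
    ios = []
    offset = 4
    for _ in range(count):
        # Each record is seven little-endian int32s: shape[4], elem_size, offset, region.
        fields = struct.unpack_from("<iiiiiii", block_data, offset)
        ios.append({
            "shape": list(fields[0:4]),
            "elem_size": fields[4],
            "offset": fields[5],
            "region": fields[6],
        })
        offset += 7 * 4
    return ios

This mirrors the per-io fields the runtime reads through handles.inputs->io[i] and handles.outputs->io[i] in ArmBackendEthosU.cpp below.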
174 changes: 22 additions & 152 deletions backends/arm/runtime/ArmBackendEthosU.cpp
@@ -11,13 +11,13 @@
*/

#include <cstring>
#include <memory>
#include <vector>

#include <executorch/runtime/backend/interface.h>
#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/evalue.h>

#include <executorch/backends/arm/runtime/VelaBinStream.h>

#include <ethosu_driver.h>
#include <pmu_ethosu.h>

@@ -52,29 +52,14 @@ class ArmBackend final : public PyTorchBackendInterface {

char* data = (char*)processed->data();
size_t size = processed->size();
char* foot = data + size - 16;
char* foot = data + size - sizeof(VelaBinBlock);

// Header and footer both 16 bit aligned suggest valid structure and we
// wont walk off the end of the chunks and segfault
if (!((int)data == next_mul_16((uintptr_t)data))) {
ET_LOG(Error, "ArmBackend::init: Binary needs to be 16 byte unaligned");
return Error::InvalidProgram;
}
if (!((int)foot == next_mul_16((uintptr_t)foot))) {
ET_LOG(Error, "ArmBackend::init: Footer expected to be 16 byte aligned");
ET_LOG(
Error,
"ArmBackend::init: Program expected to be multiple of 16 bytes");
return Error::InvalidProgram;
}
if (!(0 == strncmp(data, "vela_bin_stream", 15))) {
ET_LOG(Error, "ArmBackend::init: Binary passed is not a vela_bin_stream");
return Error::InvalidProgram;
}
if (!(0 == strncmp(foot, "vela_end_stream", 15))) {
ET_LOG(Error, "ArmBackend::init: Binary passed missing vela_end_stream");
// Verify format of vela_bin
if (vela_bin_validate(data, size) == false) {
ET_LOG(Error, "Malformed vela_bin_stream found");
return Error::InvalidProgram;
}

// Verify address range is accessible current expectation is the program
// is wholly stored in SRAM
// TODO: expect to improve capabilities here by supporting DRAM storage
@@ -108,7 +93,7 @@ class ArmBackend final : public PyTorchBackendInterface {
char* data = (char*)processed->data();

// Read key sections from the vela_bin_stream
if (!this->vela_read(data, &handles, processed->size())) {
if (vela_bin_read(data, &handles, processed->size()) == false) {
ET_LOG(Error, "ArmBackend::vela_read: error, invalid binary layout");
return Error::InvalidProgram;
}
@@ -124,8 +109,9 @@ class ArmBackend final : public PyTorchBackendInterface {
handles.scratch_data_size);

// Write inputs into SRAM scratch area defined by Vela
for (int i = 0; i < handles.input_shapes.size(); i++) {
const char* input_addr = handles.scratch_data + handles.input_offset[i];
for (int i = 0; i < handles.inputs->count; i++) {
const char* input_addr =
handles.scratch_data + handles.inputs->io[i].offset;
// Process input EValue into scratch
// TODO: Optimise into direct write from Vela into the SRAM or DRAM output
// for compatible data layouts.
@@ -168,25 +154,17 @@ class ArmBackend final : public PyTorchBackendInterface {
return Error::InvalidProgram;
}

// output data from Ethos U
// We only handle one output at the moment
const char* output_addr = handles.scratch_data + handles.output_offset[0];
// Outputs are in the index immediately after inputs
int output_index = handles.input_shapes.size();

if (handles.output_shapes.size() != 1) {
ET_LOG(
Error,
"ArmBackend::execute: currently only support one return tensor");
return Error::InvalidProgram;
}
// Process results into EValue storage
// TODO: optimise into direct write for compatible, contig layout
int* output_address = (int*)output_addr;
auto tensor_out = args[output_index]->toTensor();
for (int j = 0; j < tensor_out.numel(); j++) {
// TODO: extend beyond tensors with 4 byte elements
tensor_out.mutable_data_ptr<int>()[j] = output_address[j];
// Write outputs from scratch into EValue pointers
for (int i = 0; i < handles.outputs->count; i++) {
const char* output_addr =
handles.scratch_data + handles.outputs->io[i].offset;
// Process input EValue into scratch
int* output_address = (int*)output_addr;
// Outputs are in the index immediately after inputs
auto tensor_out = args[handles.inputs->count + i]->toTensor();
for (int j = 0; j < tensor_out.numel(); j++) {
tensor_out.mutable_data_ptr<int>()[j] = output_address[j];
}
}

return Error::Ok;
@@ -195,114 +173,6 @@ class ArmBackend final : public PyTorchBackendInterface {
void destroy(DelegateHandle* handle) const override {
return;
}

private:
typedef struct {
const char* cmd_data;
size_t cmd_data_size;
const char* weight_data;
size_t weight_data_size;
const char* scratch_data;
size_t scratch_data_size;
vector<size_t> input_offset;
vector<vector<int>> input_shapes;
vector<size_t> output_offset;
vector<vector<int>> output_shapes;
} VelaHandles;

typedef struct {
char name[16];
uint32_t size;
char _pad[12];
char data[];
} VelaBinBlock;

typedef struct {
int count;
int shape[][4];
} VelaShapes;

typedef struct {
int count;
int offsets[];
} VelaOffsets;

static int next_mul_16(int n) {
return ((n - 1) | 15) + 1;
}

int vela_read(char* data, VelaHandles* handles, int size) const {
constexpr const size_t header_size = 16;

// Read header string
if (strncmp(data, "vela_bin_stream", 15)) {
return 0;
}
data += header_size;

// Expect one or more 'VelaBinBlock's
while (1) {
VelaBinBlock* b = (VelaBinBlock*)data;
data += sizeof(VelaBinBlock) + next_mul_16(b->size);

// Exit with success on finding end of stream
if (!strncmp(b->name, "vela_end_stream", strlen("vela_end_stream")))
return 1;

if (!strncmp(b->name, "cmd_data", strlen("cmd_data"))) {
// This magic header confirms a valid command stream in binary
if (strncmp(b->data, "COP1", strlen("COP1")))
return 0;
handles->cmd_data = b->data;
handles->cmd_data_size = b->size;
}
if (!strncmp(b->name, "weight_data", strlen("weight_data"))) {
handles->weight_data = b->data;
handles->weight_data_size = b->size;
}
if (!strncmp(b->name, "scratch_data", strlen("scratch_data"))) {
handles->scratch_data = b->data;
handles->scratch_data_size = b->size;
}

// capture inputs and outputs
if (!strncmp(b->name, "input_offset", strlen("input_offset"))) {
VelaOffsets* offsets = (VelaOffsets*)b->data;
for (int i = 0; i < offsets->count; i++) {
handles->input_offset.push_back(offsets->offsets[i]);
}
}
if (!strncmp(b->name, "output_offset", strlen("output_offset"))) {
VelaOffsets* offsets = (VelaOffsets*)b->data;
for (int i = 0; i < offsets->count; i++) {
handles->output_offset.push_back(offsets->offsets[i]);
}
}

if (!strncmp(b->name, "input_shape", strlen("input_shape"))) {
VelaShapes* shapes = (VelaShapes*)b->data;
for (int i = 0; i < shapes->count; i++) {
vector<int> s = {
shapes->shape[i][0],
shapes->shape[i][1],
shapes->shape[i][2],
shapes->shape[i][3]};
handles->input_shapes.push_back(s);
}
}
if (!strncmp(b->name, "output_shape", strlen("output_shape"))) {
VelaShapes* shapes = (VelaShapes*)b->data;
for (int i = 0; i < shapes->count; i++) {
vector<int> s = {
shapes->shape[i][0],
shapes->shape[i][1],
shapes->shape[i][2],
shapes->shape[i][3]};
handles->output_shapes.push_back(s);
}
}
}
}
};

namespace {
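VelaBinStream.cpp and VelaBinStream.h, which now provide vela_bin_validate and vela_bin_read, are among the changed files not shown in this view. Purely as an illustration of the traversal they have to perform over the stream emitted by vela_compile, here is a Python sketch under the layout assumptions above (walk_vela_blocks is an illustrative helper, not the PR's implementation):

import struct

def walk_vela_blocks(stream: bytes):
    # Yield (name, payload) for each 16-byte-aligned block until the footer.
    pos = 0
    while pos + 32 <= len(stream):
        name = stream[pos:pos + 16].rstrip(b"\x00").decode("utf8")
        (size,) = struct.unpack_from("<i", stream, pos + 16)
        data_start = pos + 32
        yield name, stream[data_start:data_start + size]
        if name == "vela_end_stream":
            return
        # Payload is padded to the next multiple of 16 bytes.
        pos = data_start + ((size + 15) & ~15)
    raise ValueError("missing vela_end_stream footer")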