diff --git a/runtime/backend/interface.cpp b/runtime/backend/interface.cpp
index d7f0489db5..84c0bb82d4 100644
--- a/runtime/backend/interface.cpp
+++ b/runtime/backend/interface.cpp
@@ -7,12 +7,12 @@
  */

 #include

-#include

 namespace executorch {
 namespace runtime {

-PyTorchBackendInterface::~PyTorchBackendInterface() {}
+// Pure-virtual dtors still need an implementation.
+BackendInterface::~BackendInterface() {}

 namespace {
@@ -31,7 +31,7 @@ size_t num_registered_backends = 0;

 } // namespace

-PyTorchBackendInterface* get_backend_class(const char* name) {
+BackendInterface* get_backend_class(const char* name) {
   for (size_t i = 0; i < num_registered_backends; i++) {
     Backend backend = registered_backends[i];
     if (strcmp(backend.name, name) == 0) {
diff --git a/runtime/backend/interface.h b/runtime/backend/interface.h
index 0b77283a35..c0305f68cd 100644
--- a/runtime/backend/interface.h
+++ b/runtime/backend/interface.h
@@ -39,9 +39,9 @@ struct CompileSpec {
  */
 using DelegateHandle = void;

-class PyTorchBackendInterface {
+class BackendInterface {
  public:
-  virtual ~PyTorchBackendInterface() = 0;
+  virtual ~BackendInterface() = 0;

   /**
    * Returns true if the backend is available to process delegation calls.
@@ -52,19 +52,19 @@ class PyTorchBackendInterface {
    * Responsible to further process (compile/transform/optimize) the compiled
    * unit that was produced, ahead-of-time, as well as perform any backend
    * initialization to ready it for execution. This method is called every time
-   * the PyTorch program is initialized. Consequently, this is the place to
+   * the ExecuTorch program is initialized. Consequently, this is the place to
    * perform any backend initialization as well as transformations,
    * optimizations, and even compilation that depend on the target device. As
    * such, it is strongly encouraged to push as much processing as possible to
    * the ahead-of-time processing.
    *
-   * @param[in] processed An opaque (to PyTorch) compiled unit from the
-   *     preprocessor. Can contain anything the backend needs to execute the
-   *     equivalent semantics of the passed-in Module and its method. Often
-   *     passed unmodified to `execute()` as a `DelegateHandle`, unless it needs
-   *     further processing at init time to be fully executable. If the data is
-   *     not needed after init(), calling processed->Free() can reclaim its
-   *     memory.
+   * @param[in] processed An opaque (to ExecuTorch) backend-specific compiled
+   *     unit from the preprocessor. Can contain anything the backend needs to
+   *     execute the equivalent semantics of the passed-in Module and its
+   *     method. Often passed unmodified to `execute()` as a `DelegateHandle`,
+   *     unless it needs further processing at init time to be fully executable.
+   *     If the data is not needed after init(), calling processed->Free() can
+   *     reclaim its memory.
    * @param[in] compile_specs The exact same compiler specification that
    *     was used ahead-of-time to produce `processed`.
    *
@@ -115,11 +115,10 @@ class PyTorchBackendInterface {
  * The mapping is populated using register_backend method.
  *
  * @param[in] name Name of the user-defined backend delegate.
- * @retval Pointer to the appropriate object that implements
- *     PyTorchBackendInterface. Nullptr if it can't find anything
- *     with the given name.
+ * @retval Pointer to the appropriate object that implements BackendInterface.
+ *     Nullptr if it can't find anything with the given name.
  */
-PyTorchBackendInterface* get_backend_class(const char* name);
+BackendInterface* get_backend_class(const char* name);

 /**
  * A named instance of a backend.
  */
@@ -128,12 +127,12 @@ struct Backend {
   /// The name of the backend. Must match the string used in the PTE file.
   const char* name;
   /// The instance of the backend to use when loading and executing programs.
-  PyTorchBackendInterface* backend;
+  BackendInterface* backend;
 };

 /**
- * Registers the Backend object (i.e. string name and PyTorchBackendInterface
- * pair) so that it could be called via the name during the runtime.
+ * Registers the Backend object (i.e. string name and BackendInterface pair) so
+ * that it could be called via the name during the runtime.
  *
  * @param[in] backend Backend object
  * @retval Error code representing whether registration was successful.
@@ -151,8 +150,8 @@ using ::executorch::runtime::Backend;
 using ::executorch::runtime::CompileSpec;
 using ::executorch::runtime::DelegateHandle;
 using ::executorch::runtime::get_backend_class;
-using ::executorch::runtime::PyTorchBackendInterface;
 using ::executorch::runtime::register_backend;
 using ::executorch::runtime::SizedBuffer;
+using PyTorchBackendInterface = ::executorch::runtime::BackendInterface;
 } // namespace executor
 } // namespace torch
diff --git a/runtime/executor/method.cpp b/runtime/executor/method.cpp
index 717f6fb7f7..d39ba87553 100644
--- a/runtime/executor/method.cpp
+++ b/runtime/executor/method.cpp
@@ -58,7 +58,7 @@ class BackendDelegate final {
     ET_CHECK_OR_RETURN_ERROR(
         delegate.id() != nullptr, InvalidProgram, "Missing backend id");
     const char* backend_id = delegate.id()->c_str();
-    PyTorchBackendInterface* backend = get_backend_class(backend_id);
+    BackendInterface* backend = get_backend_class(backend_id);
     ET_CHECK_OR_RETURN_ERROR(
         backend != nullptr,
         NotFound,
@@ -198,7 +198,7 @@ class BackendDelegate final {
   }

   FreeableBuffer segment_;
-  const PyTorchBackendInterface* backend_;
+  const BackendInterface* backend_;
   DelegateHandle* handle_;
 };
diff --git a/runtime/executor/test/backend_integration_test.cpp b/runtime/executor/test/backend_integration_test.cpp
index e3902bb9bc..9180d77aa3 100644
--- a/runtime/executor/test/backend_integration_test.cpp
+++ b/runtime/executor/test/backend_integration_test.cpp
@@ -31,6 +31,7 @@ using namespace ::testing;
 using exec_aten::ArrayRef;
 using executorch::runtime::BackendExecutionContext;
 using executorch::runtime::BackendInitContext;
+using executorch::runtime::BackendInterface;
 using executorch::runtime::CompileSpec;
 using executorch::runtime::DataLoader;
 using executorch::runtime::DelegateHandle;
@@ -40,7 +41,6 @@ using executorch::runtime::FreeableBuffer;
 using executorch::runtime::MemoryAllocator;
 using executorch::runtime::Method;
 using executorch::runtime::Program;
-using executorch::runtime::PyTorchBackendInterface;
 using executorch::runtime::Result;
 using executorch::runtime::testing::ManagedMemoryManager;
 using torch::executor::util::FileDataLoader;
@@ -48,9 +48,9 @@ using torch::executor::util::FileDataLoader;
 /**
  * A backend class whose methods can be overridden individually.
  */
-class StubBackend final : public PyTorchBackendInterface {
+class StubBackend final : public BackendInterface {
  public:
-  // Function signature types that match the PyTorchBackendInterface methods.
+  // Function signature types that match the BackendInterface methods.
   using IsAvailableFn = std::function;
   using InitFn = std::function(
       FreeableBuffer*,
@@ -325,7 +325,7 @@ class BackendIntegrationTest : public ::testing::TestWithParam {
 };

 TEST_P(BackendIntegrationTest, BackendIsPresent) {
-  PyTorchBackendInterface* backend =
+  BackendInterface* backend =
       executorch::runtime::get_backend_class(StubBackend::kName);
   ASSERT_EQ(backend, &StubBackend::singleton());
 }
diff --git a/runtime/executor/test/test_backend_compiler_lib.cpp b/runtime/executor/test/test_backend_compiler_lib.cpp
index 20028b2dc5..7bfd7689a4 100644
--- a/runtime/executor/test/test_backend_compiler_lib.cpp
+++ b/runtime/executor/test/test_backend_compiler_lib.cpp
@@ -17,13 +17,13 @@ using executorch::runtime::ArrayRef;
 using executorch::runtime::Backend;
 using executorch::runtime::BackendExecutionContext;
 using executorch::runtime::BackendInitContext;
+using executorch::runtime::BackendInterface;
 using executorch::runtime::CompileSpec;
 using executorch::runtime::DelegateHandle;
 using executorch::runtime::Error;
 using executorch::runtime::EValue;
 using executorch::runtime::FreeableBuffer;
 using executorch::runtime::MemoryAllocator;
-using executorch::runtime::PyTorchBackendInterface;
 using executorch::runtime::Result;

 struct DemoOp {
@@ -38,7 +38,7 @@ struct DemoOpList {
   size_t numops;
 };

-class BackendWithCompiler final : public PyTorchBackendInterface {
+class BackendWithCompiler final : public BackendInterface {
   int max_shape = 4;

  public:
diff --git a/runtime/executor/test/test_backend_with_delegate_mapping.cpp b/runtime/executor/test/test_backend_with_delegate_mapping.cpp
index ba580c98d7..ead99c1305 100644
--- a/runtime/executor/test/test_backend_with_delegate_mapping.cpp
+++ b/runtime/executor/test/test_backend_with_delegate_mapping.cpp
@@ -18,13 +18,13 @@ using executorch::runtime::ArrayRef;
 using executorch::runtime::Backend;
 using executorch::runtime::BackendExecutionContext;
 using executorch::runtime::BackendInitContext;
+using executorch::runtime::BackendInterface;
 using executorch::runtime::CompileSpec;
 using executorch::runtime::DelegateHandle;
 using executorch::runtime::Error;
 using executorch::runtime::EValue;
 using executorch::runtime::FreeableBuffer;
 using executorch::runtime::MemoryAllocator;
-using executorch::runtime::PyTorchBackendInterface;
 using executorch::runtime::Result;

 struct DemoOp {
@@ -37,7 +37,7 @@ struct DemoOpList {
   size_t numops;
 };

-class BackendWithDelegateMapping final : public PyTorchBackendInterface {
+class BackendWithDelegateMapping final : public BackendInterface {
  public:
   ~BackendWithDelegateMapping() override = default;
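For reviewers who want to see the renamed API from a delegate author's point of view, below is a minimal sketch of a backend written against `BackendInterface` as declared in `runtime/backend/interface.h` after this change. `MyBackend`, the `"MyBackendId"` string, and the pass-through `init()` are illustrative placeholders only; they are not part of this diff, and a real backend would do actual work in `init()`/`execute()`.

```cpp
// Illustrative sketch only: "MyBackend" / "MyBackendId" are made-up names.
// Method signatures follow runtime/backend/interface.h as of this change.
#include <executorch/runtime/backend/interface.h>

namespace example {

using executorch::runtime::ArrayRef;
using executorch::runtime::Backend;
using executorch::runtime::BackendExecutionContext;
using executorch::runtime::BackendInitContext;
using executorch::runtime::BackendInterface;
using executorch::runtime::CompileSpec;
using executorch::runtime::DelegateHandle;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::register_backend;
using executorch::runtime::Result;

class MyBackend final : public BackendInterface {
 public:
  ~MyBackend() override = default;

  // Lets Method loading fail fast when the delegate cannot run on this device.
  bool is_available() const override {
    return true;
  }

  // Called every time the program is initialized. `processed` is the blob the
  // ahead-of-time preprocess step produced; this sketch simply reuses it as
  // the opaque delegate handle.
  Result<DelegateHandle*> init(
      BackendInitContext& context,
      FreeableBuffer* processed,
      ArrayRef<CompileSpec> compile_specs) const override {
    (void)context;
    (void)compile_specs;
    return processed;
  }

  // Runs the delegated subgraph. A real backend would decode `handle` and
  // write results into the output EValues in `args`.
  Error execute(
      BackendExecutionContext& context,
      DelegateHandle* handle,
      EValue** args) const override {
    (void)context;
    (void)handle;
    (void)args;
    return Error::Ok;
  }
};

namespace {
// Static registration: the name must match the backend id recorded in the
// PTE file by the ahead-of-time partitioner.
MyBackend backend_impl;
Backend backend_entry{"MyBackendId", &backend_impl};
static auto registered = register_backend(backend_entry);
} // namespace

} // namespace example
```

Out-of-tree code that still spells `torch::executor::PyTorchBackendInterface` keeps compiling through the alias added at the bottom of `interface.h`, so within that legacy namespace the rename is source-compatible.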