diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index dc584a9799079c..585ee997e3b585 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -24,8 +24,6 @@ jobs: run: | python3 -m pip install --upgrade pip python3 -m pip install -r ./src/bindings/python/requirements.txt - # Add for -DENABLE_PYTHON=ON, no cython - python3 -m pip install -r ./src/bindings/python/src/compatibility/openvino/requirements-dev.txt # Run cmake with -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT in order to enable codestyle check for ITT collector - name: CMake configure diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 50114d986b23c6..9e38183cf186ea 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -41,8 +41,6 @@ jobs: python3 -m pip install --upgrade pip python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/wheel/requirements-dev.txt python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/requirements.txt - # For running Python API tests - python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running Paddle frontend unit tests python3 -m pip install -r ${{ github.workspace }}/src/frontends/paddle/tests/requirements.txt # For running ONNX frontend unit tests diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 19d32ef74e07c9..84434981be989d 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -94,7 +94,6 @@ jobs: python3 -m pip install -U pip # For Python API: build and wheel packaging python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index f16b97d20c5c3b..51a59b35f51cd6 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -128,7 +128,6 @@ jobs: run: | # For Python API: build and wheel packaging python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt @@ -204,7 +203,6 @@ jobs: run: | /usr/bin/python3.8 -m pip install -U pip /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt cmake -UPYTHON* \ -DENABLE_PYTHON_PACKAGING=ON \ -DENABLE_TESTS=OFF \ diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index b8d7709fd36a62..fd8403e0de6c53 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -127,7 +127,6 @@ jobs: run: | # For Python API: build and wheel packaging python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install 
--force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt @@ -203,7 +202,6 @@ jobs: run: | /usr/bin/python3.8 -m pip install -U pip /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt cmake -UPYTHON* \ -DENABLE_PYTHON_PACKAGING=ON \ -DENABLE_TESTS=OFF \ diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml index 5db7ed22a02707..088fddccf1b210 100644 --- a/.github/workflows/linux_riscv.yml +++ b/.github/workflows/linux_riscv.yml @@ -108,7 +108,6 @@ jobs: python3 -m venv ${OPENVINO_BUILD_DIR}/env source ${OPENVINO_BUILD_DIR}/env/bin/activate python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt python3 -m pip install conan - name: Install RISC-V native debian packages diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index c61fe6a4a9cc83..0165980d1b2f57 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -116,9 +116,6 @@ jobs: python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt - # For running Python API tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt - # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 02d494c7c16522..64873a9b104138 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -115,9 +115,6 @@ jobs: python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt - # For running Python API tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt - # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt diff --git a/.github/workflows/py_checks.yml b/.github/workflows/py_checks.yml index 80d6cad5243af3..56b58faacf0c16 100644 --- a/.github/workflows/py_checks.yml +++ b/.github/workflows/py_checks.yml @@ -53,42 +53,6 @@ jobs: name: samples_diff path: samples_diff.diff - # IE Python API Flake code-style - - name: Run flake8 on IE Python API - run: python -m flake8 ./ --config=setup.cfg - working-directory: src/bindings/python/src/compatibility/openvino - - - name: Create code style diff for IE Python API - if: failure() - run: | - python -m black -l 160 -S ./ - git diff > ie_python_diff.diff - working-directory: src/bindings/python/src/compatibility/openvino - - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: ie_python_diff - path: ie_python_diff.diff - - # nGraph Python API Flake code-style - - name: Run flake8 on nGraph Python API - run: python -m flake8 ./src/compatibility/ngraph --config=setup.cfg - working-directory: src/bindings/python - - - name: Create code style diff for nGraph Python API - if: failure() - run: | - python -m black -l 160 -S ./ - git diff 
> pyngraph_diff.diff - working-directory: src/bindings/python/src/compatibility/ngraph - - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: pyngraph_diff - path: pyngraph_diff.diff - # Python API 2.0 Flake code-style - name: Run flake8 on Python API 2.0 run: python -m flake8 ./src/openvino --config=setup.cfg @@ -131,25 +95,11 @@ jobs: run: python -m flake8 tests/ --config=setup.cfg working-directory: src/bindings/python - # IE Python API mypy check - - name: Run mypy on IE Python API - run: python -m mypy ./ --config-file ./setup.cfg - working-directory: src/bindings/python/src/compatibility/openvino - - # nGraph Python API mypy check - - name: Run mypy on nGraph Python API - run: python -m mypy ./src/compatibility/ngraph --config-file ./setup.cfg - working-directory: src/bindings/python - # Python API 2.0 mypy check - name: Run mypy on Python API 2.0 run: python -m mypy ./src/openvino --config-file ./setup.cfg working-directory: src/bindings/python - - name: Run Bandit - run: python -m bandit -r ./ -f screen - working-directory: src/bindings/python/src/compatibility/openvino - # layer_tests Flake code-style - name: Run flake8 on python tests in openvino/tests/layer_tests run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a294f6fb747000..06b36f7725d1ca 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -99,7 +99,6 @@ jobs: run: | # For Python API: build and wheel packaging python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index 30abedbe2c30b7..2e2d6786bfa476 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -62,8 +62,6 @@ endif() # Check python requirements # -set(ie_build_python_req "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/requirements-dev.txt") - function(ov_check_python_build_conditions) # user explicitly specified ENABLE_PYTHON=ON if(ENABLE_PYTHON) @@ -94,26 +92,6 @@ function(ov_check_python_build_conditions) message(${message_mode} "Python 3.x Interpreter and Development.Module components are not found. 
OpenVINO Python API will be turned off (ENABLE_PYTHON is OFF)") endif() - # check for Cython requirement for build IE API 1.0 - ov_check_pip_packages(REQUIREMENTS_FILE ${ie_build_python_req} - RESULT_VAR ie_build_python_req_FOUND - WARNING_MESSAGE "install python3 -m pip install -r ${ie_build_python_req} for IE API 1.0 requirements" - MESSAGE_MODE TRACE) - - # cython can be installed as a debian package, so pip requirements can be unsatisfied - # so, let's check to find cython anyway - if(NOT ie_build_python_req_FOUND) - find_package(Cython QUIET - PATHS "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/cmake" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - if(CYTHON_VERSION VERSION_GREATER_EQUAL 0.29) - set(ie_build_python_req_FOUND ON) - else() - message(${message_mode} "Python requirements '${ie_build_python_req}' are missed, IE Python API 1.0 will not be built (ENABLE_PYTHON is OFF)") - endif() - endif() - if(NOT OV_GENERATOR_MULTI_CONFIG AND CMAKE_BUILD_TYPE STREQUAL "Debug" AND CMAKE_DEBUG_POSTFIX) set(python_debug ON) message(${message_mode} "Building python bindings in debug configuration is not supported on your platform (ENABLE_PYTHON is OFF)") @@ -121,15 +99,12 @@ function(ov_check_python_build_conditions) set(python_debug OFF) endif() - if((Python3_Development.Module_FOUND OR Python3_Development_FOUND) AND - ie_build_python_req_FOUND AND NOT python_debug) + if((Python3_Development.Module_FOUND OR Python3_Development_FOUND) AND NOT python_debug) set(ENABLE_PYTHON_DEFAULT ON PARENT_SCOPE) else() set(ENABLE_PYTHON_DEFAULT OFF PARENT_SCOPE) endif() - # to disable API 1.0 - set(ie_build_python_req_FOUND ${ie_build_python_req_FOUND} PARENT_SCOPE) endfunction() ov_check_python_build_conditions() @@ -155,7 +130,6 @@ function(ov_check_init_files_alignment init_files) endfunction() set(INIT_FILES_RUNTIME "${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" - "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py") @@ -209,7 +183,7 @@ ov_dependent_option(ENABLE_WHEEL "Build wheel packages for PyPI" ${ENABLE_WHEEL_ if(NOT ENABLE_PYTHON) if(CMAKE_SOURCE_DIR STREQUAL OpenVINOPython_SOURCE_DIR) - message(FATAL_ERROR "Python OpenVINO API build requirements are not satisfied. Please, install ${ie_build_python_req}") + message(FATAL_ERROR "Python OpenVINO API build requirements are not satisfied.") else() return() endif() @@ -236,15 +210,8 @@ if(NOT pybind11_FOUND) add_subdirectory(thirdparty/pybind11 EXCLUDE_FROM_ALL) endif() -add_subdirectory(src/compatibility/pyngraph) add_subdirectory(src/pyopenvino) -if(ie_build_python_req_FOUND) - add_subdirectory(src/compatibility/openvino) -else() - message(WARNING "NOTE: Python API for OpenVINO 1.0 is disabled") -endif() - # # Packaging # @@ -312,7 +279,6 @@ macro(ov_define_setup_py_dependencies) endif() endforeach() - file(GLOB_RECURSE compat_ngraph_py_files ${OpenVINOPython_SOURCE_DIR}/src/compatibility/*.py) file(GLOB_RECURSE openvino_py_files ${OpenVINOPython_SOURCE_DIR}/src/openvino/*.py) list(APPEND ov_setup_py_deps diff --git a/src/bindings/python/README.md b/src/bindings/python/README.md index 620bf87c375ce2..c741f14776a68a 100644 --- a/src/bindings/python/README.md +++ b/src/bindings/python/README.md @@ -21,7 +21,6 @@ OpenVINO PYTHON API has the following structure: * [docs](./docs/) - folder that contains developer documentation and code examples. 
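The CMake block removed above previously gated `ENABLE_PYTHON` on `ov_check_pip_packages()` plus a Cython fallback. For illustration only, a rough Python sketch of that kind of "are the pip requirements installed?" check (the function name and the simplified parsing are assumptions, not OpenVINO code):

```python
from importlib.metadata import PackageNotFoundError, version
from pathlib import Path

def requirements_satisfied(requirements_file: str) -> bool:
    """Return True when every distribution listed in the file is installed (versions not checked)."""
    for raw in Path(requirements_file).read_text().splitlines():
        line = raw.split("#", 1)[0].strip()      # drop comments and surrounding whitespace
        if not line or line.startswith("-"):     # skip blanks and pip options such as -r / -c
            continue
        name = line.split(";", 1)[0]             # drop environment markers
        for sep in ("==", ">=", "<=", "~=", "!=", ">", "<", "["):
            name = name.split(sep, 1)[0]         # keep only the distribution name
        try:
            version(name.strip())
        except PackageNotFoundError:
            return False
    return True
```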
* [src](./src/) - folder with all source files for Python API. - * [src/compatibility](./src/compatibility/) - sources for compatibility API, including older modules like `ngraph` and `openvino.inference_engine`. * [src/openvino](./src/openvino/) - Python sources. * [src/openvino/preprocess](./src/openvino/preprocess/) - Torchvision to OpenVINO preprocessing converter. * [src/pyopenvino](./src/pyopenvino/) - C++ sources. diff --git a/src/bindings/python/docs/build.md b/src/bindings/python/docs/build.md index 4786332647bd6b..d0ab2e5f5f4e57 100644 --- a/src/bindings/python/docs/build.md +++ b/src/bindings/python/docs/build.md @@ -41,7 +41,6 @@ OpenVINO can be built based on specific virtual environments such as [venv](http cd pip install -r src/bindings/python/requirements.txt pip install -r src/bindings/python/requirements_test.txt - pip install -r src/bindings/python/src/compatibility/openvino/requirements-dev.txt ``` If `-DENABLE_WHEEL=ON` flag is present in `cmake` command, additionally install wheel requirements: ``` diff --git a/src/bindings/python/setup.cfg b/src/bindings/python/setup.cfg index dd8b0a75c27814..7fc407ad56e7a1 100644 --- a/src/bindings/python/setup.cfg +++ b/src/bindings/python/setup.cfg @@ -67,7 +67,6 @@ docstring-convention = google enable-extensions = G per-file-ignores = src/openvino/runtime/*/ops.py: VNE001,VNE003 - src/compatibility/ngraph/*: C101,C812,C819,CCE001,E800,N806,P101,RST201,RST202,RST203,RST206,VNE001,VNE003 src/openvino/preprocess/torchvision/*: N801, VNE001 *__init__.py: F401 diff --git a/src/bindings/python/src/compatibility/ngraph/__init__.py b/src/bindings/python/src/compatibility/ngraph/__init__.py deleted file mode 100644 index 53f10b7c60a549..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/__init__.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""ngraph module namespace, exposing factory functions for all ops and other classes.""" -# noqa: F401 - -try: - from ngraph.impl import util - - __version__ = util.get_ngraph_version_string() -except ImportError: - __version__ = "0.0.0.dev0" - - -from ngraph.impl import Dimension -from ngraph.impl import Function -from ngraph.impl import Node -from ngraph.impl import PartialShape -from ngraph.helpers import function_from_cnn -from ngraph.helpers import function_to_cnn -from ngraph.opset11 import absolute -from ngraph.opset11 import absolute as abs -from ngraph.opset11 import acos -from ngraph.opset11 import acosh -from ngraph.opset11 import adaptive_avg_pool -from ngraph.opset11 import adaptive_max_pool -from ngraph.opset11 import add -from ngraph.opset11 import asin -from ngraph.opset11 import asinh -from ngraph.opset11 import assign -from ngraph.opset11 import atan -from ngraph.opset11 import atanh -from ngraph.opset11 import avg_pool -from ngraph.opset11 import batch_norm_inference -from ngraph.opset11 import batch_to_space -from ngraph.opset11 import binary_convolution -from ngraph.opset11 import broadcast -from ngraph.opset11 import bucketize -from ngraph.opset11 import ceiling -from ngraph.opset11 import ceiling as ceil -from ngraph.opset11 import clamp -from ngraph.opset11 import concat -from ngraph.opset11 import constant -from ngraph.opset11 import convert -from ngraph.opset11 import convert_like -from ngraph.opset11 import convolution -from ngraph.opset11 import convolution_backprop_data -from ngraph.opset11 import cos -from ngraph.opset11 import cosh -from ngraph.opset11 import ctc_greedy_decoder 
-from ngraph.opset11 import ctc_greedy_decoder_seq_len -from ngraph.opset11 import ctc_loss -from ngraph.opset11 import cum_sum -from ngraph.opset11 import cum_sum as cumsum -from ngraph.opset11 import deformable_convolution -from ngraph.opset11 import deformable_psroi_pooling -from ngraph.opset11 import depth_to_space -from ngraph.opset11 import detection_output -from ngraph.opset11 import dft -from ngraph.opset11 import divide -from ngraph.opset11 import einsum -from ngraph.opset11 import elu -from ngraph.opset11 import embedding_bag_offsets_sum -from ngraph.opset11 import embedding_bag_packed_sum -from ngraph.opset11 import embedding_segments_sum -from ngraph.opset11 import extract_image_patches -from ngraph.opset11 import equal -from ngraph.opset11 import erf -from ngraph.opset11 import exp -from ngraph.opset11 import eye -from ngraph.opset11 import fake_quantize -from ngraph.opset11 import floor -from ngraph.opset11 import floor_mod -from ngraph.opset11 import gather -from ngraph.opset11 import gather_elements -from ngraph.opset11 import gather_nd -from ngraph.opset11 import gather_tree -from ngraph.opset11 import gelu -from ngraph.opset11 import generate_proposals -from ngraph.opset11 import greater -from ngraph.opset11 import greater_equal -from ngraph.opset11 import grid_sample -from ngraph.opset11 import grn -from ngraph.opset11 import group_convolution -from ngraph.opset11 import group_convolution_backprop_data -from ngraph.opset11 import gru_cell -from ngraph.opset11 import gru_sequence -from ngraph.opset11 import hard_sigmoid -from ngraph.opset11 import hsigmoid -from ngraph.opset11 import hswish -from ngraph.opset11 import idft -from ngraph.opset11 import if_op -from ngraph.opset11 import interpolate -from ngraph.opset11 import irdft -from ngraph.opset11 import is_finite -from ngraph.opset11 import is_inf -from ngraph.opset11 import is_nan -from ngraph.opset11 import i420_to_bgr -from ngraph.opset11 import i420_to_rgb -from ngraph.opset11 import less -from ngraph.opset11 import less_equal -from ngraph.opset11 import log -from ngraph.opset11 import logical_and -from ngraph.opset11 import logical_not -from ngraph.opset11 import logical_or -from ngraph.opset11 import logical_xor -from ngraph.opset11 import log_softmax -from ngraph.opset11 import loop -from ngraph.opset11 import lrn -from ngraph.opset11 import lstm_cell -from ngraph.opset11 import lstm_sequence -from ngraph.opset11 import matmul -from ngraph.opset11 import matrix_nms -from ngraph.opset11 import max_pool -from ngraph.opset11 import maximum -from ngraph.opset11 import minimum -from ngraph.opset11 import mish -from ngraph.opset11 import mod -from ngraph.opset11 import multiclass_nms -from ngraph.opset11 import multiply -from ngraph.opset11 import mvn -from ngraph.opset11 import negative -from ngraph.opset11 import non_max_suppression -from ngraph.opset11 import non_zero -from ngraph.opset11 import normalize_l2 -from ngraph.opset11 import not_equal -from ngraph.opset11 import nv12_to_bgr -from ngraph.opset11 import nv12_to_rgb -from ngraph.opset11 import one_hot -from ngraph.opset11 import pad -from ngraph.opset11 import parameter -from ngraph.opset11 import power -from ngraph.opset11 import prelu -from ngraph.opset11 import prior_box -from ngraph.opset11 import prior_box_clustered -from ngraph.opset11 import psroi_pooling -from ngraph.opset11 import proposal -from ngraph.opset11 import random_uniform -from ngraph.opset11 import range -from ngraph.opset11 import rdft -from ngraph.opset11 import read_value -from 
ngraph.opset11 import reduce_l1 -from ngraph.opset11 import reduce_l2 -from ngraph.opset11 import reduce_logical_and -from ngraph.opset11 import reduce_logical_or -from ngraph.opset11 import reduce_max -from ngraph.opset11 import reduce_mean -from ngraph.opset11 import reduce_min -from ngraph.opset11 import reduce_prod -from ngraph.opset11 import reduce_sum -from ngraph.opset11 import region_yolo -from ngraph.opset11 import reorg_yolo -from ngraph.opset11 import relu -from ngraph.opset11 import reshape -from ngraph.opset11 import result -from ngraph.opset11 import reverse_sequence -from ngraph.opset11 import rnn_cell -from ngraph.opset11 import rnn_sequence -from ngraph.opset11 import roi_align -from ngraph.opset11 import roi_pooling -from ngraph.opset11 import roll -from ngraph.opset11 import round -from ngraph.opset11 import scatter_elements_update -from ngraph.opset11 import scatter_update -from ngraph.opset11 import select -from ngraph.opset11 import selu -from ngraph.opset11 import shape_of -from ngraph.opset11 import shuffle_channels -from ngraph.opset11 import sigmoid -from ngraph.opset11 import sign -from ngraph.opset11 import sin -from ngraph.opset11 import sinh -from ngraph.opset11 import slice -from ngraph.opset11 import softmax -from ngraph.opset11 import softplus -from ngraph.opset11 import softsign -from ngraph.opset11 import space_to_batch -from ngraph.opset11 import space_to_depth -from ngraph.opset11 import split -from ngraph.opset11 import sqrt -from ngraph.opset11 import squared_difference -from ngraph.opset11 import squeeze -from ngraph.opset11 import strided_slice -from ngraph.opset11 import subtract -from ngraph.opset11 import swish -from ngraph.opset11 import tan -from ngraph.opset11 import tanh -from ngraph.opset11 import tensor_iterator -from ngraph.opset11 import tile -from ngraph.opset11 import topk -from ngraph.opset11 import transpose -from ngraph.opset11 import unique -from ngraph.opset11 import unsqueeze -from ngraph.opset11 import variadic_split - -import warnings - -warnings.warn( - message="OpenVINO nGraph Python API is deprecated and will be removed in 2024.0 release." - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html", - category=FutureWarning, - stacklevel=2, -) - -# Extend Node class to support binary operators -Node.__add__ = add -Node.__sub__ = subtract -Node.__mul__ = multiply -Node.__div__ = divide -Node.__truediv__ = divide -Node.__radd__ = lambda left, right: add(right, left) -Node.__rsub__ = lambda left, right: subtract(right, left) -Node.__rmul__ = lambda left, right: multiply(right, left) -Node.__rdiv__ = lambda left, right: divide(right, left) -Node.__rtruediv__ = lambda left, right: divide(right, left) -Node.__eq__ = equal -Node.__ne__ = not_equal -Node.__lt__ = less -Node.__le__ = less_equal -Node.__gt__ = greater -Node.__ge__ = greater_equal diff --git a/src/bindings/python/src/compatibility/ngraph/exceptions.py b/src/bindings/python/src/compatibility/ngraph/exceptions.py deleted file mode 100644 index 1597b6ec662db1..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/exceptions.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""ngraph exceptions hierarchy. 
All exceptions are descendants of NgraphError.""" - - -class NgraphError(Exception): - """Base class for Ngraph exceptions.""" - - -class UserInputError(NgraphError): - """User provided unexpected input.""" - - -class NgraphTypeError(NgraphError, TypeError): - """Type mismatch error.""" diff --git a/src/bindings/python/src/compatibility/ngraph/helpers.py b/src/bindings/python/src/compatibility/ngraph/helpers.py deleted file mode 100644 index 236adec6d8b374..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/helpers.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""nGraph helper functions.""" - -from typing import Union - -from ngraph.impl import Function -from openvino.inference_engine import IENetwork # type: ignore - - -def function_from_cnn(cnn_network: IENetwork) -> Function: - """Get nGraph function from Inference Engine CNN network.""" - capsule = cnn_network._get_function_capsule() - ng_function = Function.from_capsule(capsule) - return ng_function - - -def function_to_cnn(ng_function: Function) -> Function: - """Get Inference Engine CNN network from nGraph function.""" - capsule = Function.to_capsule(ng_function) - return IENetwork(capsule) diff --git a/src/bindings/python/src/compatibility/ngraph/impl/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/__init__.py deleted file mode 100644 index f06340f795e93b..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Package: ngraph -Low level wrappers for the nGraph c++ api. -""" - -# flake8: noqa - -import os -import sys - -if sys.platform == "win32": - # Installer, yum, pip installs openvino dlls to the different directories - # and those paths need to be visible to the openvino modules - # - # If you're using a custom installation of openvino, - # add the location of openvino dlls to your system PATH. - # - # looking for the libs in the pip installation path by default. - openvino_libs = [os.path.join(os.path.dirname(__file__), '..', '..', '..'), os.path.join(os.path.dirname(__file__), '..', '..', 'openvino', 'libs')] - # setupvars.bat script set all libs paths to OPENVINO_LIB_PATHS environment variable. - openvino_libs_installer = os.getenv('OPENVINO_LIB_PATHS') - if openvino_libs_installer: - openvino_libs.extend(openvino_libs_installer.split(';')) - for lib in openvino_libs: - lib_path = os.path.join(os.path.dirname(__file__), lib) - if os.path.isdir(lib_path): - # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH. 
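For illustration only: the `function_from_cnn()` / `function_to_cnn()` capsule bridging removed in `helpers.py` above has no counterpart in API 2.0, where `Core.read_model()` already returns an `ov.Model`. The snippet assumes the 2023-era `openvino.runtime` layout and uses a placeholder model path and device name:

```python
import openvino.runtime as ov

core = ov.Core()
model = core.read_model("model.xml")          # ov.Model directly, no IENetwork round-trip
compiled = core.compile_model(model, "CPU")   # ready for inference
```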
- if (3, 8) <= sys.version_info: - os.add_dll_directory(os.path.abspath(lib_path)) - else: - os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"] - -from _pyngraph import Dimension -from _pyngraph import Function -from _pyngraph import Input -from _pyngraph import Output -from _pyngraph import Node -from _pyngraph import Type -from _pyngraph import PartialShape -from _pyngraph import Shape -from _pyngraph import Strides -from _pyngraph import CoordinateDiff -from _pyngraph import AxisSet -from _pyngraph import AxisVector -from _pyngraph import Coordinate -from _pyngraph import Output -from _pyngraph import DiscreteTypeInfo -from _pyngraph import util diff --git a/src/bindings/python/src/compatibility/ngraph/impl/op/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/op/__init__.py deleted file mode 100644 index e854c97eab8ff6..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/op/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Package: ngraph.op -Low level wrappers for the nGraph c++ api in ngraph::op. -""" - -# flake8: noqa - -import numpy as np - -from _pyngraph.op import Constant - -"""Retrieve Constant inner data. - - Internally uses PyBind11 Numpy's buffer protocol. - - :return Numpy array containing internally stored constant data. -""" -Constant.get_data = lambda self: np.array(self, copy=True) - -from _pyngraph.op import Parameter diff --git a/src/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py deleted file mode 100644 index 85e838f0aab75f..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Package: ngraph.op.util -Low level wrappers for the nGraph c++ api in ngraph::op::util. 
-""" -# flake8: noqa - -from _pyngraph.op.util import UnaryElementwiseArithmetic -from _pyngraph.op.util import BinaryElementwiseComparison -from _pyngraph.op.util import BinaryElementwiseArithmetic -from _pyngraph.op.util import BinaryElementwiseLogical -from _pyngraph.op.util import OpAnnotations -from _pyngraph.op.util import ArithmeticReduction -from _pyngraph.op.util import IndexReduction diff --git a/src/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py deleted file mode 100644 index 5ea0b0618b0a88..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# flake8: noqa - -from _pyngraph.passes import Manager diff --git a/src/bindings/python/src/compatibility/ngraph/opset1/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset1/__init__.py deleted file mode 100644 index 1d960ff8bbe350..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset1/__init__.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset1.ops import atan -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset1.ops import binary_convolution -from ngraph.opset1.ops import broadcast -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset1.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import 
max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset1.ops import negative -from ngraph.opset1.ops import non_max_suppression -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset1.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset1.ops import shape_of -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset1.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset1/ops.py b/src/bindings/python/src/compatibility/ngraph/opset1/ops.py deleted file mode 100644 index 72a2831d8b246f..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset1/ops.py +++ /dev/null @@ -1,2772 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, PartialShape, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - - 
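For illustration only: the deprecated factory helpers defined below have direct API 2.0 counterparts. Assuming the 2023-era `openvino.runtime.opset11` module, a tiny `|x| + 1` graph can be built like this:

```python
import numpy as np
import openvino.runtime.opset11 as ops
from openvino.runtime import Model

x = ops.parameter([2, 2], np.float32, name="x")
y = ops.add(ops.absolute(x), ops.constant(np.ones((2, 2), dtype=np.float32)))
model = Model([y], [x], "abs_plus_one")
```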
-_get_node_factory_opset1 = partial(_get_node_factory, "opset1") - -# -------------------------------------------- ops ------------------------------------------------ - - -@unary_op -def absolute(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies f(x) = abs(x) to the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with Abs operation applied on it. - """ - return _get_node_factory_opset1().create("Abs", [node]) - - -@unary_op -def acos(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse cosine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arccos operation applied on it. - """ - return _get_node_factory_opset1().create("Acos", [node]) - - -@binary_op -def add( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which applies f(x) = A+B to the input nodes element-wise.""" - return _get_node_factory_opset1().create("Add", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@unary_op -def asin(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse sine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arcsin operation applied on it. - """ - return _get_node_factory_opset1().create("Asin", [node]) - - -@unary_op -def atan(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse tangent function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arctan operation applied on it. - """ - return _get_node_factory_opset1().create("Atan", [node]) - - -@nameable_op -def avg_pool( - data_batch: NodeInput, - strides: List[int], - pads_begin: TensorShape, - pads_end: TensorShape, - kernel_shape: TensorShape, - exclude_pad: bool, - rounding_type: str = "floor", - auto_pad: Optional[str] = None, - name: Optional[str] = None, -) -> Node: - """Return average pooling node. - - :param data_batch: The input node providing data. - :param strides: The window movement strides. - :param pads_begin: The input data optional padding below filled with zeros. - :param pads_end: The input data optional padding below filled with zeros. - :param kernel_shape: The pooling window shape. - :param exclude_pad: Whether or not to include zero padding in average computations. - :param rounding_type: Determines used rounding schema when computing output shape. Acceptable - values are: ['floor', 'ceil'] - :param auto_pad: Determines how the padding is calculated. Acceptable values: - [None, 'same_upper', 'same_lower', 'valid'] - :param name: Optional name for the new output node. - - :return: New node with AvgPool operation applied on its data. 
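For illustration only, a NumPy sketch of the 2x2, stride-2 average pooling semantics described in the `avg_pool` docstring above (no padding is involved, so `exclude_pad` is irrelevant here):

```python
import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)    # NCHW input
pooled = x.reshape(1, 1, 2, 2, 2, 2).mean(axis=(3, 5))     # shape (1, 1, 2, 2)
print(pooled[0, 0])   # [[ 2.5  4.5]
                      #  [10.5 12.5]]
```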
- """ - if auto_pad is None: - auto_pad = "explicit" - return _get_node_factory_opset1().create( - "AvgPool", - [as_node(data_batch)], - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "kernel": kernel_shape, - "exclude-pad": exclude_pad, - "rounding_type": rounding_type.upper(), - "auto_pad": auto_pad.upper(), - }, - ) - - -@nameable_op -def batch_norm_inference( - data: NodeInput, - gamma: NodeInput, - beta: NodeInput, - mean: NodeInput, - variance: NodeInput, - epsilon: float, - name: Optional[str] = None, -) -> Node: - """Perform layer normalizes a input tensor by mean and variance with appling scale and offset. - - :param data: The input tensor with data for normalization. - :param gamma: The scalar scaling for normalized value. - :param beta: The bias added to the scaled normalized value. - :param mean: The value for mean normalization. - :param variance: The value for variance normalization. - :param epsilon: The number to be added to the variance to avoid division - by zero when normalizing a value. - :param name: The optional name of the output node. - :return: The new node which performs BatchNormInference. - """ - inputs = as_nodes(gamma, beta, data, mean, variance) - return _get_node_factory_opset1().create("BatchNormInference", inputs, {"epsilon": epsilon}) - - -@nameable_op -def binary_convolution( - data: NodeInput, - filters: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - dilations: List[int], - mode: str, - pad_value: float, - auto_pad: str = "EXPLICIT", - name: Optional[str] = None, -) -> Node: - """Create node performing convolution with binary weights, binary input and integer output. - - :param data: The node providing data batch tensor. - :param filter: The node providing filters tensor. - :param strides: The kernel window movement strides. - :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param mode: Defines how input tensor 0/1 values and weights 0/1 are interpreted. - :param pad_value: Floating-point value used to fill pad area. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param name: The optional new name for output node. - :return: New node performing binary convolution operation. - """ - return _get_node_factory_opset1().create( - "BinaryConvolution", - as_nodes(data, filters), - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "mode": mode, - "pad_value": pad_value, - "auto_pad": auto_pad, - }, - ) - - -@nameable_op -def broadcast( - data: NodeInput, - target_shape: NodeInput, - axes_mapping: Optional[NodeInput] = None, - mode: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Create a node which broadcasts the input node's values along specified axes to a desired shape. - - :param data: The node with input tensor data. - :param target_shape: The node with a new shape we want to broadcast tensor to. - :param axes_mapping: The node with a axis positions (0-based) in the result - that are being broadcast. - :param mode: The type of broadcasting that specifies mapping of input tensor axes - to output shape axes. Range of values: NUMPY, EXPLICIT. - :param name: Optional new name for output node. - :return: New node with broadcast shape. 
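For illustration only: the "NUMPY" mode used by `broadcast()` above (and by the `auto_broadcast` attribute of the binary ops) follows ordinary NumPy broadcasting rules, e.g. a `(3,)` operand is stretched across a `(2, 3)` array:

```python
import numpy as np

a = np.zeros((2, 3), dtype=np.float32)
b = np.array([1.0, 2.0, 3.0], dtype=np.float32)   # shape (3,)
print((a + b).shape)                              # (2, 3)
```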
- """ - inputs = as_nodes(data, target_shape) - if mode.upper() == "EXPLICIT": - inputs.append(as_node(axes_mapping)) - return _get_node_factory_opset1().create("Broadcast", inputs, {"mode": mode.upper()}) - - -@nameable_op -def ctc_greedy_decoder( - data: NodeInput, - sequence_mask: NodeInput, - merge_repeated: bool = True, - name: Optional[str] = None, -) -> Node: - """Perform greedy decoding on the logits given in input (best path). - - :param data: Logits on which greedy decoding is performed. - :param sequence_mask: The tensor with sequence masks for each sequence in the batch. - :param merge_repeated: The flag for merging repeated labels during the CTC calculation. - :param name: Optional name for output node. - :return: The new node performing an CTCGreedyDecoder operation on input tensor. - """ - node_inputs = as_nodes(data, sequence_mask) - return _get_node_factory_opset1().create("CTCGreedyDecoder", node_inputs, {"ctc_merge_repeated": merge_repeated}) - - -@unary_op -def ceiling(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies ceiling to the input node element-wise. - - :param node: The node providing data to ceiling operation. - :param name: Optional name for output node. - :return: The node performing element-wise ceiling. - """ - return _get_node_factory_opset1().create("Ceiling", [node]) - - -@nameable_op -def clamp(data: NodeInput, min_value: ScalarData, max_value: ScalarData, name: Optional[str] = None) -> Node: - """Perform clamp element-wise on data from input node. - - :param data: Input tensor. One of: input node, array or scalar. - :param min_value: The lower bound of the range. Scalar value. - :param max_value: The upper bound of the range. Scalar value. - :param name: Optional output node name. - :return: The new node performing a clamp operation on its input data element-wise. - - Performs a clipping operation on an input value between a pair of boundary values. - - For each element in `data`, if the element's value is lower than `min_value`, - it will be replaced with `min_value`. If the value is higher than `max_value`, - it will be replaced by `max_value`. - Intermediate values of `data` are returned without change. - - Clamp uses the following logic: - - .. code-block:: python - - if data < min_value: - data=min_value - elif data > max_value: - data=max_value - """ - return _get_node_factory_opset1().create("Clamp", [as_node(data)], {"min": min_value, "max": max_value}) - - -@nameable_op -def concat(nodes: List[NodeInput], axis: int, name: Optional[str] = None) -> Node: - """Concatenate input nodes into single new node along specified axis. - - :param nodes: The nodes we want concatenate into single new node. - :param axis: The axis along which we want to concatenate input nodes. - :param name: The optional new name for output node. - :return: Return new node that is a concatenation of input nodes. - """ - return _get_node_factory_opset1().create("Concat", as_nodes(*nodes), {"axis": axis}) - - -@nameable_op -def constant(value: NumericData, dtype: Optional[NumericType] = None, name: Optional[str] = None) -> Constant: - """Create a Constant node from provided value. - - :param value: One of: array of values or scalar to initialize node with. - :param dtype: The data type of provided data. - :param name: Optional name for output node. - :return: The Constant node initialized with provided data. 
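For illustration only: NumPy's `clip()` reproduces the saturation behaviour spelled out in the `clamp()` pseudo-code above:

```python
import numpy as np

data = np.array([-4.0, 0.5, 9.0], dtype=np.float32)
print(np.clip(data, -1.0, 6.0))   # [-1.   0.5  6. ]
```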
- """ - return make_constant_node(value, dtype) - - -@nameable_op -def convert(data: NodeInput, destination_type: Union[str, NumericType], name: Optional[str] = None) -> Node: - """Return node which casts input node values to specified type. - - :param data: Node which produces the input tensor. - :param destination_type: Provides the target type for the conversion. - :param name: Optional name for the output node. - :return: New node performing the conversion operation. - """ - if not isinstance(destination_type, str): - destination_type = get_element_type_str(destination_type) - return _get_node_factory_opset1().create("Convert", [as_node(data)], {"destination_type": destination_type.lower()}) - - -@binary_op -def convert_like(data: NodeInput, like: NodeInput, name: Optional[str] = None) -> Node: - """Return node which casts data node values to the type of another node. - - :param data: Node which produces the input tensor - :param like: Node which provides the target type information for the conversion - :param name: Optional name for the output node. - :return: New node performing the conversion operation. - """ - return _get_node_factory_opset1().create("ConvertLike", [data, like]) - - -@nameable_op -def convolution( - data: NodeInput, - filters: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - dilations: List[int], - auto_pad: str = "EXPLICIT", - name: Optional[str] = None, -) -> Node: - """Return node performing batched convolution operation. - - :param data: The node providing data batch tensor. - :param filter: The node providing filters tensor. - :param strides: The kernel window movement strides. - :param pads_begin: The number of zero padding elements to add on each axis below 0 coordinate. - :param pads_end: The number of zero padding elements to add on each axis above max coordinate - :param dilations: The data batch dilation strides. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param name: The optional new name for output node. - :return: New node performing batched convolution operation. - """ - return _get_node_factory_opset1().create( - "Convolution", - as_nodes(data, filters), - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "auto_pad": auto_pad, - }, - ) - - -@nameable_op -def convolution_backprop_data( - data: NodeInput, - filters: NodeInput, - strides: List[int], - output_shape: Optional[NodeInput] = None, - pads_begin: Optional[List[int]] = None, - pads_end: Optional[List[int]] = None, - dilations: Optional[List[int]] = None, - auto_pad: Optional[str] = None, - output_padding: Optional[List[int]] = None, - name: Optional[str] = None, -) -> Node: - """Create node performing a batched-convolution backprop data operation. - - :param data: The node producing data from forward-prop - :param filters: The node producing the filters from forward-prop. - :param output_shape: The node producing output delta. - :param strides: The distance (in pixels) to slide the filter on the feature map - over the axes. - :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) - in the filter. - :param name: The node name. - - :return: The node object representing ConvolutionBackpropData operation. 
- """ - spatial_dim_count = len(strides) - if pads_begin is None: - pads_begin = [0] * spatial_dim_count - if pads_end is None: - pads_end = [0] * spatial_dim_count - if dilations is None: - dilations = [1] * spatial_dim_count - if auto_pad is None: - auto_pad = "explicit" - if output_padding is None: - output_padding = [0] * spatial_dim_count - args = as_nodes(data, filters) - if output_shape is not None: - args.append(as_node(output_shape)) - - return _get_node_factory_opset1().create( - "ConvolutionBackpropData", - args, - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "auto_pad": auto_pad.upper(), - "output_padding": output_padding, - }, - ) - - -@unary_op -def cos(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply cosine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with cos operation applied on it. - """ - return _get_node_factory_opset1().create("Cos", [node]) - - -@unary_op -def cosh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic cosine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with cosh operation applied on it. - """ - return _get_node_factory_opset1().create("Cosh", [node]) - - -@nameable_op -def deformable_convolution( - data: NodeInput, - deformable_values: NodeInput, - filters: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - dilations: List[int], - auto_pad: str = "EXPLICIT", - group: int = 1, - deformable_group: int = 1, - name: Optional[str] = None, -) -> Node: - """Create node performing deformable convolution. - - :param data: The node providing data batch tensor. - :param filter: The node providing filters tensor. - :param strides: The distance (in pixels) to slide the filter on the feature map over the axes. - :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param group: The number of groups which both output and input should be split into. - :param deformable_group: The number of groups which deformable values and output should be split - into along the channel axis. - :param name: The optional new name for output node. - :return: New node performing deformable convolution operation. - """ - return _get_node_factory_opset1().create( - "DeformableConvolution", - as_nodes(data, deformable_values, filters), - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "auto_pad": auto_pad, - "group": group, - "deformable_group": deformable_group, - }, - ) - - -@nameable_op -def deformable_psroi_pooling( - feature_maps: NodeInput, - coords: NodeInput, - output_dim: int, - spatial_scale: float, - group_size: int = 1, - mode: str = "bilinear_deformable", - spatial_bins_x: int = 1, - spatial_bins_y: int = 1, - trans_std: float = 1.0, - part_size: int = 1, - offsets: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return node performing DeformablePSROIPooling operation. 
- - DeformablePSROIPooling computes position-sensitive pooling - on regions of interest specified by input. - - :param feature_maps: 4D tensor with feature maps. - :param coords: 2D tensor describing box consisting of tuples: [batch_id, x_1, y_1, x_2, y_2]. - :param output_dim: A pooled output channel number. - :param spatial_scale: A multiplicative spatial scale factor to translate ROI. - :param group_size: The number of groups to encode position-sensitive score. - :param mode: Specifies mode for pooling. Range of values: ['bilinear_deformable']. - :param spatial_bins_x: Specifies numbers of bins to divide the input feature maps over width. - :param spatial_bins_y: Specifies numbers of bins to divide the input feature maps over height. - :param trans_std: The value that all transformation (offset) values are multiplied with. - :param part_size: The number of parts the output tensor spatial dimensions are divided into. - :param offsets: Optional node. 4D input blob with transformation values (offsets). - :param name: The optional new name for output node. - :return: New node performing DeformablePSROIPooling operation. - """ - node_inputs = as_nodes(feature_maps, coords) - if offsets is not None: - node_inputs.append(as_node(offsets)) - - return _get_node_factory_opset1().create( - "DeformablePSROIPooling", - node_inputs, - { - "output_dim": output_dim, - "spatial_scale": spatial_scale, - "group_size": group_size, - "mode": mode, - "spatial_bins_x": spatial_bins_x, - "spatial_bins_y": spatial_bins_y, - "trans_std": trans_std, - "part_size": part_size, - }, - ) - - -@nameable_op -def depth_to_space(node: Node, mode: str, block_size: int = 1, name: Optional[str] = None) -> Node: - """Rearranges input tensor from depth into blocks of spatial data. - - Values from the height and width dimensions are moved to the depth dimension. - - Input tensor has shape [N,C,H,W], where N is the batch axis, C is the channel or depth, - H is the height and W is the width. - - Output node produces a tensor with shape: - - [N, C * `block_size` * `block_size`, H / `block_size`, W / `block_size`] - - :param node: The node with input tensor data. - :param mode: Specifies how the input depth dimension is split to block coordinates - - blocks_first: The input is divided to [block_size, ..., block_size, new_depth] - depth_first: The input is divided to [new_depth, block_size, ..., block_size] - - :param block_size: The size of the spatial block of values describing - how the tensor's data is to be rearranged. - :param name: Optional output node name. - :return: The new node performing an DepthToSpace operation on its input tensor. - """ - return _get_node_factory_opset1().create( - "DepthToSpace", - [node], - {"mode": mode, "block_size": block_size}, - ) - - -@nameable_op -def detection_output( - box_logits: Node, - class_preds: Node, - proposals: Node, - attrs: dict, - aux_class_preds: NodeInput = None, - aux_box_preds: NodeInput = None, - name: Optional[str] = None, -) -> Node: - """Generate the detection output using information on location and confidence predictions. - - :param box_logits: The 2D input tensor with box logits. - :param class_preds: The 2D input tensor with class predictions. - :param proposals: The 3D input tensor with proposals. - :param attrs: The dictionary containing key, value pairs for attributes. - :param aux_class_preds: The 2D input tensor with additional class predictions information. - :param aux_box_preds: The 2D input tensor with additional box predictions information. 
- :param name: Optional name for the output node. - :return: Node representing DetectionOutput operation. - - Available attributes are: - - * num_classes The number of classes to be predicted. - Range of values: positive integer number - Default value: None - Required: yes - - * background_label_id The background label id. - Range of values: integer value - Default value: 0 - Required: no - - * top_k Maximum number of results to be kept per batch after NMS step. - Range of values: integer value - Default value: -1 - Required: no - - * variance_encoded_in_target The flag that denotes if variance is encoded in target. - Range of values: {False, True} - Default value: False - Required: no - - * keep_top_k Maximum number of bounding boxes per batch to be kept after NMS step. - Range of values: integer values - Default value: None - Required: yes - - * code_type The type of coding method for bounding boxes. - Range of values: {'caffe.PriorBoxParameter.CENTER_SIZE', - 'caffe.PriorBoxParameter.CORNER'} - Default value: 'caffe.PriorBoxParameter.CORNER' - Required: no - - * share_location The flag that denotes if bounding boxes are shared among different - classes. - Range of values: {True, False} - Default value: True - Required: no - - * nms_threshold The threshold to be used in the NMS stage. - Range of values: floating point value - Default value: None - Required: yes - - * confidence_threshold Specifies the minimum confidence threshold for detection boxes to be - considered. - Range of values: floating point value - Default value: 0 - Required: no - - * clip_after_nms The flag that denotes whether to perform clip bounding boxes after - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - - * clip_before_nms The flag that denotes whether to perform clip bounding boxes before - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - - * decrease_label_id The flag that denotes how to perform NMS. - Range of values: False - perform NMS like in Caffe*. - True - perform NMS like in MxNet*. - - Default value: False - Required: no - - * normalized The flag that denotes whether input tensors with boxes are normalized. - Range of values: {True, False} - Default value: False - Required: no - - * input_height The input image height. - Range of values: positive integer number - Default value: 1 - Required: no - - * input_width The input image width. - Range of values: positive integer number - Default value: 1 - Required: no - - * objectness_score The threshold to sort out confidence predictions. - Range of values: non-negative float number - Default value: 0 - Required: no - - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'num_classes': 85, - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - } - - attrs = { - 'num_classes': 85, - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - 'normalized': True, - 'clip_before_nms': True, - 'input_height': [32], - 'input_width': [32], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. 
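For illustration only, a simplified stand-in for the attribute validation that `check_valid_attributes()` performs right below, using `(name, required, predicate)` triples instead of the full four-element requirements:

```python
def check_attrs(attrs, requirements):
    for name, required, predicate in requirements:
        if name not in attrs:
            if required:
                raise ValueError(f"required attribute '{name}' is missing")
            continue
        if predicate is not None and not predicate(attrs[name]):
            raise ValueError(f"attribute '{name}' has an invalid value: {attrs[name]!r}")

check_attrs(
    {"num_classes": 85, "keep_top_k": [1, 2, 3], "nms_threshold": 0.645},
    [("num_classes", True, lambda v: v > 0),
     ("keep_top_k", True, None),
     ("nms_threshold", True, None)],
)
```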
- """ - requirements = [ - ("num_classes", True, np.integer, is_positive_value), - ("background_label_id", False, np.integer, None), - ("top_k", False, np.integer, None), - ("variance_encoded_in_target", False, np.bool_, None), - ("keep_top_k", True, np.integer, None), - ("code_type", False, np.str_, None), - ("share_location", False, np.bool_, None), - ("nms_threshold", True, np.floating, None), - ("confidence_threshold", False, np.floating, None), - ("clip_after_nms", False, np.bool_, None), - ("clip_before_nms", False, np.bool_, None), - ("decrease_label_id", False, np.bool_, None), - ("normalized", False, np.bool_, None), - ("input_height", False, np.integer, is_positive_value), - ("input_width", False, np.integer, is_positive_value), - ("objectness_score", False, np.floating, is_non_negative_value), - ] - - check_valid_attributes("DetectionOutput", attrs, requirements) - - inputs = [box_logits, class_preds, proposals] - if aux_class_preds is not None: - inputs.append(aux_class_preds) - if aux_box_preds is not None: - inputs.append(aux_box_preds) - - return _get_node_factory_opset1().create("DetectionOutput", inputs, attrs) - - -@binary_op -def divide( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which applies f(x) = A/B to the input nodes element-wise. - - :param left_node: The node providing dividend data. - :param right_node: The node providing divisor data. - :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. - :param name: Optional name for output node. - :return: The node performing element-wise division. - """ - return _get_node_factory_opset1().create("Divide", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@nameable_op -def elu(data: NodeInput, alpha: NumericType, name: Optional[str] = None) -> Node: - """Perform Exponential Linear Unit operation element-wise on data from input node. - - Computes exponential linear: alpha * (exp(data) - 1) if < 0, data otherwise. - - For more information refer to: - [Fast and Accurate Deep Network Learning by Exponential Linear Units](http://arxiv.org/abs/1511.07289) - - :param data: Input tensor. One of: input node, array or scalar. - :param alpha: Scalar multiplier for negative values. - :param name: Optional output node name. - :return: The new node performing an ELU operation on its input data element-wise. - """ - return _get_node_factory_opset1().create("Elu", [as_node(data)], {"alpha": alpha}) - - -@binary_op -def equal( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which checks if input nodes are equal element-wise. - - :param left_node: The first input node for equal operation. - :param right_node: The second input node for equal operation. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :param name: The optional name for output new node. - :return: The node performing element-wise equality check. - """ - return _get_node_factory_opset1().create("Equal", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@unary_op -def erf(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which calculates Gauss error function element-wise with given tensor. - - :param node: The node providing data for operation. - :param name: The optional name for new output node. 
- :return: The new node performing element-wise Erf operation. - """ - return _get_node_factory_opset1().create("Erf", [node]) - - -@unary_op -def exp(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies exponential function to the input node element-wise. - - :param node: The node providing data for operation. - :param name: The optional name for new output node. - :return: The new node performing natural exponential operation. - """ - return _get_node_factory_opset1().create("Exp", [node]) - - -@nameable_op -def fake_quantize( - data: NodeInput, - input_low: NodeInput, - input_high: NodeInput, - output_low: NodeInput, - output_high: NodeInput, - levels: int, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - r"""Perform an element-wise linear quantization on input data. - - :param data: The node with data tensor. - :param input_low: The node with the minimum for input values. - :param input_high: The node with the maximum for input values. - :param output_low: The node with the minimum quantized value. - :param output_high: The node with the maximum quantized value. - :param levels: The number of quantization levels. Integer value. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :return: New node with quantized value. - - Input floating point values are quantized into a discrete set of floating point values. - - .. code-block:: python - - if x <= input_low: - output = output_low - elif x > input_high: - output = output_high - else: - output = fake_quantize(x) - - Fake quantize uses the following logic: - - .. math:: - - output = - \dfrac{round( \dfrac{(data - input\_low)\cdot (levels-1)}{input\_high - input\_low})} - {levels-1} \cdot (output\_high - output\_low) + output\_low - """ - return _get_node_factory_opset1().create( - "FakeQuantize", - as_nodes(data, input_low, input_high, output_low, output_high), - {"levels": levels, "auto_broadcast": auto_broadcast.upper()}, - ) - - -@unary_op -def floor(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies floor to the input node element-wise. - - :param node: The input node providing data. - :param name: The optional name for new output node. - :return: The node performing element-wise floor operation. - """ - return _get_node_factory_opset1().create("Floor", [node]) - - -@binary_op -def floor_mod( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node performing element-wise FloorMod (division remainder) with two given tensors. - - :param left_node: The first input node for FloorMod operation. - :param right_node: The second input node for FloorMod operation. - :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. - :param name: Optional name for output node. - :return: The node performing element-wise FloorMod operation. - """ - return _get_node_factory_opset1().create("FloorMod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@nameable_op -def gather(data: NodeInput, indices: NodeInput, axis: NodeInput, name: Optional[str] = None) -> Node: - """Return Gather node which takes slices from axis of data according to indices. - - :param data: The tensor from which slices are gathered. - :param indices: Tensor with indexes to gather. - :param axis: The dimension index to gather data from. - :param name: Optional name for output node.
- :return: The new node performing a Gather operation on the data input tensor. - """ - node_inputs = as_nodes(data, indices, axis) - return _get_node_factory_opset1().create("Gather", node_inputs) - - -@nameable_op -def gather_tree( - step_ids: NodeInput, - parent_idx: NodeInput, - max_seq_len: NodeInput, - end_token: NodeInput, - name: Optional[str] = None, -) -> Node: - """Perform GatherTree operation. - - :param step_ids: The tensor with indices per each step. - :param parent_idx: The tensor with parent beam indices. - :param max_seq_len: The tensor with maximum lengths for each sequence in the batch. - :param end_token: The scalar tensor with value of the end marker in a sequence. - :param name: Optional name for output node. - :return: The new node performing a GatherTree operation. - - The GatherTree node generates the complete beams from the indices per each step - and the parent beam indices. - GatherTree uses the following logic: - - .. code-block:: python - - for batch in range(BATCH_SIZE): - for beam in range(BEAM_WIDTH): - max_sequence_in_beam = min(MAX_TIME, max_seq_len[batch]) - - parent = parent_idx[max_sequence_in_beam - 1, batch, beam] - - for level in reversed(range(max_sequence_in_beam - 1)): - final_idx[level, batch, beam] = step_idx[level, batch, parent] - - parent = parent_idx[level, batch, parent] - - """ - node_inputs = as_nodes(step_ids, parent_idx, max_seq_len, end_token) - return _get_node_factory_opset1().create("GatherTree", node_inputs) - - -@binary_op -def greater( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which checks if left input node is greater than the right node element-wise. - - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is greater than right_node. - """ - return _get_node_factory_opset1().create("Greater", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@binary_op -def greater_equal( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which checks if left node is greater or equal to the right node element-wise. - - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is greater than or equal to - right_node. - """ - return _get_node_factory_opset1().create("GreaterEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -def grn(data: Node, bias: float, name: Optional[str] = None) -> Node: - r"""Perform Global Response Normalization with L2 norm (across channels only). - - Computes GRN operation on channels for input tensor: - - .. math:: output_i = \dfrac{input_i}{\sqrt{\sum_{j=1}^{C} input_j^2 + bias}} - - :param data: The node with data tensor. - :param bias: The bias added to the variance. Scalar value. - :param name: Optional output node name. - :return: The new node performing a GRN operation on tensor's channels.
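A NumPy sketch of the normalization described above (assuming, per the `bias` description, that the bias is added under the square root):

.. code-block:: python

    import numpy as np

    # x has layout [N, C, H, W]; each channel vector is scaled by its L2 norm.
    x = np.random.rand(1, 3, 2, 2).astype(np.float32)
    bias = 1e-6
    y = x / np.sqrt(np.sum(x * x, axis=1, keepdims=True) + bias)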
- """ - return _get_node_factory_opset1().create("GRN", [data], {"bias": bias}) - - -@nameable_op -def group_convolution( - data: NodeInput, - filters: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - dilations: List[int], - auto_pad: str = "EXPLICIT", - name: Optional[str] = None, -) -> Node: - """Perform Group Convolution operation on data from input node. - - :param data: The node producing input data. - :param filters: The node producing filters data. - :param strides: The distance (in pixels) to slide the filter on the feature map - over the axes. - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param auto_pad: Describes how to perform padding. Possible values: - EXPLICIT: Pad dimensions are explicity specified - SAME_LOWER: Pad dimensions computed to match input shape - Ceil(num_dims/2) at the beginning and - Floor(num_dims/2) at the end - SAME_UPPER: Pad dimensions computed to match input shape - Floor(num_dims/2) at the beginning and - Ceil(num_dims/2) at the end - VALID: No padding - :param name: Optional output node name. - :return: The new node performing a Group Convolution operation on tensor from input node. - """ - return _get_node_factory_opset1().create( - "GroupConvolution", - as_nodes(data, filters), - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "auto_pad": auto_pad.upper(), - }, - ) - - -@nameable_op -def group_convolution_backprop_data( - data: NodeInput, - filters: NodeInput, - strides: List[int], - output_shape: Optional[NodeInput] = None, - pads_begin: Optional[List[int]] = None, - pads_end: Optional[List[int]] = None, - dilations: Optional[List[int]] = None, - auto_pad: str = "EXPLICIT", - output_padding: Optional[List[int]] = None, - name: Optional[str] = None, -) -> Node: - """Perform Group Convolution operation on data from input node. - - :param data: The node producing input data. - :param filters: The node producing filter data. - :param strides: The distance (in pixels) to slide the filter on the feature map - over the axes. - :param output_shape: The node that specifies spatial shape of the output. - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param dilations: The distance in width and height between elements (weights) - in the filter. - :param auto_pad: Describes how to perform padding. Possible values: - EXPLICIT: Pad dimensions are explicity specified - SAME_LOWER: Pad dimensions computed to match input shape - Ceil(num_dims/2) at the beginning and - Floor(num_dims/2) at the end - SAME_UPPER: Pad dimensions computed to match input shape - Floor(num_dims/2) at the beginning and - Ceil(num_dims/2) at the end - VALID: No padding - :param output_padding: The additional amount of paddings added per each spatial axis - in the output tensor. - :param name: Optional output node name. - :return: The new node performing a Group Convolution operation on tensor from input node. 
- """ - spatial_dim_count = len(strides) - if dilations is None: - dilations = [1] * spatial_dim_count - if output_padding is None: - output_padding = [0] * spatial_dim_count - - attributes = { - "strides": strides, - "dilations": dilations, - "auto_pad": auto_pad.upper(), - "output_padding": output_padding, - } - args = as_nodes(data, filters) - - if output_shape is not None: - args.append(as_node(output_shape)) - else: - if pads_begin is None: - pads_begin = [0] * spatial_dim_count - if pads_end is None: - pads_end = [0] * spatial_dim_count - attributes["pads_begin"] = pads_begin - attributes["pads_end"] = pads_end - - return _get_node_factory_opset1().create("GroupConvolutionBackpropData", args, attributes) - - -@nameable_op -def hard_sigmoid(data: Node, alpha: NodeInput, beta: NodeInput, name: Optional[str] = None) -> Node: - """Perform Hard Sigmoid operation element-wise on data from input node. - - :param data: The node with data tensor. - :param alpha: A node producing the alpha parameter. - :param beta: A node producing the beta parameter - :param name: Optional output node name. - :return: The new node performing a Hard Sigmoid element-wise on input tensor. - - Hard Sigmoid uses the following logic: - - .. code-block:: python - - y = max(0, min(1, alpha * data + beta)) - - """ - return _get_node_factory_opset1().create("HardSigmoid", [data, as_node(alpha), as_node(beta)]) - - -@nameable_op -def interpolate(image: Node, output_shape: NodeInput, attrs: dict, name: Optional[str] = None) -> Node: - """Perform interpolation of independent slices in input tensor. - - :param image: The node providing input tensor with data for interpolation. - :param output_shape: 1D tensor describing output shape for spatial axes. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - :return: Node representing interpolation operation. - - Available attributes are: - - * axes Specify spatial dimension indices where interpolation is applied. - Type: List of non-negative integer numbers. - Required: yes. - - * mode Specifies type of interpolation. - Range of values: one of {nearest, linear, cubic, area} - Type: string - Required: yes - - * align_corners A flag that specifies whether to align corners or not. True means the - alignment is applied, False means the alignment isn't applied. - Range of values: True or False. Default: True. - Required: no - - * antialias A flag that specifies whether to perform anti-aliasing. - Range of values: False - do not perform anti-aliasing - True - perform anti-aliasing - Default value: False - Required: no - - * pads_begin Specify the number of pixels to add to the beginning of the image being - interpolated. A scalar that specifies padding for each spatial dimension. - Range of values: list of non-negative integer numbers. Default value: 0 - Required: no - - * pads_end Specify the number of pixels to add to the beginning of the image being - interpolated. A scalar that specifies padding for each spatial dimension. - Range of values: list of non-negative integer numbers. Default value: 0 - Required: no - - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'axes': [2, 3], - 'mode': 'cubic', - } - - attrs = { - 'axes': [2, 3], - 'mode': 'cubic', - 'antialias': True, - 'pads_begin': [2, 2, 2], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. 
- """ - requirements = [ - ("axes", True, np.integer, is_non_negative_value), - ("mode", True, np.str_, None), - ("align_corners", False, np.bool_, None), - ("antialias", False, np.bool_, None), - ("pads_begin", False, np.integer, is_non_negative_value), - ("pads_end", False, np.integer, is_non_negative_value), - ] - - check_valid_attributes("Interpolate", attrs, requirements) - - return _get_node_factory_opset1().create("Interpolate", [image, as_node(output_shape)], attrs) - - -@binary_op -def less( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which checks if left input node is less than the right node element-wise. - - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is less than the right_node. - """ - return _get_node_factory_opset1().create("Less", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@binary_op -def less_equal( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which checks if left input node is less or equal the right node element-wise. - - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The node performing element-wise check whether left_node is less than or equal the - right_node. - """ - return _get_node_factory_opset1().create("LessEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@unary_op -def log(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies natural logarithm to the input node element-wise. - - :param node: The input node providing data for operation. - :param name: The optional new name for output node. - :return: The new node performing log operation element-wise. - """ - return _get_node_factory_opset1().create("Log", [node]) - - -@binary_op -def logical_and( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which perform logical and operation on input nodes element-wise. - - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes - to output shape axes. Range of values: numpy, explicit. - :param name: The optional new name for output node. - :return: The node performing logical and operation on input nodes corresponding elements. - """ - return _get_node_factory_opset1().create("LogicalAnd", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@unary_op -def logical_not(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies element-wise logical negation to the input node. - - :param node: The input node providing data. - :param name: The optional new name for output node. - :return: The node performing element-wise logical NOT operation with given tensor. 
- """ - return _get_node_factory_opset1().create("LogicalNot", [node]) - - -@binary_op -def logical_or( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which performs logical OR operation on input nodes element-wise. - - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes - to output shape axes. Range of values: numpy, explicit. - :param name: The optional new name for output node. - :return: The node performing logical or operation on input nodes corresponding elements. - """ - return _get_node_factory_opset1().create("LogicalOr", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@binary_op -def logical_xor( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which performs logical XOR operation on input nodes element-wise. - - :param left_node: The first input node providing data. - :param right_node: The second input node providing data. - :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes - to output shape axes. Range of values: numpy, explicit. - :param name: The optional new name for output node. - :return: The node performing logical or operation on input nodes corresponding elements. - """ - return _get_node_factory_opset1().create("LogicalXor", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@nameable_op -def lrn( - data: NodeInput, - axes: NodeInput, - alpha: float = 1, - beta: float = 0.5, - bias: float = 1, - size: int = 5, - name: Optional[str] = None, -) -> Node: - """Return a node which performs element-wise Local Response Normalization (LRN) operation. - - :param data: Input data. - :param alpha: A scale factor (usually positive). - :param beta: An exponent. - :param bias: An offset (usually positive) to avoid dividing by 0. - :param size: Width of the 1-D normalization window. - :param name: An optional name of the output node. - :return: The new node which performs LRN. - """ - attributes = {"alpha": alpha, "beta": beta, "bias": bias, "size": size} - return _get_node_factory_opset1().create("LRN", as_nodes(data, axes), attributes) - - -@nameable_op -def lstm_cell( - X: NodeInput, - initial_hidden_state: NodeInput, - initial_cell_state: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs LSTMCell operation. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. - :param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. - :param W: The weight tensor with shape: [4*hidden_size, input_size]. - :param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. - :param B: The bias tensor for gates with shape: [4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. 
- :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMCell. Node outputs count: 2. - """ - if activations is None: - activations = ["sigmoid", "tanh", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B) - - # P - nGraph additional input, no such input in the OV spec - peepholes_count = 3 # nGraph default - peepholes_shape = [peepholes_count * hidden_size] - peepholes_array = np.zeros(peepholes_shape) # nGraph default - data_dtype = get_dtype(node_inputs[0].get_output_element_type(0)) - default_P = make_constant_node(peepholes_array, dtype=data_dtype) - node_inputs.append(default_P) - - weights_format = "fico" # IE LSTMWeightsFormat, no such attribute in the OV spec - input_forget = False # nGraph default, no such attribute in the OV spec - - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - "weights_format": weights_format, - "input_forget": input_forget, - } - return _get_node_factory_opset1().create("LSTMCell", node_inputs, attributes) - - -@nameable_op -def lstm_sequence( - X: NodeInput, - initial_hidden_state: NodeInput, - initial_cell_state: NodeInput, - sequence_lengths: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - direction: str, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs LSTMSequence operation. - - :param X: The input tensor. Shape: [batch_size, seq_length, input_size]. - :param initial_hidden_state: The hidden state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param initial_cell_state: The cell state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param sequence_lengths: Specifies real sequence lengths for each batch element. - Shape: [batch_size]. Integer type. - :param W: Tensor with weights for matrix multiplication operation with input portion of data. - Shape: [num_directions, 4*hidden_size, input_size]. - :param R: The tensor with weights for matrix multiplication operation with hidden state. - Shape: [num_directions, 4*hidden_size, hidden_size]. - :param B: The tensor with biases. - Shape: [num_directions, 4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param direction: Specifies if the RNN is forward, reverse, or bidirectional. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMSequence. Node outputs count: 3. 
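The tensor shapes listed above fit together as in the following sketch for a unidirectional sequence (`num_directions == 1`; same import assumption as in the earlier sketches):

.. code-block:: python

    import numpy as np
    from ngraph import opset1 as ops1  # compatibility module (assumption)

    batch, seq_len, input_size, hidden = 2, 5, 3, 4
    X = ops1.parameter([batch, seq_len, input_size], np.float32)
    H0 = ops1.parameter([batch, 1, hidden], np.float32)
    C0 = ops1.parameter([batch, 1, hidden], np.float32)
    lens = ops1.constant(np.full([batch], seq_len, dtype=np.int32))
    W = ops1.parameter([1, 4 * hidden, input_size], np.float32)
    R = ops1.parameter([1, 4 * hidden, hidden], np.float32)
    B = ops1.parameter([1, 4 * hidden], np.float32)
    node = ops1.lstm_sequence(X, H0, C0, lens, W, R, B, hidden, "forward")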
- """ - if activations is None: - activations = ["sigmoid", "tanh", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B) - - # P - nGraph additional input, no such input in the OV spec - peepholes_count = 3 # nGraph default - if direction.lower() == "bidirectional": - num_directions = 2 - else: - num_directions = 1 - peepholes_shape = [num_directions, peepholes_count * hidden_size] - peepholes_array = np.zeros(peepholes_shape) # nGraph default - data_dtype = get_dtype(node_inputs[0].get_output_element_type(0)) - default_P = make_constant_node(peepholes_array, dtype=data_dtype) - node_inputs.append(default_P) - - weights_format = "fico" # IE LSTMWeightsFormat, no such attribute in the OV spec - input_forget = False # nGraph default, no such attribute in the OV spec - - attributes = { - "hidden_size": hidden_size, - "direction": direction.lower(), - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - "weights_format": weights_format, - "input_forget": input_forget, - } - return _get_node_factory_opset1().create("LSTMSequence", node_inputs, attributes) - - -@nameable_op -def matmul( - data_a: NodeInput, - data_b: NodeInput, - transpose_a: bool, - transpose_b: bool, - name: Optional[str] = None, -) -> Node: - """Return the Matrix Multiplication operation. - - :param data_a: left-hand side matrix - :param data_b: right-hand side matrix - :param transpose_a: should the first matrix be transposed before operation - :param transpose_b: should the second matrix be transposed - :return: MatMul operation node - """ - return _get_node_factory_opset1().create("MatMul", as_nodes(data_a, data_b), {"transpose_a": transpose_a, "transpose_b": transpose_b}) - - -@nameable_op -def max_pool( - data: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - kernel_shape: TensorShape, - rounding_type: str = "floor", - auto_pad: Optional[str] = None, - name: Optional[str] = None, -) -> Node: - """Perform max pooling operation with given parameters on provided data. - - :param data: The node providing input data. - :param strides: The distance (in pixels) to slide the filter on the feature map - over the axes. - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param kernel_shape: The pooling operation kernel shape. - :param rounding_type: Determines used rounding schema when computing output shape. Acceptable - values are: ['floor', 'ceil'] - :param auto_pad: Determines how the padding is calculated. Acceptable values: - [None, 'same_upper', 'same_lower', 'valid'] - :param name: The optional name for the created output node. - - :return: The new node performing max pooling operation. 
- """ - if auto_pad is None: - auto_pad = "explicit" - return _get_node_factory_opset1().create( - "MaxPool", - [as_node(data)], - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "kernel": kernel_shape, - "rounding_type": rounding_type.upper(), - "auto_pad": auto_pad.upper(), - }, - ) - - -@binary_op -def maximum( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which applies the maximum operation to input nodes elementwise.""" - return _get_node_factory_opset1().create("Maximum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@binary_op -def minimum( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which applies the minimum operation to input nodes elementwise.""" - return _get_node_factory_opset1().create("Minimum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@binary_op -def mod( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node performing element-wise division reminder with two given tensors. - - :param left_node: The first input node for mod operation. - :param right_node: The second input node for mod operation. - :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. - :param name: Optional name for output node. - :return: The node performing element-wise Mod operation. - """ - return _get_node_factory_opset1().create("Mod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@binary_op -def multiply( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which applies f(x) = A*B to the input nodes elementwise.""" - return _get_node_factory_opset1().create("Multiply", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@unary_op -def negative(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies f(x) = -x to the input node elementwise.""" - return _get_node_factory_opset1().create("Negative", [node]) - - -@nameable_op -def non_max_suppression( - boxes: NodeInput, - scores: NodeInput, - max_output_boxes_per_class: Optional[NodeInput] = None, - iou_threshold: Optional[NodeInput] = None, - score_threshold: Optional[NodeInput] = None, - box_encoding: str = "corner", - sort_result_descending: bool = True, - name: Optional[str] = None, -) -> Node: - """Return a node which performs NonMaxSuppression. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes - to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param box_encoding: Format of boxes data encoding. Range of values: corner or cente. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected - boxes across batches or not. 
- :return: The new node which performs NonMaxSuppression - """ - if max_output_boxes_per_class is None: - max_output_boxes_per_class = make_constant_node(0, np.int64) - if iou_threshold is None: - iou_threshold = make_constant_node(0, np.float32) - if score_threshold is None: - score_threshold = make_constant_node(0, np.float32) - - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - attributes = { - "box_encoding": box_encoding, - "sort_result_descending": sort_result_descending, - } - - return _get_node_factory_opset1().create("NonMaxSuppression", inputs, attributes) - - -@nameable_op -def normalize_l2(data: NodeInput, axes: NodeInput, eps: float, eps_mode: str, name: Optional[str] = None) -> Node: - """Construct an NormalizeL2 operation. - - :param data: Node producing the input tensor - :param axes: Node indicating axes along which L2 reduction is calculated - :param eps: The epsilon added to L2 norm - :param eps_mode: how eps is combined with L2 value (`add` or `max`) - :return: New node which performs the L2 normalization. - """ - return _get_node_factory_opset1().create("NormalizeL2", as_nodes(data, axes), {"eps": eps, "mode": eps_mode}) - - -@binary_op -def not_equal( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which checks if input nodes are unequal element-wise. - - :param left_node: The first input node for not-equal operation. - :param right_node: The second input node for not-equal operation. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :param name: The optional name for output new node. - :return: The node performing element-wise inequality check. - """ - return _get_node_factory_opset1().create("NotEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@nameable_op -def one_hot( - indices: NodeInput, - depth: NodeInput, - on_value: NodeInput, - off_value: NodeInput, - axis: int, - name: Optional[str] = None, -) -> Node: - """Create node performing one-hot encoding on input data. - - :param indices: Input tensor of rank N with indices of any supported integer data type. - :param depth: Scalar of any supported integer type that specifies number of classes and - the size of one-hot dimension. - :param on_value: Scalar of any type that is the value that the locations - in output tensor represented by indices in input take. - :param off_value: Scalar of any type that is the value that the locations not represented - by indices in input take. - - :param name: The optional name for new output node. - :return: New node performing one-hot operation. - """ - return _get_node_factory_opset1().create("OneHot", as_nodes(indices, depth, on_value, off_value), {"axis": axis}) - - -@nameable_op -def pad( - arg: NodeInput, - pads_begin: NodeInput, - pads_end: NodeInput, - pad_mode: str, - arg_pad_value: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a generic padding operation. - - :param arg: The node producing input tensor to be padded. - :param pads_begin: number of padding elements to be added before position 0 - on each axis of arg. - :param pads_end: number of padding elements to be added after the last element. - :param pad_mode: "constant", "edge", "reflect" or "symmetric" - :param arg_pad_value: value used for padding if pad_mode is "constant" - :return: Pad operation node. 
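A usage sketch padding only the spatial dimensions in `constant` mode (same import assumption as above; `arg_pad_value` is needed only for this mode):

.. code-block:: python

    import numpy as np
    from ngraph import opset1 as ops1  # compatibility module (assumption)

    arg = ops1.parameter([1, 3, 10, 10], np.float32)
    pads_begin = ops1.constant(np.array([0, 0, 1, 1], dtype=np.int64))
    pads_end = ops1.constant(np.array([0, 0, 1, 1], dtype=np.int64))
    fill = ops1.constant(np.array(0.0, dtype=np.float32))
    node = ops1.pad(arg, pads_begin, pads_end, "constant", arg_pad_value=fill)
    # resulting shape: [1, 3, 12, 12]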
- """ - input_nodes = as_nodes(arg, pads_begin, pads_end) - if arg_pad_value: - input_nodes.append(as_node(arg_pad_value)) - - pad_mode = pad_mode.upper() - return _get_node_factory_opset1().create("Pad", input_nodes, {"pad_mode": pad_mode}) - - -@nameable_op -def parameter(shape: TensorShape, dtype: NumericType = np.float32, name: Optional[str] = None) -> Parameter: - """Return an ngraph Parameter object.""" - element_type = get_element_type(dtype) - return Parameter(element_type, PartialShape(shape)) - - -@binary_op -def power( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which perform element-wise exponentiation operation. - - :param left_node: The node providing the base of operation. - :param right_node: The node providing the exponent of operation. - :param name: The optional name for the new output node. - :param auto_broadcast: The type of broadcasting specifies rules used for - auto-broadcasting of input tensors. - :return: The new node performing element-wise exponentiation operation on input nodes. - """ - return _get_node_factory_opset1().create("Power", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@nameable_op -def prelu(data: NodeInput, slope: NodeInput, name: Optional[str] = None) -> Node: - """Perform Parametrized Relu operation element-wise on data from input node. - - :param data: The node with data tensor. - :param slope: The node with the multipliers for negative values. - :param name: Optional output node name. - :return: The new node performing a PRelu operation on tensor's channels. - - PRelu uses the following logic: - - .. code-block:: python - - if data < 0: - data = data * slope - elif data >= 0: - data = data - - """ - return _get_node_factory_opset1().create("PRelu", as_nodes(data, slope)) - - -@nameable_op -def prior_box_clustered(output_size: Node, image_size: NodeInput, attrs: dict, name: Optional[str] = None) -> Node: - """Generate prior boxes of specified sizes normalized to the input image size. - - :param output_size: 1D tensor with two integer elements [height, width]. Specifies the - spatial size of generated grid with boxes. - :param image_size: 1D tensor with two integer elements [image_height, image_width] that - specifies shape of the image for which boxes are generated. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - :return: Node representing PriorBoxClustered operation. - - Available attributes are: - - * widths Specifies desired boxes widths in pixels. - Range of values: floating point positive numbers. - Default value: 1.0 - Required: no - - * heights Specifies desired boxes heights in pixels. - Range of values: floating point positive numbers. - Default value: 1.0 - Required: no - - * clip The flag that denotes if each value in the output tensor should be clipped - within [0,1]. - Range of values: {True, False} - Default value: True - Required: no - - * step_widths The distance between box centers. - Range of values: floating point positive number - Default value: 0.0 - Required: no - - * step_heights The distance between box centers. - Range of values: floating point positive number - Default value: 0.0 - Required: no - - * offset The shift of box respectively to the top left corner. - Range of values: floating point positive number - Default value: None - Required: yes - - * variance Denotes a variance of adjusting bounding boxes. 
- Range of values: floating point positive numbers - Default value: [] - Required: no - - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'offset': 85, - } - - attrs = { - 'offset': 85, - 'clip': False, - 'step_widths': [1.5, 2.0, 2.5] - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - """ - requirements = [ - ("widths", False, np.floating, is_positive_value), - ("heights", False, np.floating, is_positive_value), - ("clip", False, np.bool_, None), - ("step_widths", False, np.floating, is_positive_value), - ("step_heights", False, np.floating, is_positive_value), - ("offset", True, np.floating, is_positive_value), - ("variance", False, np.floating, is_positive_value), - ] - - check_valid_attributes("PriorBoxClustered", attrs, requirements) - - return _get_node_factory_opset1().create("PriorBoxClustered", [output_size, as_node(image_size)], attrs) - - -@nameable_op -def prior_box(layer_shape: Node, image_shape: NodeInput, attrs: dict, name: Optional[str] = None) -> Node: - """Generate prior boxes of specified sizes and aspect ratios across all dimensions. - - :param layer_shape: Shape of layer for which prior boxes are computed. - :param image_shape: Shape of image to which prior boxes are scaled. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - :return: Node representing prior box operation. - - Available attributes are: - - * min_size The minimum box size (in pixels). - Range of values: positive floating point numbers - Default value: [] - Required: no - - * max_size The maximum box size (in pixels). - Range of values: positive floating point numbers - Default value: [] - Required: no - - * aspect_ratio Aspect ratios of prior boxes. - Range of values: set of positive floating point numbers - Default value: [] - Required: no - - * flip The flag that denotes that each aspect_ratio is duplicated and flipped. - Range of values: {True, False} - Default value: False - Required: no - - * clip The flag that denotes if each value in the output tensor should be clipped - to [0,1] interval. - Range of values: {True, False} - Default value: False - Required: no - - * step The distance between box centers. - Range of values: floating point non-negative number - Default value: 0 - Required: no - - * offset This is a shift of box respectively to top left corner. - Range of values: floating point non-negative number - Default value: None - Required: yes - - * variance The variance denotes a variance of adjusting bounding boxes. The attribute - could contain 0, 1 or 4 elements. - Range of values: floating point positive numbers - Default value: [] - Required: no - - * scale_all_sizes The flag that denotes type of inference. - Range of values: False - max_size is ignored - True - max_size is used - Default value: True - Required: no - - * fixed_ratio This is an aspect ratio of a box. - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - - * fixed_size This is an initial box size (in pixels). - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - - * density This is the square root of the number of boxes of each type. - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - - Example of attribute dictionary: - - .. 
code-block:: python - - # just required ones - attrs = { - 'offset': 85, - } - - attrs = { - 'offset': 85, - 'flip': True, - 'clip': True, - 'fixed_size': [32, 64, 128] - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - """ - requirements = [ - ("offset", True, np.floating, is_non_negative_value), - ("min_size", False, np.floating, is_positive_value), - ("max_size", False, np.floating, is_positive_value), - ("aspect_ratio", False, np.floating, is_positive_value), - ("flip", False, np.bool_, None), - ("clip", False, np.bool_, None), - ("step", False, np.floating, is_non_negative_value), - ("variance", False, np.floating, is_positive_value), - ("scale_all_sizes", False, np.bool_, None), - ("fixed_ratio", False, np.floating, is_positive_value), - ("fixed_size", False, np.floating, is_positive_value), - ("density", False, np.floating, is_positive_value), - ] - - check_valid_attributes("PriorBox", attrs, requirements) - - return _get_node_factory_opset1().create("PriorBox", [layer_shape, as_node(image_shape)], attrs) - - -@nameable_op -def proposal( - class_probs: Node, - bbox_deltas: Node, - image_shape: NodeInput, - attrs: dict, - name: Optional[str] = None, -) -> Node: - """Filter bounding boxes and outputs only those with the highest prediction confidence. - - :param class_probs: 4D input floating point tensor with class prediction scores. - :param bbox_deltas: 4D input floating point tensor with box logits. - :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - :return: Node representing Proposal operation. - - * base_size The size of the anchor to which scale and ratio attributes are applied. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - - * pre_nms_topn The number of bounding boxes before the NMS operation. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - - * post_nms_topn The number of bounding boxes after the NMS operation. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - - * nms_thresh The minimum value of the proposal to be taken into consideration. - Range of values: a positive floating-point number - Default value: None - Required: yes - - * feat_stride The step size to slide over boxes (in pixels). - Range of values: a positive unsigned integer - Default value: None - Required: yes - - * min_size The minimum size of box to be taken into consideration. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - - * ratio The ratios for anchor generation. - Range of values: a list of floating-point numbers - Default value: None - Required: yes - - * scale The scales for anchor generation. - Range of values: a list of floating-point numbers - Default value: None - Required: yes - - * clip_before_nms The flag that specifies whether to perform clip bounding boxes before - non-maximum suppression or not. - Range of values: True or False - Default value: True - Required: no - - * clip_after_nms The flag that specifies whether to perform clip bounding boxes after - non-maximum suppression or not. - Range of values: True or False - Default value: False - Required: no - - * normalize The flag that specifies whether to perform normalization of output boxes to - [0,1] interval or not. 
- Range of values: True or False - Default value: False - Required: no - - * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding. - Range of values: a positive floating-point number - Default value: 1.0 - Required: no - - * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates - before decoding. - Range of values: a positive floating-point number - Default value: 1.0 - Required: no - - * framework Specifies how the box coordinates are calculated. - Range of values: "" (empty string) - calculate box coordinates like in Caffe* - tensorflow - calculate box coordinates like in the TensorFlow* - Object Detection API models - Default value: "" (empty string) - Required: no - - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'base_size': 85, - 'pre_nms_topn': 10, - 'post_nms_topn': 20, - 'nms_thresh': 0.34, - 'feat_stride': 16, - 'min_size': 32, - 'ratio': [0.1, 1.5, 2.0, 2.5], - 'scale': [2, 3, 3, 4], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - """ - requirements = [ - ("base_size", True, np.unsignedinteger, is_positive_value), - ("pre_nms_topn", True, np.unsignedinteger, is_positive_value), - ("post_nms_topn", True, np.unsignedinteger, is_positive_value), - ("nms_thresh", True, np.floating, is_positive_value), - ("feat_stride", True, np.unsignedinteger, is_positive_value), - ("min_size", True, np.unsignedinteger, is_positive_value), - ("ratio", True, np.floating, None), - ("scale", True, np.floating, None), - ("clip_before_nms", False, np.bool_, None), - ("clip_after_nms", False, np.bool_, None), - ("normalize", False, np.bool_, None), - ("box_size_scale", False, np.floating, is_positive_value), - ("box_coordinate_scale", False, np.floating, is_positive_value), - ("framework", False, np.str_, None), - ] - - check_valid_attributes("Proposal", attrs, requirements) - - return _get_node_factory_opset1().create("Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs) - - -@nameable_op -def psroi_pooling( - input: NodeInput, - coords: NodeInput, - output_dim: int, - group_size: int, - spatial_scale: float, - spatial_bins_x: int, - spatial_bins_y: int, - mode: str, - name: Optional[str] = None, -) -> Node: - """Return a node which produces a PSROIPooling operation. - - :param input: Input feature map {N, C, ...} - :param coords: Coordinates of bounding boxes - :param output_dim: Output channel number - :param group_size: Number of groups to encode position-sensitive scores - :param spatial_scale: Ratio of input feature map over input image size - :param spatial_bins_x: Numbers of bins to divide the input feature maps over - :param spatial_bins_y: Numbers of bins to divide the input feature maps over - :param mode: Mode of pooling - "avg" or "bilinear" - :return: PSROIPooling node - """ - mode = mode.lower() - return _get_node_factory_opset1().create( - "PSROIPooling", - as_nodes(input, coords), - { - "output_dim": output_dim, - "group_size": group_size, - "spatial_scale": spatial_scale, - "spatial_bins_x": spatial_bins_x, - "spatial_bins_y": spatial_bins_y, - "mode": mode, - }, - ) - - -@nameable_op -def range(start: Node, stop: NodeInput, step: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which produces the Range operation. 
- - :param start: The start value of the generated range - :param stop: The stop value of the generated range - :param step: The step value for the generated range - :param name: Optional name for output node. - :return: Range node - """ - return _get_node_factory_opset1().create("Range", as_nodes(start, stop, step)) - - -@unary_op -def relu(node: NodeInput, name: Optional[str] = None) -> Node: - """Perform rectified linear unit operation on input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: The optional output node name. - :return: The new node performing relu operation on its input element-wise. - """ - return _get_node_factory_opset1().create("Relu", [node]) - - -@nameable_op -def reduce_logical_and(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """Logical AND reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to reduce. - :param reduction_axes: The axes to eliminate through AND operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing reduction operation. - """ - return _get_node_factory_opset1().create("ReduceLogicalAnd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_logical_or(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """Logical OR reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to reduce. - :param reduction_axes: The axes to eliminate through OR operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing reduction operation. - """ - return _get_node_factory_opset1().create("ReduceLogicalOr", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_max(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """Max-reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to max-reduce. - :param reduction_axes: The axes to eliminate through max operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - """ - return _get_node_factory_opset1().create("ReduceMax", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_mean(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """Mean-reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. - """ - return _get_node_factory_opset1().create("ReduceMean", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_min(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """Min-reduction operation on input tensor, eliminating the specified reduction axes. 
- - :param node: The tensor we want to min-reduce. - :param reduction_axes: The axes to eliminate through min operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - """ - return _get_node_factory_opset1().create("ReduceMin", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_prod(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """Product-reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to product-reduce. - :param reduction_axes: The axes to eliminate through product operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing product-reduction operation. - """ - return _get_node_factory_opset1().create("ReduceProd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_sum(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """Perform element-wise sums of the input tensor, eliminating the specified reduction axes. - - :param node: The node providing data for operation. - :param reduction_axes: The axes to eliminate through summation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: The optional new name for output node. - :return: The new node performing summation along `reduction_axes` element-wise. - """ - return _get_node_factory_opset1().create("ReduceSum", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def region_yolo( - input: Node, - coords: int, - classes: int, - num: int, - do_softmax: bool, - mask: List[int], - axis: int, - end_axis: int, - anchors: Optional[List[float]] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which produces the RegionYolo operation. - - :param input: Input data - :param coords: Number of coordinates for each region - :param classes: Number of classes for each region - :param num: Number of regions - :param do_softmax: Compute softmax - :param mask: Mask - :param axis: Axis to begin softmax on - :param end_axis: Axis to end softmax on - :param anchors: A flattened list of pairs `[width, height]` that describes prior box sizes - :param name: Optional name for output node. - :return: RegionYolo node - """ - if anchors is None: - anchors = [] - - return _get_node_factory_opset1().create( - "RegionYolo", - [input], - { - "coords": coords, - "classes": classes, - "num": num, - "do_softmax": do_softmax, - "mask": mask, - "axis": axis, - "end_axis": end_axis, - "anchors": anchors, - }, - ) - - -@nameable_op -def reshape(node: NodeInput, output_shape: NodeInput, special_zero: bool, name: Optional[str] = None) -> Node: - """Return reshaped node according to provided parameters. - - :param node: The tensor we want to reshape. - :param output_shape: The node with a new shape for input tensor. - :param special_zero: The boolean variable that controls how zero values in shape are - interpreted. If special_zero is false, then 0 is interpreted as-is - which means that output shape will contain a zero dimension at the - specified location. Input and output tensors are empty in this case. - If special_zero is true, then all zeros in shape implies the copying - of corresponding dimensions from data.shape into the output shape. 
- Range of values: False or True - """ - return _get_node_factory_opset1().create("Reshape", as_nodes(node, output_shape), {"special_zero": special_zero}) - - -@unary_op -def result(data: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which represents an output of a graph (Function). - - :param data: The tensor containing the input data - :return: Result node - """ - return _get_node_factory_opset1().create("Result", [data]) - - -@nameable_op -def reverse_sequence( - input: NodeInput, - seq_lengths: NodeInput, - batch_axis: NumericData, - seq_axis: NumericData, - name: Optional[str] = None, -) -> Node: - """Return a node which produces a ReverseSequence operation. - - :param input: tensor with input data to reverse - :param seq_lengths: 1D tensor of integers with sequence lengths in the input tensor. - :param batch_axis: index of the batch dimension. - :param seq_axis: index of the sequence dimension. - :return: ReverseSequence node - """ - return _get_node_factory_opset1().create( - "ReverseSequence", - as_nodes(input, seq_lengths), - {"batch_axis": batch_axis, "seq_axis": seq_axis}, - ) - - -@nameable_op -def select( - cond: NodeInput, - then_node: NodeInput, - else_node: NodeInput, - auto_broadcast: str = "numpy", - name: Optional[str] = None, -) -> Node: - """Perform an element-wise selection operation on input tensors. - - :param cond: Tensor with selection mask of type `boolean`. - :param then_node: Tensor providing data to be selected if respective `cond` - item value is `True`. - :param else_node: Tensor providing data to be selected if respective `cond` - item value is `False`. - :param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors. - :param name: The optional new name for output node. - :return: The new node with values selected according to provided arguments. - """ - inputs = as_nodes(cond, then_node, else_node) - return _get_node_factory_opset1().create("Select", inputs, {"auto_broadcast": auto_broadcast.upper()}) - - -@nameable_op -def selu(data: NodeInput, alpha: NodeInput, lambda_value: NodeInput, name: Optional[str] = None) -> Node: - """Perform a Scaled Exponential Linear Unit (SELU) operation on input node element-wise. - - :param data: input node, array or scalar. - :param alpha: Alpha coefficient of SELU operation - :param lambda_value: Lambda coefficient of SELU operation - :param name: The optional output node name. - :return: The new node performing relu operation on its input element-wise. - """ - return _get_node_factory_opset1().create("Selu", as_nodes(data, alpha, lambda_value)) - - -@nameable_op -def shape_of(data: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which produces a tensor containing the shape of its input data. - - :param data: The tensor containing the input data. - :return: ShapeOf node - """ - return _get_node_factory_opset1().create("ShapeOf", [as_node(data)]) - - -@unary_op -def sigmoid(data: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which applies the sigmoid function element-wise. - - :param data: The tensor containing the input data - :return: Sigmoid node - """ - return _get_node_factory_opset1().create("Sigmoid", [data]) - - -@unary_op -def sign(node: NodeInput, name: Optional[str] = None) -> Node: - """Perform element-wise sign operation. - - :param node: One of: input node, array or scalar. - :param name: The optional new name for output node. 
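As a rough analogue of the Select semantics above with auto_broadcast="numpy", np.where performs the same element-wise choice; a small illustrative sketch with hypothetical values, assuming only numpy:

import numpy as np

cond = np.array([[True, False], [False, True]])
then_vals = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
else_vals = np.float32(0)  # scalar, broadcast like the "numpy" mode

# Take then_vals where cond is True, else_vals elsewhere.
print(np.where(cond, then_vals, else_vals))
# [[1. 0.]
#  [0. 4.]]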
- :return: The node with mapped elements of the input tensor to -1 (if it is negative), - 0 (if it is zero), or 1 (if it is positive). - """ - return _get_node_factory_opset1().create("Sign", [node]) - - -@unary_op -def sin(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply sine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with sin operation applied on it. - """ - return _get_node_factory_opset1().create("Sin", [node]) - - -@unary_op -def sinh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic sine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with sin operation applied on it. - """ - return _get_node_factory_opset1().create("Sinh", [node]) - - -@nameable_op -def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node: - """Apply softmax operation on each element of input tensor. - - :param data: The tensor providing input data. - :param axis: An axis along which Softmax should be calculated - :return: The new node with softmax operation applied on each element. - """ - return _get_node_factory_opset1().create("Softmax", [as_node(data)], {"axis": axis}) - - -@nameable_op -def space_to_depth(data: Node, mode: str, block_size: int = 1, name: Optional[str] = None) -> Node: - """Perform SpaceToDepth operation on the input tensor. - - SpaceToDepth rearranges blocks of spatial data into depth. - The operator returns a copy of the input tensor where values from the height - and width dimensions are moved to the depth dimension. - - :param data: The node with data tensor. - :param mode: Specifies how the output depth dimension is gathered from block coordinates. - - blocks_first: The output depth is gathered from [block_size, ..., block_size, C] - depth_first: The output depth is gathered from [C, block_size, ..., block_size] - - :param block_size: The size of the block of values to be moved. Scalar value. - :param name: Optional output node name. - :return: The new node performing a SpaceToDepth operation on input tensor. - """ - return _get_node_factory_opset1().create( - "SpaceToDepth", - [data], - {"mode": mode, "block_size": block_size}, - ) - - -@nameable_op -def split(data: NodeInput, axis: NodeInput, num_splits: int, name: Optional[str] = None) -> Node: - """Return a node which splits the input tensor into same-length slices. - - :param data: The input tensor to be split - :param axis: Axis along which the input data will be split - :param num_splits: Number of the output tensors that should be produced - :return: Split node - """ - return _get_node_factory_opset1().create("Split", as_nodes(data, axis), {"num_splits": num_splits}) - - -@unary_op -def sqrt(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies square root to the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: The new node with sqrt operation applied element-wise. - """ - return _get_node_factory_opset1().create("Sqrt", [node]) - - -@binary_op -def squared_difference(x1: NodeInput, x2: NodeInput, auto_broadcast: str = "NUMPY", name: Optional[str] = None) -> Node: - r"""Perform an element-wise squared difference between two tensors. 
- 
- .. math:: y[i] = (x_1[i] - x_2[i])^2 
- 
- :param x1: The node with first input tensor. 
- :param x2: The node with second input tensor. 
- :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes 
- to output shape axes. Range of values: numpy, explicit. 
- :param name: Optional new name for output node. 
- :return: The new node performing a squared difference between two tensors. 
- """ 
- return _get_node_factory_opset1().create("SquaredDifference", [x1, x2], {"auto_broadcast": auto_broadcast.upper()}) 
- 
- 
-@nameable_op 
-def squeeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node: 
- """Perform squeeze operation on input tensor. 
- 
- :param data: The node with data tensor. 
- :param axes: List of non-negative integers, indicating the dimensions to squeeze. 
- One of: input node or array. 
- :param name: Optional new name for output node. 
- :return: The new node performing a squeeze operation on input tensor. 
- 
- Remove single-dimensional entries from the shape of a tensor. 
- Takes a parameter `axes` with a list of axes to squeeze. 
- If `axes` is not provided, all the single dimensions will be removed from the shape. 
- If an `axis` is selected with shape entry not equal to one, an error is raised. 
- 
- 
- For example: 
- 
- Inputs: tensor with shape [1, 2, 1, 3, 1, 1], axes=[2, 4] 
- 
- Result: tensor with shape [1, 2, 3, 1] 
- """ 
- return _get_node_factory_opset1().create("Squeeze", as_nodes(data, axes)) 
- 
- 
-@nameable_op 
-def strided_slice( 
- data: NodeInput, 
- begin: NodeInput, 
- end: NodeInput, 
- strides: NodeInput, 
- begin_mask: List[int], 
- end_mask: List[int], 
- new_axis_mask: Optional[List[int]] = None, 
- shrink_axis_mask: Optional[List[int]] = None, 
- ellipsis_mask: Optional[List[int]] = None, 
- name: Optional[str] = None, 
-) -> Node: 
- """Return a node which extracts a strided slice of the input tensor. 
- 
- :param data: The tensor to be sliced 
- :param begin: 1D tensor with begin indexes for input blob slicing 
- :param end: 1D tensor with end indexes for input blob slicing 
- :param strides: The slicing strides 
- :param begin_mask: A mask applied to the 'begin' input indicating which elements 
- should be ignored 
- :param end_mask: A mask applied to the 'end' input indicating which elements 
- should be ignored 
- :param new_axis_mask: A mask indicating dimensions where '1' should be inserted 
- :param shrink_axis_mask: A mask indicating which dimensions should be deleted 
- :param ellipsis_mask: Indicates positions where missing dimensions should be inserted 
- :return: StridedSlice node 
- """ 
- if new_axis_mask is None: 
- new_axis_mask = [] 
- if shrink_axis_mask is None: 
- shrink_axis_mask = [] 
- if ellipsis_mask is None: 
- ellipsis_mask = [] 
- attributes = { 
- "begin_mask": begin_mask, 
- "end_mask": end_mask, 
- "new_axis_mask": new_axis_mask, 
- "shrink_axis_mask": shrink_axis_mask, 
- "ellipsis_mask": ellipsis_mask, 
- } 
- 
- return _get_node_factory_opset1().create("StridedSlice", as_nodes(data, begin, end, strides), attributes) 
- 
- 
-@binary_op 
-def subtract( 
- left_node: NodeInput, 
- right_node: NodeInput, 
- auto_broadcast: str = "NUMPY", 
- name: Optional[str] = None, 
-) -> Node: 
- """Return node which applies f(x) = A-B to the input nodes element-wise. 
- 
- :param left_node: The node providing data for left hand side of operator. 
- :param right_node: The node providing data for right hand side of operator. 
- :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes 
- to output shape axes. 
Range of values: numpy, explicit. - :param name: The optional name for output node. - :return: The new output node performing subtraction operation on both tensors element-wise. - """ - return _get_node_factory_opset1().create("Subtract", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@unary_op -def tan(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply tangent function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with tan operation applied on it. - """ - return _get_node_factory_opset1().create("Tan", [node]) - - -@unary_op -def tanh(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies hyperbolic tangent to the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with tanh operation applied on it. - """ - return _get_node_factory_opset1().create("Tanh", [node]) - - -@nameable_op -def tensor_iterator( - inputs: List[Node], - graph_body: GraphBody, - slice_input_desc: List[TensorIteratorSliceInputDesc], - merged_input_desc: List[TensorIteratorMergedInputDesc], - invariant_input_desc: List[TensorIteratorInvariantInputDesc], - body_output_desc: List[TensorIteratorBodyOutputDesc], - concat_output_desc: List[TensorIteratorConcatOutputDesc], - name: Optional[str] = None, -) -> Node: - """Perform recurrent execution of the network described in the body, iterating through the data. - - :param inputs: The provided to TensorIterator operator. - :param graph_body: The graph representing the body we execute. - :param slice_input_desc: The descriptors describing sliced inputs, that is nodes - representing tensors we iterate through, processing single - data slice in one iteration. - :param merged_input_desc: The descriptors describing merged inputs, that is nodes - representing variables with initial value at first iteration, - which may be changing through iterations. - :param invariant_input_desc: The descriptors describing invariant inputs, that is nodes - representing variable with persistent value through all - iterations. - :param body_output_desc: The descriptors describing body outputs from specified - iteration. - :param concat_output_desc: The descriptors describing specified output values through - all the iterations concatenated into one node. - :param name: The optional name for output node. - :return: Node representing TensorIterator operation. - """ - attributes = { - "body": graph_body.serialize(), - "input_descriptions": { - "slice_input_desc": [desc.serialize() for desc in slice_input_desc], - "merged_input_desc": [desc.serialize() for desc in merged_input_desc], - "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc], - }, - "output_descriptions": { - "body_output_desc": [desc.serialize() for desc in body_output_desc], - "concat_output_desc": [desc.serialize() for desc in concat_output_desc], - }, - } - - return _get_node_factory_opset1().create("TensorIterator", as_nodes(*inputs), attributes) - - -@nameable_op -def tile(data: NodeInput, repeats: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which dynamically repeats(replicates) the input data tensor. 
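Referring back to the Squeeze and StridedSlice docstrings above, this numpy sketch (illustrative only, not part of the removed module, hypothetical values) shows the equivalent shape and slicing behaviour; the mask handling is simplified to the "ignored" case:

import numpy as np

# Squeeze: remove only the listed size-1 dimensions.
x = np.ones((1, 2, 1, 3, 1, 1))
print(np.squeeze(x, axis=(2, 4)).shape)  # (1, 2, 3, 1)

# StridedSlice is essentially extended basic slicing. When a begin_mask/end_mask
# entry marks an axis as ignored, the slice simply runs from the start of that
# axis (or to its end), as for the second axis below.
data = np.arange(12).reshape(3, 4)
print(data[1:3:1, ::1])  # begin/end ignored on axis 1 -> full extent of axis 1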
- - :param data: The input tensor to be tiled - :param repeats: Per-dimension replication factors - :return: Tile node - """ - return _get_node_factory_opset1().create("Tile", as_nodes(data, repeats)) - - -@nameable_op -def topk( - data: NodeInput, - k: NodeInput, - axis: int, - mode: str, - sort: str, - name: Optional[str] = None, -) -> Node: - """Return a node which performs TopK. - - :param data: Input data. - :param k: K. - :param axis: TopK Axis. - :param mode: Compute TopK largest ('max') or smallest ('min') - :param sort: Order of output elements (sort by: 'none', 'index' or 'value') - :return: The new node which performs TopK (both indices and values) - """ - return _get_node_factory_opset1().create( - "TopK", - as_nodes(data, k), - {"axis": axis, "mode": mode, "sort": sort}, - ) - - -@nameable_op -def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which transposes the data in the input tensor. - - :param data: The input tensor to be transposed - :param input_order: Permutation of axes to be applied to the input tensor - :return: Transpose node - """ - return _get_node_factory_opset1().create("Transpose", as_nodes(data, input_order)) - - -def unsqueeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node: - """Perform unsqueeze operation on input tensor. - - Insert single-dimensional entries to the shape of a tensor. Takes one required argument axes, - a list of dimensions that will be inserted. - Dimension indices in axes are as seen in the output tensor. - - For example: Inputs: tensor with shape [3, 4, 5], axes=[0, 4] - Result: tensor with shape [1, 3, 4, 5, 1] - - :param data: The node with data tensor. - :param axes: List of non-negative integers, indicate the dimensions to be inserted. - One of: input node or array. - :return: The new node performing an unsqueeze operation on input tensor. - """ - return _get_node_factory_opset1().create("Unsqueeze", as_nodes(data, axes)) - - -@nameable_op -def variadic_split(data: NodeInput, axis: NodeInput, split_lengths: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which splits the input tensor into variadic length slices. 
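A short numpy analogue of the Unsqueeze and TopK behaviour described above (illustrative only, hypothetical values; np.expand_dims with a tuple axis needs numpy 1.18+):

import numpy as np

# Unsqueeze: insert size-1 dimensions at the given output positions.
x = np.ones((3, 4, 5))
print(np.expand_dims(x, axis=(0, 4)).shape)  # (1, 3, 4, 5, 1)

# TopK (mode='max', sort='value'): indices and values of the k largest elements.
data = np.array([[5.0, 1.0, 9.0, 3.0]])
k = 2
idx = np.argsort(-data, axis=1)[:, :k]        # [[2 0]]
vals = np.take_along_axis(data, idx, axis=1)  # [[9. 5.]]
print(idx, vals)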
- - :param data: The input tensor to be split - :param axis: Axis along which the input data will be split - :param split_lengths: Sizes of the output tensors along the split axis - :return: VariadicSplit node - """ - return _get_node_factory_opset1().create("VariadicSplit", as_nodes(data, axis, split_lengths)) diff --git a/src/bindings/python/src/compatibility/ngraph/opset10/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset10/__init__.py deleted file mode 100644 index ff4e4e4b39483a..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset10/__init__.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset8.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset9.ops import eye -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset8.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset9.ops import generate_proposals -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset9.ops import grid_sample -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell 
-from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset8.ops import if_op -from ngraph.opset10.ops import interpolate -from ngraph.opset9.ops import irdft -from ngraph.opset10.ops import is_finite -from ngraph.opset10.ops import is_inf -from ngraph.opset10.ops import is_nan -from ngraph.opset8.ops import i420_to_bgr -from ngraph.opset8.ops import i420_to_rgb -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset9.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset9.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset8.ops import nv12_to_bgr -from ngraph.opset8.ops import nv12_to_rgb -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset8.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset9.ops import rdft -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset9.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset8.ops import slice 
-from ngraph.opset8.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset9.ops import softsign -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset10.ops import unique -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset10/ops.py b/src/bindings/python/src/compatibility/ngraph/opset10/ops.py deleted file mode 100644 index b27c21e9dcbab0..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset10/ops.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all openvino ops.""" -from functools import partial -from typing import List, Optional - -from ngraph.impl import Node -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import nameable_op -from ngraph.utils.types import ( - NodeInput, - as_nodes, - as_node, - make_constant_node, -) - -_get_node_factory_opset4 = partial(_get_node_factory, "opset4") -_get_node_factory_opset10 = partial(_get_node_factory, "opset10") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def interpolate( - image: NodeInput, - output_shape: NodeInput, - scales: NodeInput, - mode: str, - shape_calculation_mode: str, - pads_begin: Optional[List[int]] = None, - pads_end: Optional[List[int]] = None, - coordinate_transformation_mode: str = "half_pixel", - nearest_mode: str = "round_prefer_floor", - antialias: bool = False, - cube_coeff: float = -0.75, - axes: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Perform interpolation of independent slices in input tensor. - - :param image: The node providing input tensor with data for interpolation. - :param output_shape: 1D tensor describing output shape for spatial axes. - :param scales: 1D tensor describing scales for spatial axes. - :param mode: Specifies type of interpolation. Possible values are: nearest, linear, - linear_onnx, cubic. - :param shape_calculation_mode: - Specifies which input, sizes or scales, is used to calculate an output - shape. - :param pads_begin: Specifies the number of pixels to add to the beginning of the image - being interpolated. Default is None. - :param pads_end: Specifies the number of pixels to add to the end of the image being - interpolated. Default is None. - :param coordinate_transformation_mode: - Specifies how to transform the coordinate in the resized tensor to the - coordinate in the original tensor. Default is "half_pixel". - :param nearest_mode: Specifies round mode when mode == nearest and is used only when - mode == nearest. Default is "round_prefer_floor". - :param antialias: Specifies whether to perform anti-aliasing. Default is False. - :param cube_coeff: Specifies the parameter a for cubic interpolation. Default is -0.75. 
- :param axes: 1D tensor specifying dimension indices where interpolation is applied. - Default is None. - :param name: Optional name for the output node. Default is None. - :return: Node representing interpolation operation. - """ - attrs = { - "mode": mode, - "shape_calculation_mode": shape_calculation_mode, - "coordinate_transformation_mode": coordinate_transformation_mode, - "nearest_mode": nearest_mode, - "antialias": antialias, - "cube_coeff": cube_coeff, - } - - attrs["pads_begin"] = [] if pads_begin is None else pads_begin - attrs["pads_end"] = [] if pads_end is None else pads_end - - inputs = as_nodes(image, output_shape, scales) if axes is None else as_nodes(image, output_shape, scales, axes) - - # This is an update of the operator version, so even though this is opset 10, - # the operator is taken from opset 4. - return _get_node_factory_opset4().create("Interpolate", inputs, attrs) - - -@nameable_op -def is_finite(data: NodeInput, name: Optional[str] = None) -> Node: - """Performs element-wise mapping from NaN and Infinity to False. Other values are mapped to True. - - :param data: A tensor of floating-point numeric type and arbitrary shape. - :param name: Optional name for the output node. The default is None. - :return: Node representing is_finite operation. - """ - return _get_node_factory_opset10().create("IsFinite", as_nodes(data)) - - -@nameable_op -def is_inf( - data: NodeInput, - attributes: Optional[dict] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs IsInf operation. - - :param data: The input tensor. - :param attributes: Optional dictionary containing IsInf attributes. - :param name: Optional name of the node. - - Available attributes: - - * detect_negative Specifies whether to map negative infinities to true in output map. - Range of values: true, false - Default value: true - Required: no - * detect_positive Specifies whether to map positive infinities to true in output map. - Range of values: true, false - Default value: true - Required: no - - :return: A new IsInf node. - """ - if not attributes: - attributes = {} - return _get_node_factory_opset10().create("IsInf", as_nodes(data), attributes) - - -@nameable_op -def is_nan(data: NodeInput, name: Optional[str] = None) -> Node: - """Performs element-wise mapping from NaN to True. Other values are mapped to False. - - :param data: A tensor of floating point numeric type and arbitrary shape. - :param name: Optional name for the output node. Default is None. - :return: Node representing is_nan operation. - """ - return _get_node_factory_opset10().create("IsNaN", as_nodes(data)) - - -@nameable_op -def unique( - data: NodeInput, - axis: Optional[NodeInput] = None, - sorted: Optional[bool] = True, - index_element_type: Optional[str] = "i64", - count_element_type: Optional[str] = "i64", - name: Optional[str] = None, -) -> Node: - """Operator which selects and returns unique elements or unique slices of the input tensor. - - :param data: Input data tensor. - :param axis: (Optional) An input tensor containing the axis value. - If not provided or None, data input is considered as a flattened tensor. - Default value: None. - :param sorted: (Optional) Controls the order of the returned unique values, - sorts ascendingly when true. - Default value: True. - :param index_element_type: (Optional) The data type set for outputs containing indices. - Default value: "i64". - :param count_element_type: (Optional) The data type set for the output with repetition count. - Default value: "i64". 
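A rough numpy analogue of the Unique outputs described above for a flattened input (illustrative only, hypothetical values; the actual operator also exposes the reverse-mapping indices):

import numpy as np

data = np.array([3, 1, 3, 2, 1])
values, first_idx, counts = np.unique(data, return_index=True, return_counts=True)
print(values)     # [1 2 3]  (ascending, matching sorted=True)
print(first_idx)  # [1 3 0]
print(counts)     # [2 1 2]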
- :param name: (Optional) A name for the output node. Default value: None. - :return: Node representing Unique operation. - """ - if axis is None: - inputs = as_nodes(data) - else: - inputs = as_nodes(data, axis) - - attributes = { - "sorted": sorted, - "index_element_type": index_element_type, - "count_element_type": count_element_type, - } - return _get_node_factory_opset10().create("Unique", inputs, attributes) diff --git a/src/bindings/python/src/compatibility/ngraph/opset11/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset11/__init__.py deleted file mode 100644 index 047c93e4cc03d3..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset11/__init__.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset8.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset9.ops import eye -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset8.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset9.ops import generate_proposals -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset9.ops import grid_sample -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution 
-from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset8.ops import if_op -from ngraph.opset11.ops import interpolate -from ngraph.opset9.ops import irdft -from ngraph.opset10.ops import is_finite -from ngraph.opset10.ops import is_inf -from ngraph.opset10.ops import is_nan -from ngraph.opset8.ops import i420_to_bgr -from ngraph.opset8.ops import i420_to_rgb -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset9.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset9.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset8.ops import nv12_to_bgr -from ngraph.opset8.ops import nv12_to_rgb -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset8.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset9.ops import rdft -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset9.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign 
-from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset8.ops import slice -from ngraph.opset8.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset9.ops import softsign -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset11.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset10.ops import unique -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset11/ops.py b/src/bindings/python/src/compatibility/ngraph/opset11/ops.py deleted file mode 100644 index 3a4b54059ca6fc..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset11/ops.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all openvino ops.""" -from functools import partial -from typing import List, Optional - -from ngraph.impl import Node -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import nameable_op -from ngraph.utils.types import ( - NodeInput, - as_nodes, -) - -_get_node_factory_opset11 = partial(_get_node_factory, "opset11") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def interpolate( - image: NodeInput, - scales_or_sizes: NodeInput, - mode: str, - shape_calculation_mode: str, - pads_begin: Optional[List[int]] = None, - pads_end: Optional[List[int]] = None, - coordinate_transformation_mode: str = "half_pixel", - nearest_mode: str = "round_prefer_floor", - antialias: bool = False, - cube_coeff: float = -0.75, - axes: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Performs the interpolation of the input tensor. - - :param image: The node providing input tensor with data for interpolation. - :param scales_or_sizes: - 1D tensor providing information used to calculate the output shape - of the operation. It might contain floats (scales) or integers(sizes). - :param mode: Specifies type of interpolation. Possible values are: nearest, linear, - linear_onnx, cubic, bilinear_pillow, bicubic_pillow. - :param shape_calculation_mode: - Specifies how the scales_or_sizes input should be interpreted. - :param pads_begin: Specifies the number of pixels to add to the beginning of the image - being interpolated. Default is None. - :param pads_end: Specifies the number of pixels to add to the end of the image being - interpolated. Default is None. - :param coordinate_transformation_mode: - Specifies how to transform the coordinate in the resized tensor to the - coordinate in the original tensor. Default is "half_pixel". - :param nearest_mode: Specifies round mode when mode == nearest and is used only when - mode == nearest. Default is "round_prefer_floor". - :param antialias: Specifies whether to perform anti-aliasing. Default is False. - :param cube_coeff: Specifies the parameter a for cubic interpolation. 
Default is -0.75. - :param axes: 1D tensor specifying dimension indices where interpolation is applied. - The default is None. - :param name: Optional name for the output node. The default is None. - :return: Node representing the interpolation operation. - """ - attrs = { - "mode": mode, - "shape_calculation_mode": shape_calculation_mode, - "coordinate_transformation_mode": coordinate_transformation_mode, - "nearest_mode": nearest_mode, - "antialias": antialias, - "cube_coeff": cube_coeff, - } - - attrs["pads_begin"] = [] if pads_begin is None else pads_begin - attrs["pads_end"] = [] if pads_end is None else pads_end - - inputs = as_nodes(image, scales_or_sizes) if axes is None else as_nodes(image, scales_or_sizes, axes) - - return _get_node_factory_opset11().create("Interpolate", inputs, attrs) - - -@nameable_op -def topk( - data: NodeInput, - k: NodeInput, - axis: int, - mode: str, - sort: str, - index_element_type: str = "i32", - stable: bool = False, - name: Optional[str] = None, -) -> Node: - """Return a node which performs TopK. - - :param data: Input data. - :param k: K. - :param axis: TopK Axis. - :param mode: Compute TopK largest ('max') or smallest ('min') - :param sort: Order of output elements (sort by: 'none', 'index' or 'value') - :param index_element_type: Type of output tensor with indices. - :param stable: Specifies whether the equivalent elements should maintain - their relative order from the input tensor during sorting. - :return: The new node which performs TopK - """ - return _get_node_factory_opset11().create( - "TopK", - as_nodes(data, k), - {"axis": axis, "mode": mode, "sort": sort, "index_element_type": index_element_type, "stable": stable}, - ) diff --git a/src/bindings/python/src/compatibility/ngraph/opset2/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset2/__init__.py deleted file mode 100644 index cd30551ec091b0..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset2/__init__.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset1.ops import atan -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset1.ops import broadcast -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import 
floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset1.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset2.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset1.ops import non_max_suppression -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset1.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset2.ops import roi_pooling -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset1.ops import shape_of -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset1.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset2/ops.py b/src/bindings/python/src/compatibility/ngraph/opset2/ops.py deleted file mode 100644 index 412e8a7bd894b3..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset2/ops.py +++ /dev/null @@ -1,175 +0,0 @@ -# 
Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset2 = partial(_get_node_factory, "opset2") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def batch_to_space( - data: NodeInput, - block_shape: NodeInput, - crops_begin: NodeInput, - crops_end: NodeInput, - name: Optional[str] = None, -) -> Node: - """Perform BatchToSpace operation on the input tensor. - - BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions. - - :param data: Node producing the data tensor. - :param block_shape: The sizes of the block of values to be moved. - :param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`. - :param crops_end: Specifies the amount to crop from the end along each axis of `data`. - :param name: Optional output node name. - :return: The new node performing a BatchToSpace operation. - """ - return _get_node_factory_opset2().create("BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end)) - - -@unary_op -def gelu(node: NodeInput, name: Optional[str] = None) -> Node: - r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node. - - Computes GELU function: - - .. math:: f(x) = 0.5\cdot x\cdot(1 + erf( \dfrac{x}{\sqrt{2}}) - - For more information refer to [Gaussian Error Linear Unit (GELU)](https://arxiv.org/pdf/1606.08415.pdf>) - - :param node: Input tensor. One of: input node, array or scalar. - :param name: Optional output node name. - :return: The new node performing a GELU operation on its input data element-wise. - """ - return _get_node_factory_opset2().create("Gelu", [node]) - - -@nameable_op -def mvn( - data: Node, - across_channels: bool = False, - normalize_variance: bool = False, - eps: float = 1e-9, - name: Optional[str] = None, -) -> Node: - r"""Perform Mean Variance Normalization operation on data from input node. - - Computes MVN on the input tensor `data` (called `X`) using formula: - - ..math:: Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}} - - :param data: The node with data tensor. - :param across_channels: Denotes if mean values are shared across channels. - :param normalize_variance: Denotes whether to perform variance normalization. - :param eps: The number added to the variance to avoid division by zero - when normalizing the value. Scalar value. - :param name: Optional output node name. - :return: The new node performing a MVN operation on input tensor. 
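The GELU and MVN formulas above can be checked numerically with plain numpy (illustrative sketch only, hypothetical values; the MVN here normalises over the whole tensor rather than per channel):

import math
import numpy as np

x = np.array([[-1.0, 0.0, 1.0, 3.0]])

# GELU: f(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
gelu = 0.5 * x * (1.0 + np.vectorize(math.erf)(x / math.sqrt(2.0)))
print(gelu)

# MVN with normalize_variance=True: Y = (X - mean(X)) / sqrt(mean((X - mean(X))**2) + eps)
eps = 1e-9
centered = x - x.mean()
print(centered / np.sqrt((centered ** 2).mean() + eps))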
- """ - return _get_node_factory_opset2().create( - "MVN", - [data], - {"across_channels": across_channels, "normalize_variance": normalize_variance, "eps": eps}, - ) - - -@nameable_op -def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node: - """Return a node which produces the ReorgYolo operation. - - :param input: Input data - :param stride: Stride to reorganize input by - :param name: Optional name for output node. - :return: ReorgYolo node - """ - return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride}) - - -@nameable_op -def roi_pooling( - input: NodeInput, - coords: NodeInput, - output_size: TensorShape, - spatial_scale: NumericData, - method: str, - name: Optional[str] = None, -) -> Node: - """Return a node which produces an ROIPooling operation. - - :param input: Input feature map {N, C, ...} - :param coords: Coordinates of bounding boxes - :param output_size: Height/Width of ROI output features (shape) - :param spatial_scale: Ratio of input feature map over input image size (float) - :param method: Method of pooling - string: "max" or "bilinear" - :return: ROIPooling node - """ - method = method.lower() - return _get_node_factory_opset2().create( - "ROIPooling", - as_nodes(input, coords), - {"output_size": Shape(output_size), "spatial_scale": spatial_scale, "method": method}, - ) - - -@nameable_op -def space_to_batch( - data: NodeInput, - block_shape: NodeInput, - pads_begin: NodeInput, - pads_end: NodeInput, - name: Optional[str] = None, -) -> Node: - """Perform SpaceToBatch operation on the input tensor. - - SpaceToBatch permutes data tensor blocks of spatial data into batch dimension. - The operator returns a copy of the input tensor where values from spatial blocks dimensions - are moved in the batch dimension - - :param data: Node producing the data tensor. - :param block_shape: The sizes of the block of values to be moved. - :param pads_begin: Specifies the padding for the beginning along each axis of `data`. - :param pads_end: Specifies the padding for the ending along each axis of `data`. - :param name: Optional output node name. - :return: The new node performing a SpaceToBatch operation. 
- """ - return _get_node_factory_opset2().create("SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end)) diff --git a/src/bindings/python/src/compatibility/ngraph/opset3/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset3/__init__.py deleted file mode 100644 index 06cd926cc4513c..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset3/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset1.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset2.ops import mvn -from ngraph.opset1.ops import negative 
-from ngraph.opset3.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset1.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset3/ops.py b/src/bindings/python/src/compatibility/ngraph/opset3/ops.py deleted file mode 100644 index 7d7c757d9cd5dc..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset3/ops.py +++ /dev/null @@ -1,609 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - 
TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset3 = partial(_get_node_factory, "opset3") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: - """Return a node which produces the Assign operation. - - :param new_value: Node producing a value to be assigned to a variable. - :param variable_id: Id of a variable to be updated. - :param name: Optional name for output node. - :return: Assign node - """ - return _get_node_factory_opset3().create("Assign", [as_node(new_value)], {"variable_id": variable_id}) - - -@nameable_op -def broadcast( - data: NodeInput, - target_shape: NodeInput, - axes_mapping: Optional[NodeInput] = None, - broadcast_spec: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Create a node which broadcasts the input node's values along specified axes to a desired shape. - - :param data: The node with input tensor data. - :param target_shape: The node with a new shape we want to broadcast tensor to. - :param axes_mapping: The node with a axis positions (0-based) in the result - that are being broadcast. - :param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes - to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL. - :param name: Optional new name for output node. - :return: New node with broadcast shape. - """ - inputs = as_nodes(data, target_shape) - if broadcast_spec.upper() == "EXPLICIT": - inputs.append(as_node(axes_mapping)) - return _get_node_factory_opset3().create("Broadcast", inputs, {"mode": broadcast_spec.upper()}) - - -@nameable_op -def bucketize( - data: Node, - buckets: NodeInput, - output_type: str = "i64", - with_right_bound: bool = True, - name: Optional[str] = None, -) -> Node: - """Return a node which produces the Bucketize operation. - - :param data: Input data to bucketize - :param buckets: 1-D of sorted unique boundaries for buckets - :param output_type: Output tensor type, "i64" or "i32", defaults to i64 - :param with_right_bound: indicates whether bucket includes the right or left - edge of interval. default true = includes right edge - :param name: Optional name for output node. - :return: Bucketize node - """ - return _get_node_factory_opset3().create( - "Bucketize", - [data, as_node(buckets)], - {"output_type": output_type, "with_right_bound": with_right_bound}, - ) - - -@nameable_op -def cum_sum( - arg: NodeInput, - axis: NodeInput, - exclusive: bool = False, - reverse: bool = False, - name: Optional[str] = None, -) -> Node: - """Construct a cumulative summation operation. - - :param arg: The tensor to be summed. - :param axis: zero dimension tensor specifying axis position along which sum will be performed. 
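As a quick reference for the `broadcast` and `bucketize` factories deleted above, a sketch of typical calls (legacy `ngraph` package assumed; the values are made up for illustration):

.. code-block:: python

    import numpy as np
    from ngraph import opset3 as ops

    # NUMPY-style broadcast of a [3] vector to shape [2, 3]
    row = ops.constant(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    target_shape = ops.constant(np.array([2, 3], dtype=np.int64))
    bcast = ops.broadcast(row, target_shape)  # broadcast_spec defaults to "NUMPY"

    # Assign each value to a bucket defined by sorted boundaries
    values = ops.parameter([4], dtype=np.float32, name="values")
    boundaries = ops.constant(np.array([1.0, 5.0, 10.0], dtype=np.float32))
    buckets = ops.bucketize(values, boundaries, output_type="i32")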
- :param exclusive: if set to true, the top element is not included - :param reverse: if set to true, will perform the sums in reverse direction - :return: New node performing the operation - """ - return _get_node_factory_opset3().create("CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse}) - - -@nameable_op -def embedding_bag_offsets_sum( - emb_table: Node, - indices: NodeInput, - offsets: NodeInput, - default_index: Optional[NodeInput] = None, - per_sample_weights: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs sums of bags of embeddings without the intermediate embeddings. - - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param offsets: Tensor containing the starting index positions of each bag in indices. - :param per_sample_weights: Tensor with weights for each sample. - :param default_index: Scalar containing default index in embedding table to fill empty bags. - :param name: Optional name for output node. - :return: The new node which performs EmbeddingBagOffsetsSum - """ - inputs = [emb_table, as_node(indices), as_node(offsets)] - if per_sample_weights is not None: - inputs.append(default_index) - inputs.append(per_sample_weights) - elif default_index is not None: - inputs.append(default_index) - - return _get_node_factory_opset3().create("EmbeddingBagOffsetsSum", inputs, {}) - - -@nameable_op -def embedding_bag_packed_sum( - emb_table: NodeInput, - indices: NodeInput, - per_sample_weights: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return an EmbeddingBagPackedSum node. - - EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given - input tensor with a row (from the weights matrix) at that index - - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param per_sample_weights: Weights to be multiplied with embedding table. - :param name: Optional name for output node. - :return: EmbeddingBagPackedSum node - """ - inputs = [as_node(emb_table), as_node(indices)] - if per_sample_weights is not None: - inputs.append(as_node(per_sample_weights)) - - return _get_node_factory_opset3().create("EmbeddingBagPackedSum", inputs, {}) - - -@nameable_op -def embedding_segments_sum( - emb_table: Node, - indices: NodeInput, - segment_ids: NodeInput, - num_segments: Optional[NodeInput] = None, - default_index: Optional[NodeInput] = None, - per_sample_weights: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return an EmbeddingSegmentsSum node. - - EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given - input tensor with a row (from the weights matrix) at that index - - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param segment_ids: Tensor with indices into the output Tensor - :param num_segments: Tensor with number of segments. - :param default_index: Scalar containing default index in embedding table to fill empty bags. - :param per_sample_weights: Weights to be multiplied with embedding table. - :param name: Optional name for output node. 
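A short illustration of the `cum_sum` and `embedding_bag_offsets_sum` helpers removed above, with made-up shapes (legacy `ngraph` package assumed):

.. code-block:: python

    import numpy as np
    from ngraph import opset3 as ops

    # Cumulative sum along axis 0: [1, 2, 3, 4] -> [1, 3, 6, 10]
    x = ops.constant(np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32))
    axis = ops.constant(np.array(0, dtype=np.int64))
    csum = ops.cum_sum(x, axis)

    # Sum embeddings per bag; offsets mark where each bag starts in `indices`
    emb_table = ops.parameter([10, 8], dtype=np.float32, name="emb_table")
    indices = ops.constant(np.array([0, 2, 3, 4], dtype=np.int64))
    offsets = ops.constant(np.array([0, 2], dtype=np.int64))  # two bags
    bags = ops.embedding_bag_offsets_sum(emb_table, indices, offsets)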
- :return: EmbeddingSegmentsSum node - """ - inputs = [as_node(emb_table), as_node(indices), as_node(segment_ids)] - if per_sample_weights is not None: - inputs.append(as_node(num_segments)) - inputs.append(as_node(default_index)) - inputs.append(as_node(per_sample_weights)) - elif default_index is not None: - inputs.append(as_node(num_segments)) - inputs.append(as_node(default_index)) - elif num_segments is not None: - inputs.append(as_node(num_segments)) - - return _get_node_factory_opset3().create("EmbeddingSegmentsSum", inputs, {}) - - -@nameable_op -def extract_image_patches( - image: NodeInput, - sizes: TensorShape, - strides: List[int], - rates: TensorShape, - auto_pad: str, - name: Optional[str] = None, -) -> Node: - """Return a node which produces the ExtractImagePatches operation. - - :param image: 4-D Input data to extract image patches. - :param sizes: Patch size in the format of [size_rows, size_cols]. - :param strides: Patch movement stride in the format of [stride_rows, stride_cols] - :param rates: Element seleciton rate for creating a patch. - :param auto_pad: Padding type. - :param name: Optional name for output node. - :return: ExtractImagePatches node - """ - return _get_node_factory_opset3().create( - "ExtractImagePatches", - [as_node(image)], - {"sizes": sizes, "strides": strides, "rates": rates, "auto_pad": auto_pad}, - ) - - -@nameable_op -def gru_cell( - X: NodeInput, - initial_hidden_state: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - linear_before_reset: bool = False, - name: Optional[str] = None, -) -> Node: - """Perform GRUCell operation on the tensor from input node. - - GRUCell represents a single GRU Cell that computes the output - using the formula described in the paper: https://arxiv.org/abs/1406.1078 - - Note this class represents only single *cell* and not whole *layer*. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor at current time step with shape: - [batch_size, hidden_size]. - :param W: The weights for matrix multiplication, gate order: zrh. - Shape: [3*hidden_size, input_size]. - :param R: The recurrence weights for matrix multiplication. - Shape: [3*hidden_size, hidden_size]. - :param B: The sum of biases (weight and recurrence). - For linear_before_reset set True the shape is [4*hidden_size]. - Otherwise the shape is [3*hidden_size]. - :param hidden_size: The number of hidden units for recurrent cell. - Specifies hidden state size. - :param activations: The vector of activation functions used inside recurrent cell. - :param activation_alpha: The vector of alpha parameters for activation functions in - order respective to activation list. - :param activation_beta: The vector of beta parameters for activation functions in order - respective to activation list. - :param clip: The value defining clipping range [-clip, clip] on input of - activation functions. - :param linear_before_reset: Flag denotes if the layer behaves according to the modification - of GRUCell described in the formula in the ONNX documentation. - :param name: Optional output node name. - :return: The new node performing a GRUCell operation on tensor from input node. 
- """ - if activations is None: - activations = ["sigmoid", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - input_nodes = as_nodes(X, initial_hidden_state, W, R, B) - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "linear_before_reset": linear_before_reset, - "clip": clip, - } - return _get_node_factory_opset3().create("GRUCell", input_nodes, attributes) - - -@nameable_op -def non_max_suppression( - boxes: NodeInput, - scores: NodeInput, - max_output_boxes_per_class: Optional[NodeInput] = None, - iou_threshold: Optional[NodeInput] = None, - score_threshold: Optional[NodeInput] = None, - box_encoding: str = "corner", - sort_result_descending: bool = True, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return a node which performs NonMaxSuppression. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes - to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param box_encoding: Format of boxes data encoding. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected - boxes across batches or not. - :param output_type: Output element type. - :return: The new node which performs NonMaxSuppression - """ - if max_output_boxes_per_class is None: - max_output_boxes_per_class = make_constant_node(0, np.int64) - if iou_threshold is None: - iou_threshold = make_constant_node(0, np.float32) - if score_threshold is None: - score_threshold = make_constant_node(0, np.float32) - - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - attributes = { - "box_encoding": box_encoding, - "sort_result_descending": sort_result_descending, - "output_type": output_type, - } - - return _get_node_factory_opset3().create("NonMaxSuppression", inputs, attributes) - - -@nameable_op -def non_zero( - data: NodeInput, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return the indices of the elements that are non-zero. - - :param data: Input data. - :param output_type: Output tensor type. - - :return: The new node which performs NonZero - """ - return _get_node_factory_opset3().create("NonZero", [as_node(data)], {"output_type": output_type}) - - -@nameable_op -def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: - """Return a node which produces the Assign operation. - - :param init_value: Node producing a value to be returned instead of an unassigned variable. - :param variable_id: Id of a variable to be read. - :param name: Optional name for output node. - :return: ReadValue node - """ - return _get_node_factory_opset3().create("ReadValue", [as_node(init_value)], {"variable_id": variable_id}) - - -@nameable_op -def rnn_cell( - X: NodeInput, - initial_hidden_state: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - activations: List[str], - activations_alpha: List[float], - activations_beta: List[float], - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Perform RNNCell operation on tensor from input node. 
- - It follows notation and equations defined as in ONNX standard: - https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN - - Note this class represents only single *cell* and not whole RNN *layer*. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor at current time step with shape: - [batch_size, hidden_size]. - :param W: The weight tensor with shape: [hidden_size, input_size]. - :param R: The recurrence weight tensor with shape: [hidden_size, - hidden_size]. - :param B: The sum of biases (weight and recurrence) with shape: [hidden_size]. - :param hidden_size: The number of hidden units for recurrent cell. - Specifies hidden state size. - :param activations: The vector of activation functions used inside recurrent cell. - :param activation_alpha: The vector of alpha parameters for activation functions in - order respective to activation list. - :param activation_beta: The vector of beta parameters for activation functions in order - respective to activation list. - :param clip: The value defining clipping range [-clip, clip] on input of - activation functions. - :param name: Optional output node name. - :return: The new node performing a RNNCell operation on tensor from input node. - """ - if activations is None: - activations = ["tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - input_nodes = as_nodes(X, initial_hidden_state, W, R, B) - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - } - return _get_node_factory_opset3().create("RNNCell", input_nodes, attributes) - - -@nameable_op -def roi_align( - data: NodeInput, - rois: NodeInput, - batch_indices: NodeInput, - pooled_h: int, - pooled_w: int, - sampling_ratio: int, - spatial_scale: float, - mode: str, - name: Optional[str] = None, -) -> Node: - """Return a node which performs ROIAlign. - - :param data: Input data. - :param rois: RoIs (Regions of Interest) to pool over. - :param batch_indices: Tensor with each element denoting the index of - the corresponding image in the batch. - :param pooled_h: Height of the ROI output feature map. - :param pooled_w: Width of the ROI output feature map. - :param sampling_ratio: Number of bins over height and width to use to calculate - each output feature map element. - :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates. - :param mode: Method to perform pooling to produce output feature map elements. - - :return: The new node which performs ROIAlign - """ - inputs = as_nodes(data, rois, batch_indices) - attributes = { - "pooled_h": pooled_h, - "pooled_w": pooled_w, - "sampling_ratio": sampling_ratio, - "spatial_scale": spatial_scale, - "mode": mode, - } - return _get_node_factory_opset3().create("ROIAlign", inputs, attributes) - - -@nameable_op -def scatter_elements_update( - data: NodeInput, - indices: NodeInput, - updates: NodeInput, - axis: NodeInput, - name: Optional[str] = None, -) -> Node: - """Return a node which produces a ScatterElementsUpdate operation. - - :param data: The input tensor to be updated. - :param indices: The tensor with indexes which will be updated. - :param updates: The tensor with update values. - :param axis: The axis for scatter. 
- :return: ScatterElementsUpdate node - - ScatterElementsUpdate creates a copy of the first input tensor with updated elements - specified with second and third input tensors. - - For each entry in `updates`, the target index in `data` is obtained by combining - the corresponding entry in `indices` with the index of the entry itself: the - index-value for dimension equal to `axis` is obtained from the value of the - corresponding entry in `indices` and the index-value for dimension not equal - to `axis` is obtained from the index of the entry itself. - - """ - return _get_node_factory_opset3().create("ScatterElementsUpdate", as_nodes(data, indices, updates, axis)) - - -@nameable_op -def scatter_update(data: Node, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which produces a ScatterUpdate operation. - - ScatterUpdate sets new values to slices from data addressed by indices. - - :param data: The input tensor to be updated. - :param indices: The tensor with indexes which will be updated. - :param updates: The tensor with update values. - :param axis: The axis at which elements will be updated. - :return: ScatterUpdate node - """ - return _get_node_factory_opset3().create("ScatterUpdate", as_nodes(data, indices, updates, axis)) - - -@nameable_op -def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node: - """Return a node which produces a tensor containing the shape of its input data. - - :param data: The tensor containing the input data. - :param output_type: Output element type. - :return: ShapeOf node - """ - return _get_node_factory_opset3().create("ShapeOf", [as_node(data)], {"output_type": output_type}) - - -@nameable_op -def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = None) -> Node: - """Perform permutation on data in the channel dimension of the input tensor. - - :param data: The node with input tensor. - :param axis: Channel dimension index in the data tensor. - A negative value means that the index should be calculated - from the back of the input data shape. - :param group: The channel dimension specified by the axis parameter - should be split into this number of groups. - :param name: Optional output node name. - :return: The new node performing a permutation on data in the channel dimension - of the input tensor. - - The operation is the equivalent with the following transformation of the input tensor - `data` of shape [N, C, H, W]: - - `data_reshaped` = reshape(`data`, [N, group, C / group, H * W]) - - `data_transposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) - - `output` = reshape(`data_transposed`, [N, C, H, W]) - - For example: - - .. code-block:: python - - Inputs: tensor of shape [1, 6, 2, 2] - - data = [[[[ 0., 1.], [ 2., 3.]], - [[ 4., 5.], [ 6., 7.]], - [[ 8., 9.], [10., 11.]], - [[12., 13.], [14., 15.]], - [[16., 17.], [18., 19.]], - [[20., 21.], [22., 23.]]]] - - axis = 1 - groups = 3 - - Output: tensor of shape [1, 6, 2, 2] - - output = [[[[ 0., 1.], [ 2., 3.]], - [[ 8., 9.], [10., 11.]], - [[16., 17.], [18., 19.]], - [[ 4., 5.], [ 6., 7.]], - [[12., 13.], [14., 15.]], - [[20., 21.], [22., 23.]]]] - """ - return _get_node_factory_opset3().create("ShuffleChannels", [as_node(data)], {"axis": axis, "group": group}) - - -@nameable_op -def topk( - data: NodeInput, - k: NodeInput, - axis: int, - mode: str, - sort: str, - index_element_type: str = "i32", - name: Optional[str] = None, -) -> Node: - """Return a node which performs TopK. 
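A compact usage sketch for the `scatter_update` and `shape_of` factories deleted above (legacy `ngraph` package assumed; tensor sizes are arbitrary):

.. code-block:: python

    import numpy as np
    from ngraph import opset3 as ops

    data = ops.parameter([3, 4], dtype=np.float32, name="data")
    indices = ops.constant(np.array([0, 2], dtype=np.int64))
    updates = ops.parameter([2, 4], dtype=np.float32, name="updates")
    axis = ops.constant(np.array(0, dtype=np.int64))

    # Rows 0 and 2 of `data` are replaced by the two rows of `updates`
    scattered = ops.scatter_update(data, indices, updates, axis)

    # Runtime shape of the result as an i64 tensor: [3, 4]
    shape = ops.shape_of(scattered, output_type="i64")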
- - :param data: Input data. - :param k: K. - :param axis: TopK Axis. - :param mode: Compute TopK largest ('max') or smallest ('min') - :param sort: Order of output elements (sort by: 'none', 'index' or 'value') - :param index_element_type: Type of output tensor with indices. - :return: The new node which performs TopK (both indices and values) - """ - return _get_node_factory_opset3().create( - "TopK", - as_nodes(data, k), - {"axis": axis, "mode": mode, "sort": sort, "index_element_type": index_element_type}, - ) diff --git a/src/bindings/python/src/compatibility/ngraph/opset4/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset4/__init__.py deleted file mode 100644 index 09406337a43966..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset4/__init__.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from 
ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset2.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset4.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset4/ops.py b/src/bindings/python/src/compatibility/ngraph/opset4/ops.py deleted file mode 100644 index 4526725f2e982f..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset4/ops.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset4 = partial(_get_node_factory, "opset4") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def ctc_loss( - logits: NodeInput, - logit_length: NodeInput, - labels: NodeInput, - label_length: NodeInput, - blank_index: Optional[NodeInput] = None, - preprocess_collapse_repeated: bool = False, - ctc_merge_repeated: bool = True, - unique: bool = False, - name: Optional[str] = None, -) -> Node: - """Return a node which performs CTCLoss. - - :param logits: 3-D tensor of logits. - :param logit_length: 1-D tensor of lengths for each object from a batch. - :param labels: 2-D tensor of labels for which likelihood is estimated using logits. - :param label_length: 1-D tensor of length for each label sequence. - :param blank_index: Scalar used to mark a blank index. - :param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation. - :param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment. - :param unique: Flag to find unique elements in a target. - :return: The new node which performs CTCLoss - """ - if blank_index is not None: - inputs = as_nodes(logits, logit_length, labels, label_length, blank_index) - else: - inputs = as_nodes(logits, logit_length, labels, label_length) - - attributes = { - "preprocess_collapse_repeated": preprocess_collapse_repeated, - "ctc_merge_repeated": ctc_merge_repeated, - "unique": unique, - } - - return _get_node_factory_opset4().create("CTCLoss", inputs, attributes) - - -@nameable_op -def non_max_suppression( - boxes: NodeInput, - scores: NodeInput, - max_output_boxes_per_class: Optional[NodeInput] = None, - iou_threshold: Optional[NodeInput] = None, - score_threshold: Optional[NodeInput] = None, - box_encoding: str = "corner", - sort_result_descending: bool = True, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return a node which performs NonMaxSuppression. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes - to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param box_encoding: Format of boxes data encoding. 
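For the `ctc_loss` factory removed above, a sketch of a typical call. The [N, T, C] logits layout and int32 lengths below follow the usual CTCLoss convention and are an assumption, since the deleted docstring only states the tensor ranks (legacy `ngraph` package assumed):

.. code-block:: python

    import numpy as np
    from ngraph import opset4 as ops

    N, T, C = 2, 8, 5  # batch, time steps, classes (incl. blank)
    logits = ops.parameter([N, T, C], dtype=np.float32, name="logits")
    logit_length = ops.constant(np.array([8, 6], dtype=np.int32))
    labels = ops.parameter([N, T], dtype=np.int32, name="labels")
    label_length = ops.constant(np.array([3, 2], dtype=np.int32))
    blank_index = ops.constant(C - 1, dtype=np.int32)

    loss = ops.ctc_loss(logits, logit_length, labels, label_length, blank_index)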
- :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected - boxes across batches or not. - :param output_type: Output element type. - :return: The new node which performs NonMaxSuppression - """ - if max_output_boxes_per_class is None: - max_output_boxes_per_class = make_constant_node(0, np.int64) - if iou_threshold is None: - iou_threshold = make_constant_node(0, np.float32) - if score_threshold is None: - score_threshold = make_constant_node(0, np.float32) - - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - attributes = { - "box_encoding": box_encoding, - "sort_result_descending": sort_result_descending, - "output_type": output_type, - } - - return _get_node_factory_opset4().create("NonMaxSuppression", inputs, attributes) - - -@nameable_op -def softplus(data: NodeInput, name: Optional[str] = None) -> Node: - """Apply SoftPlus operation on each element of input tensor. - - :param data: The tensor providing input data. - :return: The new node with SoftPlus operation applied on each element. - """ - return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {}) - - -@nameable_op -def mish( - data: NodeInput, - name: Optional[str] = None, -) -> Node: - """Return a node which performs Mish. - - :param data: Tensor with input data floating point type. - :return: The new node which performs Mish - """ - return _get_node_factory_opset4().create("Mish", as_nodes(data), {}) - - -@nameable_op -def hswish( - data: NodeInput, - name: Optional[str] = None, -) -> Node: - """Return a node which performs HSwish (hard version of Swish). - - :param data: Tensor with input data floating point type. - :return: The new node which performs HSwish - """ - return _get_node_factory_opset4().create("HSwish", as_nodes(data), {}) - - -@nameable_op -def swish( - data: NodeInput, - beta: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)). - - :param data: Tensor with input data floating point type. - :return: The new node which performs Swish - """ - if beta is None: - beta = make_constant_node(1.0, np.float32) - return _get_node_factory_opset4().create("Swish", as_nodes(data, beta), {}) - - -@nameable_op -def acosh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse cosine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arccosh operation applied on it. - """ - return _get_node_factory_opset4().create("Acosh", as_nodes(node)) - - -@nameable_op -def asinh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse sinus function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arcsinh operation applied on it. - """ - return _get_node_factory_opset4().create("Asinh", as_nodes(node)) - - -@nameable_op -def atanh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse tangent function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arctanh operation applied on it. 
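The four activation helpers deleted above are all one-input factories; a sketch showing them side by side (legacy `ngraph` package assumed):

.. code-block:: python

    import numpy as np
    from ngraph import opset4 as ops

    x = ops.parameter([2, 3], dtype=np.float32, name="x")

    sp = ops.softplus(x)  # ln(1 + e^x), element-wise
    mi = ops.mish(x)      # x * tanh(softplus(x))
    hs = ops.hswish(x)    # hard (piecewise-linear) version of Swish
    sw = ops.swish(x)     # x * sigmoid(beta * x), beta defaults to 1.0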
- """ - return _get_node_factory_opset4().create("Atanh", as_nodes(node)) - - -@nameable_op -def proposal( - class_probs: Node, - bbox_deltas: Node, - image_shape: NodeInput, - attrs: dict, - name: Optional[str] = None, -) -> Node: - """Filter bounding boxes and outputs only those with the highest prediction confidence. - - :param class_probs: 4D input floating point tensor with class prediction scores. - :param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes - :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - * base_size The size of the anchor to which scale and ratio attributes are applied. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * pre_nms_topn The number of bounding boxes before the NMS operation. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * post_nms_topn The number of bounding boxes after the NMS operation. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * nms_thresh The minimum value of the proposal to be taken into consideration. - Range of values: a positive floating-point number - Default value: None - Required: yes - * feat_stride The step size to slide over boxes (in pixels). - Range of values: a positive unsigned integer - Default value: None - Required: yes - * min_size The minimum size of box to be taken into consideration. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * ratio The ratios for anchor generation. - Range of values: a list of floating-point numbers - Default value: None - Required: yes - * scale The scales for anchor generation. - Range of values: a list of floating-point numbers - Default value: None - Required: yes - * clip_before_nms The flag that specifies whether to perform clip bounding boxes before - non-maximum suppression or not. - Range of values: True or False - Default value: True - Required: no - * clip_after_nms The flag that specifies whether to perform clip bounding boxes after - non-maximum suppression or not. - Range of values: True or False - Default value: False - Required: no - * normalize The flag that specifies whether to perform normalization of output boxes to - [0,1] interval or not. - Range of values: True or False - Default value: False - Required: no - * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding. - Range of values: a positive floating-point number - Default value: 1.0 - Required: no - * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates - before decoding. - Range of values: a positive floating-point number - Default value: 1.0 - Required: no - * framework Specifies how the box coordinates are calculated. - Range of values: "" (empty string) - calculate box coordinates like in Caffe* - tensorflow - calculate box coordinates like in the TensorFlow* - Object Detection API models - Default value: "" (empty string) - Required: no - - Example of attribute dictionary: - - .. 
code-block:: python - - # just required ones - attrs = { - 'base_size': 85, - 'pre_nms_topn': 10, - 'post_nms_topn': 20, - 'nms_thresh': 0.34, - 'feat_stride': 16, - 'min_size': 32, - 'ratio': [0.1, 1.5, 2.0, 2.5], - 'scale': [2, 3, 3, 4], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing Proposal operation. - """ - requirements = [ - ("base_size", True, np.unsignedinteger, is_positive_value), - ("pre_nms_topn", True, np.unsignedinteger, is_positive_value), - ("post_nms_topn", True, np.unsignedinteger, is_positive_value), - ("nms_thresh", True, np.floating, is_positive_value), - ("feat_stride", True, np.unsignedinteger, is_positive_value), - ("min_size", True, np.unsignedinteger, is_positive_value), - ("ratio", True, np.floating, None), - ("scale", True, np.floating, None), - ("clip_before_nms", False, np.bool_, None), - ("clip_after_nms", False, np.bool_, None), - ("normalize", False, np.bool_, None), - ("box_size_scale", False, np.floating, is_positive_value), - ("box_coordinate_scale", False, np.floating, is_positive_value), - ("framework", False, np.str_, None), - ] - - check_valid_attributes("Proposal", attrs, requirements) - - return _get_node_factory_opset4().create("Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs) - - -@nameable_op -def reduce_l1(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """L1-reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. - """ - return _get_node_factory_opset4().create("ReduceL1", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_l2(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """L2-reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. - """ - return _get_node_factory_opset4().create("ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def lstm_cell( - X: NodeInput, - initial_hidden_state: NodeInput, - initial_cell_state: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs LSTMCell operation. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. - :param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. - :param W: The weight tensor with shape: [4*hidden_size, input_size]. - :param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. 
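A short sketch of the `reduce_l1` / `reduce_l2` reductions removed above; they compute the L1 and L2 norms over the reduction axes, even though the deleted docstrings reuse the "mean-reduction" wording (legacy `ngraph` package assumed):

.. code-block:: python

    import numpy as np
    from ngraph import opset4 as ops

    x = ops.parameter([2, 4], dtype=np.float32, name="x")
    axes = ops.constant(np.array([1], dtype=np.int64))

    l1 = ops.reduce_l1(x, axes, keep_dims=True)   # sum(|x|) along axis 1 -> shape [2, 1]
    l2 = ops.reduce_l2(x, axes, keep_dims=False)  # sqrt(sum(x^2)) along axis 1 -> shape [2]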
- :param B: The bias tensor for gates with shape: [4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMCell. Node outputs count: 2. - """ - if activations is None: - activations = ["sigmoid", "tanh", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B) - - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - } - return _get_node_factory_opset4().create("LSTMCell", node_inputs, attributes) diff --git a/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py deleted file mode 100644 index a7cdd05d652de9..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from 
ngraph.opset5.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset2.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset5.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from 
ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset5/ops.py b/src/bindings/python/src/compatibility/ngraph/opset5/ops.py deleted file mode 100644 index 0baf48becd26b3..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset5/ops.py +++ /dev/null @@ -1,426 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset5 = partial(_get_node_factory, "opset5") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def batch_norm_inference( - data: NodeInput, - gamma: NodeInput, - beta: NodeInput, - mean: NodeInput, - variance: NodeInput, - epsilon: float, - name: Optional[str] = None, -) -> Node: - """Perform layer normalizes a input tensor by mean and variance with appling scale and offset. - - :param data: The input tensor with data for normalization. - :param gamma: The scalar scaling for normalized value. - :param beta: The bias added to the scaled normalized value. - :param mean: The value for mean normalization. - :param variance: The value for variance normalization. - :param epsilon: The number to be added to the variance to avoid division - by zero when normalizing a value. - :param name: The optional name of the output node. - :return: The new node which performs BatchNormInference. - """ - inputs = as_nodes(data, gamma, beta, mean, variance) - return _get_node_factory_opset5().create("BatchNormInference", inputs, {"epsilon": epsilon}) - - -@nameable_op -def gather_nd( - data: NodeInput, - indices: NodeInput, - batch_dims: Optional[int] = 0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs GatherND. 
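For the opset5 `batch_norm_inference` factory deleted above, a sketch with per-channel statistics of illustrative size (legacy `ngraph` package assumed):

.. code-block:: python

    import numpy as np
    from ngraph import opset5 as ops

    C = 3
    data = ops.parameter([1, C, 4, 4], dtype=np.float32, name="data")
    gamma = ops.constant(np.ones(C, dtype=np.float32))   # scale
    beta = ops.constant(np.zeros(C, dtype=np.float32))   # shift
    mean = ops.constant(np.zeros(C, dtype=np.float32))
    variance = ops.constant(np.ones(C, dtype=np.float32))

    bn = ops.batch_norm_inference(data, gamma, beta, mean, variance, epsilon=1e-5)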
- - :param data: N-D tensor with data for gathering - :param indices: K-D tensor of tuples with indices by which data is gathered - :param batch_dims: Scalar value of batch dimensions - :return: The new node which performs GatherND - """ - inputs = as_nodes(data, indices) - - attributes = {"batch_dims": batch_dims} - - return _get_node_factory_opset5().create("GatherND", inputs, attributes) - - -@nameable_op -def log_softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node: - """Apply LogSoftmax operation on each element of input tensor. - - :param data: The tensor providing input data. - :param axis: An axis along which LogSoftmax should be calculated - :return: The new node with LogSoftmax operation applied on each element. - """ - return _get_node_factory_opset5().create("LogSoftmax", [as_node(data)], {"axis": axis}) - - -@nameable_op -def non_max_suppression( - boxes: NodeInput, - scores: NodeInput, - max_output_boxes_per_class: Optional[NodeInput] = None, - iou_threshold: Optional[NodeInput] = None, - score_threshold: Optional[NodeInput] = None, - soft_nms_sigma: Optional[NodeInput] = None, - box_encoding: str = "corner", - sort_result_descending: bool = True, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return a node which performs NonMaxSuppression. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes - to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param soft_nms_sigma: Tensor specifying the sigma parameter for Soft-NMS. - :param box_encoding: Format of boxes data encoding. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected - boxes across batches or not. - :param output_type: Output element type. - :return: The new node which performs NonMaxSuppression - """ - if max_output_boxes_per_class is None: - max_output_boxes_per_class = make_constant_node(0, np.int64) - if iou_threshold is None: - iou_threshold = make_constant_node(0, np.float32) - if score_threshold is None: - score_threshold = make_constant_node(0, np.float32) - if soft_nms_sigma is None: - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - else: - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, soft_nms_sigma) - - attributes = { - "box_encoding": box_encoding, - "sort_result_descending": sort_result_descending, - "output_type": output_type, - } - - return _get_node_factory_opset5().create("NonMaxSuppression", inputs, attributes) - - -@nameable_op -def round(data: NodeInput, mode: str = "half_to_even", name: Optional[str] = None) -> Node: - """Apply Round operation on each element of input tensor. - - :param data: The tensor providing input data. - :param mode: Rule to round halfway cases. If set to 'half_to_even' then halfs round to the nearest even - integer or rounding in such a way that the result heads away from zero if `mode` attribute is - 'half_away_from_zero`. - :param name: An optional name of the output node. - :return: The new node with Round operation applied on each element. 
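A sketch of the `gather_nd` and `log_softmax` helpers removed above (legacy `ngraph` package assumed; index values are illustrative):

.. code-block:: python

    import numpy as np
    from ngraph import opset5 as ops

    data = ops.parameter([2, 3, 4], dtype=np.float32, name="data")
    indices = ops.constant(np.array([[0, 1], [1, 2]], dtype=np.int64))

    # Each index tuple addresses the first two dimensions, so the output
    # gathers data[0, 1, :] and data[1, 2, :] -> shape [2, 4]
    gathered = ops.gather_nd(data, indices, batch_dims=0)

    # Log-probabilities along the last axis of the gathered slices
    log_probs = ops.log_softmax(gathered, axis=1)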
- """ - return _get_node_factory_opset5().create("Round", as_nodes(data), {"mode": mode.upper()}) - - -@nameable_op -def lstm_sequence( - X: NodeInput, - initial_hidden_state: NodeInput, - initial_cell_state: NodeInput, - sequence_lengths: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - direction: str, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs LSTMSequence operation. - - :param X: The input tensor. Shape: [batch_size, seq_length, input_size]. - :param initial_hidden_state: The hidden state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param initial_cell_state: The cell state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param sequence_lengths: Specifies real sequence lengths for each batch element. - Shape: [batch_size]. Integer type. - :param W: Tensor with weights for matrix multiplication operation with input portion of data. - Expected format: fico - Shape: [num_directions, 4*hidden_size, input_size]. - :param R: The tensor with weights for matrix multiplication operation with hidden state. - Expected format: fico - Shape: [num_directions, 4*hidden_size, hidden_size]. - :param B: The sum of biases (weight and recurrence). Expected format: fico - Shape: [num_directions, 4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param direction: Specifies if the RNN is forward, reverse, or bidirectional. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMSequence. Node outputs count: 3. - """ - if activations is None: - activations = ["sigmoid", "tanh", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B) - - attributes = { - "hidden_size": hidden_size, - "direction": direction.lower(), - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - } - return _get_node_factory_opset5().create("LSTMSequence", node_inputs, attributes) - - -def hsigmoid( - data: NodeInput, - name: Optional[str] = None, -) -> Node: - """Return a node which performs HSigmoid. - - :param data: Tensor with input data floating point type. - :return: The new node which performs HSigmoid - """ - return _get_node_factory_opset5().create("HSigmoid", as_nodes(data), {}) - - -@nameable_op -def gru_sequence( - X: NodeInput, - initial_hidden_state: NodeInput, - sequence_lengths: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - direction: str, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - linear_before_reset: bool = False, - name: Optional[str] = None, -) -> Node: - """Return a node which performs GRUSequence operation. - - :param X: The input tensor. Shape: [batch_size, seq_length, input_size]. 
- :param initial_hidden_state: The hidden state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param sequence_lengths: Specifies real sequence lengths for each batch element. - Shape: [batch_size]. Integer type. - :param W: Tensor with weights for matrix multiplication operation with input portion of data. - Shape: [num_directions, 3*hidden_size, input_size]. - :param R: The tensor with weights for matrix multiplication operation with hidden state. - Shape: [num_directions, 3*hidden_size, hidden_size]. - :param B: The sum of biases (weight and recurrence). - For linear_before_reset set True the shape is [num_directions, 4*hidden_size]. - Otherwise the shape is [num_directions, 3*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param direction: Specifies if the RNN is forward, reverse, or bidirectional. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param linear_before_reset: Flag denotes if the layer behaves according to the modification - of GRU described in the formula in the ONNX documentation. - :param name: An optional name of the output node. - - :return: The new node represents GRUSequence. Node outputs count: 2. - """ - if activations is None: - activations = ["sigmoid", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B) - - attributes = { - "hidden_size": hidden_size, - "direction": direction.lower(), - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "linear_before_reset": linear_before_reset, - "clip": clip, - } - return _get_node_factory_opset5().create("GRUSequence", node_inputs, attributes) - - -@nameable_op -def rnn_sequence( - X: NodeInput, - initial_hidden_state: NodeInput, - sequence_lengths: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - direction: str, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs RNNSequence operation. - - :param X: The input tensor. Shape: [batch_size, seq_length, input_size]. - :param initial_hidden_state: The hidden state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param sequence_lengths: Specifies real sequence lengths for each batch element. - Shape: [batch_size]. Integer type. - :param W: Tensor with weights for matrix multiplication operation with input portion of data. - Shape: [num_directions, hidden_size, input_size]. - :param R: The tensor with weights for matrix multiplication operation with hidden state. - Shape: [num_directions, hidden_size, hidden_size]. - :param B: The sum of biases (weight and recurrence). - Shape: [num_directions, hidden_size]. - :param hidden_size: Specifies hidden state size. - :param direction: Specifies if the RNN is forward, reverse, or bidirectional. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. 
- :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents RNNSequence. Node outputs count: 2. - """ - if activations is None: - activations = ["tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B) - - attributes = { - "hidden_size": hidden_size, - "direction": direction.lower(), - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - } - - return _get_node_factory_opset5().create("RNNSequence", inputs, attributes) - - -@nameable_op -def loop( - trip_count: NodeInput, - execution_condition: NodeInput, - inputs: List[Node], - graph_body: GraphBody, - slice_input_desc: List[TensorIteratorSliceInputDesc], - merged_input_desc: List[TensorIteratorMergedInputDesc], - invariant_input_desc: List[TensorIteratorInvariantInputDesc], - body_output_desc: List[TensorIteratorBodyOutputDesc], - concat_output_desc: List[TensorIteratorConcatOutputDesc], - body_condition_output_idx: int, - current_iteration_input_idx: int = -1, - name: Optional[str] = None, -) -> Node: - """Perform recurrent execution of the network described in the body, iterating through the data. - - :param trip_count: A scalar or 1D tensor with 1 element specifying - maximum number of iterations. - :param execution_condition: A scalar or 1D tensor with 1 element - specifying whether to execute the first iteration or not. - :param inputs: The provided to TensorIterator operator. - :param graph_body: The graph representing the body we execute. - :param slice_input_desc: The descriptors describing sliced inputs, that is nodes - representing tensors we iterate through, processing single - data slice in one iteration. - :param merged_input_desc: The descriptors describing merged inputs, that is nodes - representing variables with initial value at first iteration, - which may be changing through iterations. - :param invariant_input_desc: The descriptors describing invariant inputs, that is nodes - representing variable with persistent value through all - iterations. - :param body_output_desc: The descriptors describing body outputs from specified - iteration. - :param concat_output_desc: The descriptors describing specified output values through - all the iterations concatenated into one node. - :param body_condition_output_idx: Determines the purpose of the corresponding result in - the graph_body. This result will determine the dynamic - exit condition. If the value of this result is False, - then iterations stop. - :param current_iteration_input_idx: Determines the purpose of the corresponding parameter - in the graph_body. This parameter will be used as - an iteration counter. Optional. - :return: The new node which performs Loop. 
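# Illustrative sketch only: building the removed opset5 RNNSequence factory, assuming the
# legacy `ngraph` compatibility package is installed. Shapes follow the docstring above
# (batch=1, seq_length=2, input_size=3, hidden_size=4, single forward direction).
import numpy as np
from ngraph.opset1.ops import parameter
from ngraph.opset5.ops import rnn_sequence

X = parameter([1, 2, 3], dtype=np.float32, name="X")
H0 = parameter([1, 1, 4], dtype=np.float32, name="H0")
seq_len = parameter([1], dtype=np.int32, name="seq_len")
W = parameter([1, 4, 3], dtype=np.float32, name="W")
R = parameter([1, 4, 4], dtype=np.float32, name="R")
B = parameter([1, 4], dtype=np.float32, name="B")
rnn = rnn_sequence(X, H0, seq_len, W, R, B, hidden_size=4, direction="forward")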
- """ - attributes = { - "body": graph_body.serialize(), - "input_descriptions": { - "slice_input_desc": [desc.serialize() for desc in slice_input_desc], - "merged_input_desc": [desc.serialize() for desc in merged_input_desc], - "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc], - }, - "output_descriptions": { - "body_output_desc": [desc.serialize() for desc in body_output_desc], - "concat_output_desc": [desc.serialize() for desc in concat_output_desc], - }, - "special_body_ports": {"body_condition_output_idx": body_condition_output_idx, "current_iteration_input_idx": current_iteration_input_idx}, - } - return _get_node_factory_opset5().create("Loop", as_nodes(trip_count, execution_condition, *inputs), attributes) diff --git a/src/bindings/python/src/compatibility/ngraph/opset6/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset6/__init__.py deleted file mode 100644 index 5dca25e0a0576b..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset6/__init__.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset6.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset5.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from 
ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset5.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset6.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from 
ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset6/ops.py b/src/bindings/python/src/compatibility/ngraph/opset6/ops.py deleted file mode 100644 index bf8e1eb8dc797b..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset6/ops.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset6 = partial(_get_node_factory, "opset6") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def ctc_greedy_decoder_seq_len( - data: NodeInput, - sequence_length: NodeInput, - blank_index: Optional[NodeInput] = None, - merge_repeated: bool = True, - classes_index_type: str = "i32", - sequence_length_type: str = "i32", - name: Optional[str] = None, -) -> Node: - """Return a node which performs CTCGreedyDecoderSeqLen. - - :param data: The input 3D tensor. Shape: [batch_size, seq_length, num_classes] - :param sequence_length: Input 1D tensor with sequence length. Shape: [batch_size] - :param blank_index: Scalar or 1D tensor with specifies the class index to use for the blank class. - Optional parameter. Default value is num_classes-1. - :return: The new node which performs CTCGreedyDecoderSeqLen. - """ - if blank_index is not None: - inputs = as_nodes(data, sequence_length, blank_index) - else: - inputs = as_nodes(data, sequence_length) - - attributes = {"merge_repeated": merge_repeated, "classes_index_type": classes_index_type, "sequence_length_type": sequence_length_type} - - return _get_node_factory_opset6().create("CTCGreedyDecoderSeqLen", inputs, attributes) - - -@nameable_op -def gather_elements( - data: NodeInput, - indices: NodeInput, - axis: Optional[int] = 0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs GatherElements. 
- - :param data: N-D tensor with data for gathering - :param indices: N-D tensor with indices by which data is gathered - :param axis: axis along which elements are gathered - :return: The new node which performs GatherElements - """ - inputs = as_nodes(data, indices) - - attributes = {"axis": axis} - - return _get_node_factory_opset6().create("GatherElements", inputs, attributes) - - -@nameable_op -def mvn( - data: Node, - axes: Node, - normalize_variance: bool, - eps: float, - eps_mode: str, - name: Optional[str] = None, -) -> Node: - """Return a node which performs MeanVarianceNormalization (MVN). - - :param data: The node with data tensor. - :param axes: The node with axes to reduce on. - :param normalize_variance: Denotes whether to perform variance normalization. - :param eps: The number added to the variance to avoid division by zero - when normalizing the value. Scalar value. - :param eps_mode: how eps is applied (`inside_sqrt` or `outside_sqrt`) - :param name: Optional output node name. - :return: The new node performing a MVN operation on input tensor. - """ - inputs = as_nodes(data, axes) - - attributes = {"normalize_variance": normalize_variance, "eps": eps, "eps_mode": eps_mode} - - return _get_node_factory_opset6().create("MVN", inputs, attributes) - - -@nameable_op -def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: - """Return a node which produces the Assign operation. - - :param new_value: Node producing a value to be assigned to a variable. - :param variable_id: Id of a variable to be updated. - :param name: Optional name for output node. - :return: Assign node - """ - return _get_node_factory_opset6().create("Assign", [as_node(new_value)], {"variable_id": variable_id}) - - -@nameable_op -def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: - """Return a node which produces the Assign operation. - - :param init_value: Node producing a value to be returned instead of an unassigned variable. - :param variable_id: Id of a variable to be read. - :param name: Optional name for output node. 
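# Illustrative sketch only: the removed opset6 mvn and gather_elements factories, assuming the
# legacy `ngraph` compatibility package is installed; constant()/parameter() come from the
# removed opset1 module and the shapes are arbitrary examples.
import numpy as np
from ngraph.opset1.ops import constant, parameter
from ngraph.opset6.ops import gather_elements, mvn

data = parameter([1, 3, 224, 224], dtype=np.float32, name="data")
axes = constant(np.array([2, 3], dtype=np.int64))
normalized = mvn(data, axes, normalize_variance=True, eps=1e-6, eps_mode="inside_sqrt")

scores = parameter([2, 5], dtype=np.float32, name="scores")
idx = parameter([2, 5], dtype=np.int64, name="idx")
picked = gather_elements(scores, idx, axis=1)  # creates a GatherElements-6 node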
- :return: ReadValue node - """ - return _get_node_factory_opset6().create("ReadValue", [as_node(init_value)], {"variable_id": variable_id}) diff --git a/src/bindings/python/src/compatibility/ngraph/opset7/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset7/__init__.py deleted file mode 100644 index 2a7d139ba597ff..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset7/__init__.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset7.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset5.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and 
-from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset5.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset7/ops.py b/src/bindings/python/src/compatibility/ngraph/opset7/ops.py deleted file mode 100644 index d66d8a57e4dec8..00000000000000 --- 
a/src/bindings/python/src/compatibility/ngraph/opset7/ops.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from functools import partial -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset7 = partial(_get_node_factory, "opset7") - - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def einsum(inputs: List[Node], equation: str) -> Node: - """Return a node which performs Einsum operation. - - :param inputs: The list of input nodes - :param equation: Einsum equation - :return: The new node performing Einsum operation on the inputs - """ - attributes = {"equation": equation} - - return _get_node_factory_opset7().create("Einsum", as_nodes(*inputs), attributes) - - -@nameable_op -def gelu( - data: Node, - approximation_mode: str, - name: Optional[str] = None, -) -> Node: - """Return a node which performs Gelu activation function. - - :param data: The node with data tensor. - :param approximation_mode: defines which approximation to use ('tanh' or 'erf') - :param name: Optional output node name. - :return: The new node performing a Gelu activation with the input tensor. - """ - inputs = as_nodes(data) - - attributes = {"approximation_mode": approximation_mode} - - return _get_node_factory_opset7().create("Gelu", inputs, attributes) - - -@nameable_op -def roll( - data: NodeInput, - shift: NodeInput, - axes: NodeInput, -) -> Node: - """Return a node which performs Roll operation. - - :param data: The node with data tensor. - :param shift: The node with the tensor with numbers of places by which elements are shifted. - :param axes: The node with the tensor with axes along which elements are shifted. - :return: The new node performing a Roll operation on the input tensor. - """ - inputs = as_nodes(data, shift, axes) - - return _get_node_factory_opset7().create("Roll", inputs) - - -@nameable_op -def gather( - data: NodeInput, - indices: NodeInput, - axis: NodeInput, - batch_dims: Optional[int] = 0, -) -> Node: - """Return a node which performs Gather. 
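# Illustrative sketch only: the removed opset7 einsum and roll factories, assuming the legacy
# `ngraph` compatibility package is installed; shapes and the Einsum equation are examples.
import numpy as np
from ngraph.opset1.ops import constant, parameter
from ngraph.opset7.ops import einsum, roll

a = parameter([2, 3], dtype=np.float32, name="a")
b = parameter([3, 4], dtype=np.float32, name="b")
matmul_like = einsum([a, b], "ij,jk->ik")      # matrix multiply expressed as Einsum-7
shift = constant(np.array([1], dtype=np.int64))
axes = constant(np.array([0], dtype=np.int64))
rolled = roll(a, shift, axes)                  # creates a Roll-7 node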
- - :param data: N-D tensor with data for gathering - :param indices: N-D tensor with indices by which data is gathered - :param axis: axis along which elements are gathered - :param batch_dims: number of batch dimensions - :return: The new node which performs Gather - """ - inputs = as_nodes(data, indices, axis) - attributes = {"batch_dims": batch_dims} - return _get_node_factory_opset7().create("Gather", inputs, attributes) - - -def dft( - data: NodeInput, - axes: NodeInput, - signal_size: Optional[NodeInput] = None, -) -> Node: - """Return a node which performs DFT operation. - - :param data: Tensor with transformed data. - :param axes: Tensor with axes to transform. - :param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'. - :return: The new node which performs DFT operation on the input data tensor. - """ - if signal_size is None: - inputs = as_nodes(data, axes) - else: - inputs = as_nodes(data, axes, signal_size) - - return _get_node_factory_opset7().create("DFT", inputs) - - -@nameable_op -def idft( - data: NodeInput, - axes: NodeInput, - signal_size: Optional[NodeInput] = None, -) -> Node: - """Return a node which performs IDFT operation. - - :param data: Tensor with transformed data. - :param axes: Tensor with axes to transform. - :param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'. - :return: The new node which performs IDFT operation on the input data tensor. - """ - if signal_size is None: - inputs = as_nodes(data, axes) - else: - inputs = as_nodes(data, axes, signal_size) - - return _get_node_factory_opset7().create("IDFT", inputs) diff --git a/src/bindings/python/src/compatibility/ngraph/opset8/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset8/__init__.py deleted file mode 100644 index b4bd72cb4b1384..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset8/__init__.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from 
ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset8.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset8.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset8.ops import if_op -from ngraph.opset1.ops import interpolate -from ngraph.opset8.ops import i420_to_bgr -from ngraph.opset8.ops import i420_to_rgb -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset8.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset5.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset8.ops import nv12_to_bgr -from ngraph.opset8.ops import nv12_to_rgb -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset8.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import 
reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset8.ops import slice -from ngraph.opset8.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset8/ops.py b/src/bindings/python/src/compatibility/ngraph/opset8/ops.py deleted file mode 100644 index f659f481096699..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset8/ops.py +++ /dev/null @@ -1,772 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from functools import partial -from typing import List, Optional, Tuple - -import numpy as np -from ngraph.exceptions import UserInputError -from ngraph.impl import Node -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import nameable_op -from ngraph.utils.input_validation import ( - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - TensorShape, - as_node, - as_nodes, -) - -_get_node_factory_opset8 = partial(_get_node_factory, "opset8") - - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def deformable_convolution( - data: NodeInput, - offsets: NodeInput, - filters: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - dilations: List[int], - mask: Optional[NodeInput] = None, - auto_pad: str = "EXPLICIT", - group: int = 1, - deformable_group: int = 1, - bilinear_interpolation_pad: bool = False, - name: Optional[str] = None, -) -> Node: - """Return a node which performs deformable convolution operation. - - :param data: The node providing data batch tensor. 
- :param offsets: The node providing offset tensor. - :param filters: The node providing filters tensor. - :param strides: The distance (in pixels) to slide the filter on the feature map over the axes. - :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param mask: The node providing modulation scalar (mask) tensor. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param group: The number of groups which both output and input should be split into. - :param deformable_group: The number of groups which deformable values and output should be split - into along the channel axis. - :param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation - execution. - :param name: The optional new name for output node. - :return: New node performing deformable convolution operation. - """ - if mask is None: - inputs = as_nodes(data, offsets, filters) - else: - inputs = as_nodes(data, offsets, filters, mask) - - return _get_node_factory_opset8().create( - "DeformableConvolution", - inputs, - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "auto_pad": auto_pad, - "group": group, - "deformable_group": deformable_group, - "bilinear_interpolation_pad": bilinear_interpolation_pad, - }, - ) - - -@nameable_op -def adaptive_avg_pool(data: NodeInput, output_shape: NodeInput) -> Node: - """Return a node which performs AdaptiveAvgPool operation. - - :param data: The list of input nodes - :param output_shape: the shape of spatial dimentions after operation - :return: The new node performing AdaptiveAvgPool operation on the data - """ - inputs = as_nodes(data, output_shape) - return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs) - - -@nameable_op -def adaptive_max_pool(data: NodeInput, output_shape: NodeInput, index_element_type: str = "i64") -> Node: - """Return a node which performs AdaptiveMaxPool operation. - - :param data: The list of input nodes - :param output_shape: the shape of spatial dimentions after operation - :param index_element_type: Type of indices output. - :return: The new node performing AdaptiveMaxPool operation on the data - """ - inputs = as_nodes(data, output_shape) - - attributes = { - "index_element_type": index_element_type, - } - - return _get_node_factory_opset8().create("AdaptiveMaxPool", inputs, attributes) - - -@nameable_op -def multiclass_nms( - boxes: NodeInput, - scores: NodeInput, - sort_result_type: str = "none", - sort_result_across_batch: bool = False, - output_type: str = "i64", - iou_threshold: float = 0.0, - score_threshold: float = 0.0, - nms_top_k: int = -1, - keep_top_k: int = -1, - background_class: int = -1, - nms_eta: float = 1.0, - normalized: bool = True, -) -> Node: - """Return a node which performs MulticlassNms. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param sort_result_type: Specifies order of output elements, possible values: - 'class': sort selected boxes by class id (ascending) - 'score': sort selected boxes by score (descending) - 'none': do not guarantee the order. 
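# Illustrative sketch only: the removed opset8 adaptive pooling factories, assuming the legacy
# `ngraph` compatibility package is installed; the 1x3x32x32 input and 7x7 target are examples.
import numpy as np
from ngraph.opset1.ops import constant, parameter
from ngraph.opset8.ops import adaptive_avg_pool, adaptive_max_pool

data = parameter([1, 3, 32, 32], dtype=np.float32, name="data")
target = constant(np.array([7, 7], dtype=np.int64))
avg = adaptive_avg_pool(data, target)
max_pooled = adaptive_max_pool(data, target, index_element_type="i32")  # values + indices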
- :param sort_result_across_batch: Specifies whenever it is necessary to sort selected boxes - across batches or not - :param output_type: Specifies the output tensor type, possible values: - 'i64', 'i32' - :param iou_threshold: Specifies intersection over union threshold - :param score_threshold: Specifies minimum score to consider box for the processing - :param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning - to keep all boxes - :param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1 - meaning to keep all boxes - :param background_class: Specifies the background class id, -1 meaning to keep all classes - :param nms_eta: Specifies eta parameter for adpative NMS, in close range [0, 1.0] - :param normalized: Specifies whether boxes are normalized or not - :return: The new node which performs MuticlassNms - """ - inputs = as_nodes(boxes, scores) - - attributes = { - "sort_result_type": sort_result_type, - "sort_result_across_batch": sort_result_across_batch, - "output_type": output_type, - "iou_threshold": iou_threshold, - "score_threshold": score_threshold, - "nms_top_k": nms_top_k, - "keep_top_k": keep_top_k, - "background_class": background_class, - "nms_eta": nms_eta, - "normalized": normalized, - } - - return _get_node_factory_opset8().create("MulticlassNms", inputs, attributes) - - -@nameable_op -def matrix_nms( - boxes: NodeInput, - scores: NodeInput, - sort_result_type: str = "none", - sort_result_across_batch: bool = False, - output_type: str = "i64", - score_threshold: float = 0.0, - nms_top_k: int = -1, - keep_top_k: int = -1, - background_class: int = -1, - decay_function: str = "linear", - gaussian_sigma: float = 2.0, - post_threshold: float = 0.0, - normalized: bool = True, -) -> Node: - """Return a node which performs MatrixNms. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param sort_result_type: Specifies order of output elements, possible values: - 'class': sort selected boxes by class id (ascending) - 'score': sort selected boxes by score (descending) - 'none': do not guarantee the order. 
- :param sort_result_across_batch: Specifies whenever it is necessary to sort selected boxes - across batches or not - :param output_type: Specifies the output tensor type, possible values: - 'i64', 'i32' - :param score_threshold: Specifies minimum score to consider box for the processing - :param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning - to keep all boxes - :param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1 - meaning to keep all boxes - :param background_class: Specifies the background class id, -1 meaning to keep all classes - :param decay_function: Specifies decay function used to decay scores, possible values: - 'gaussian', 'linear' - :param gaussian_sigma: Specifies gaussian_sigma parameter for gaussian decay_function - :param post_threshold: Specifies threshold to filter out boxes with low confidence score - after decaying - :param normalized: Specifies whether boxes are normalized or not - :return: The new node which performs MatrixNms - """ - inputs = as_nodes(boxes, scores) - - attributes = { - "sort_result_type": sort_result_type, - "sort_result_across_batch": sort_result_across_batch, - "output_type": output_type, - "score_threshold": score_threshold, - "nms_top_k": nms_top_k, - "keep_top_k": keep_top_k, - "background_class": background_class, - "decay_function": decay_function, - "gaussian_sigma": gaussian_sigma, - "post_threshold": post_threshold, - "normalized": normalized, - } - - return _get_node_factory_opset8().create("MatrixNms", inputs, attributes) - - -@nameable_op -def gather( - data: NodeInput, - indices: NodeInput, - axis: NodeInput, - batch_dims: Optional[int] = 0, -) -> Node: - """Return a node which performs Gather with support of negative indices. - - :param data: N-D tensor with data for gathering - :param indices: N-D tensor with indices by which data is gathered. Negative indices - indicate reverse indexing from the end - :param axis: axis along which elements are gathered - :param batch_dims: number of batch dimensions - :return: The new node which performs Gather - """ - inputs = as_nodes(data, indices, axis) - attributes = {"batch_dims": batch_dims} - return _get_node_factory_opset8().create("Gather", inputs, attributes) - - -@nameable_op -def max_pool( - data: NodeInput, - strides: List[int], - dilations: List[int], - pads_begin: List[int], - pads_end: List[int], - kernel_shape: TensorShape, - rounding_type: str = "floor", - auto_pad: Optional[str] = None, - index_element_type: Optional[str] = "i64", - axis: Optional[int] = 0, - name: Optional[str] = None, -) -> Node: - """Perform max pooling operation and return both values and indices of the selected elements. - - :param data: The node providing input data. - :param strides: The distance (in pixels) to slide the filter on the feature map - over the axes. - :param dilations: The dilation of filter elements(distance between elements). - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param kernel_shape: The pooling operation kernel shape. - :param rounding_type: Determines used rounding schema when computing output shape. - Acceptable values are: ['floor', 'ceil']. Defaults to 'floor'. - :param auto_pad: Determines how the padding is calculated. Acceptable values: - [None, 'same_upper', 'same_lower', 'valid']. Defaults to None. - :param index_element_type: The data type used for the indices output of this operator. 
- Defaults to i64. - :param axis: The first dimension in the data shape used to determine the maximum - returned index value. The value is the product of all dimensions - starting at the provided axis. Defaults to 0. - :param name: The optional name for the created output node. - - :return: The new node performing max pooling operation. - """ - if auto_pad is None: - auto_pad = "explicit" - return _get_node_factory_opset8().create( - "MaxPool", - [as_node(data)], - { - "strides": strides, - "dilations": dilations, - "pads_begin": pads_begin, - "pads_end": pads_end, - "kernel": kernel_shape, - "rounding_type": rounding_type.upper(), - "auto_pad": auto_pad.upper(), - "index_element_type": index_element_type, - "axis": axis, - }, - ) - - -@nameable_op -def random_uniform(output_shape: NodeInput, min_val: NodeInput, max_val: NodeInput, output_type: str, global_seed: int = 0, op_seed: int = 0) -> Node: - """Return a node which generates sequence of random values from uniform distribution. - - :param output_shape: Tensor with shape of the output tensor. - :param min_val: Tensor with the lower bound on the range of random values to generate. - :param max_val: Tensor with the upper bound on the range of random values to generate. - :param output_type: Specifies the output tensor type, possible values: - 'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'. - :param global_seed: Specifies global seed value. Required to be a positive integer or 0. - :param op_seed: Specifies operational seed value. Required to be a positive integer or 0. - :return: The new node which performs generation of random values from uniform distribution. - """ - inputs = as_nodes(output_shape, min_val, max_val) - - if global_seed < 0: - raise RuntimeError("global_seed should be positive or 0. Got: {}".format(global_seed)) - - if op_seed < 0: - raise RuntimeError("op_seed should be positive or 0. Got: {}".format(op_seed)) - - attributes = { - "output_type": output_type, - "global_seed": global_seed, - "op_seed": op_seed, - } - return _get_node_factory_opset8().create("RandomUniform", inputs, attributes) - - -@nameable_op -def if_op( - condition: NodeInput, - inputs: List[Node], - bodies: Tuple[GraphBody, GraphBody], - input_desc: Tuple[List[TensorIteratorInvariantInputDesc], List[TensorIteratorInvariantInputDesc]], - output_desc: Tuple[List[TensorIteratorBodyOutputDesc], List[TensorIteratorBodyOutputDesc]], - name: Optional[str] = None, -) -> Node: - """Execute one of the bodies depending on condtion value. - - :param condition: A scalar or 1D tensor with 1 element specifying body will be executed. - If condition is True, then body will be executed, False - else_body. - :param inputs: The provided inputs to If operation. - :param bodies: Two graphs (then_body, else_body) which will be executed depending on - condition value. - :param input_desc Two lists (for then_body and else_body) which contain rules how If - inputs are connected with body parameters. - :param output_desc: Two lists (for then_body and else_body) which contain rules how If - outputs are connected with body results. - :param name: The optional name for the created output node. - - :return: The new node which performs If operation. 
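# Illustrative sketch only: the removed opset8 random_uniform factory, assuming the legacy
# `ngraph` compatibility package is installed; bounds, shape and seeds are example values.
import numpy as np
from ngraph.opset1.ops import constant
from ngraph.opset8.ops import random_uniform

out_shape = constant(np.array([2, 2], dtype=np.int64))
low = constant(np.array(0.0, dtype=np.float32))
high = constant(np.array(1.0, dtype=np.float32))
rand = random_uniform(out_shape, low, high, output_type="f32", global_seed=10, op_seed=20)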
- """ - attributes = { - "then_body": bodies[0].serialize(), - "else_body": bodies[1].serialize(), - "then_inputs": {"invariant_input_desc": [desc.serialize() for desc in input_desc[0]]}, - "else_inputs": {"invariant_input_desc": [desc.serialize() for desc in input_desc[1]]}, - "then_outputs": {"body_output_desc": [desc.serialize() for desc in output_desc[0]]}, - "else_outputs": {"body_output_desc": [desc.serialize() for desc in output_desc[1]]}, - } - return _get_node_factory_opset8().create("If", as_nodes(condition, *inputs), attributes) - - -@nameable_op -def slice( - data: NodeInput, - start: NodeInput, - stop: NodeInput, - step: NodeInput, - axes: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which generates Slice operation. - - :param data: The node providing input data. - :param start: The node providing start indices (inclusively). - :param stop: The node providing stop indices (exclusively). - :param step: The node providing step values. - :param axes: The optional node providing axes to slice, default [0, 1, ..., len(start)-1]. - :param name: The optional name for the created output node. - :return: The new node performing Slice operation. - """ - if axes is None: - inputs = as_nodes(data, start, stop, step) - else: - inputs = as_nodes(data, start, stop, step, axes) - - return _get_node_factory_opset8().create("Slice", inputs) - - -@nameable_op -def gather_nd( - data: NodeInput, - indices: NodeInput, - batch_dims: Optional[int] = 0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs GatherND. - - :param data: N-D tensor with data for gathering - :param indices: K-D tensor of tuples with indices by which data is gathered - :param batch_dims: Scalar value of batch dimensions - :return: The new node which performs GatherND - """ - inputs = as_nodes(data, indices) - - attributes = {"batch_dims": batch_dims} - - return _get_node_factory_opset8().create("GatherND", inputs, attributes) - - -def prior_box(layer_shape: Node, image_shape: NodeInput, attrs: dict, name: Optional[str] = None) -> Node: - """Generate prior boxes of specified sizes and aspect ratios across all dimensions. - - Available attributes are: - * min_size The minimum box size (in pixels). - Range of values: positive floating point numbers - Default value: [] - Required: no - * max_size The maximum box size (in pixels). - Range of values: positive floating point numbers - Default value: [] - Required: no - * aspect_ratio Aspect ratios of prior boxes. - Range of values: set of positive floating point numbers - Default value: [] - Required: no - * flip The flag that denotes that each aspect_ratio is duplicated and flipped. - Range of values: {True, False} - Default value: False - Required: no - * clip The flag that denotes if each value in the output tensor should be clipped - to [0,1] interval. - Range of values: {True, False} - Default value: False - Required: no - * step The distance between box centers. - Range of values: floating point non-negative number - Default value: 0 - Required: no - * offset This is a shift of box respectively to top left corner. - Range of values: floating point non-negative number - Default value: None - Required: yes - * variance The variance denotes a variance of adjusting bounding boxes. The attribute - could contain 0, 1 or 4 elements. - Range of values: floating point positive numbers - Default value: [] - Required: no - * scale_all_sizes The flag that denotes type of inference. 
- Range of values: False - max_size is ignored - True - max_size is used - Default value: True - Required: no - * fixed_ratio This is an aspect ratio of a box. - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - * fixed_size This is an initial box size (in pixels). - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - * density This is the square root of the number of boxes of each type. - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - * min_max_aspect_ratios_order The flag that denotes the order of output prior box. - Range of values: False - the output prior box is in order of - [min, aspect_ratios, max] - True - the output prior box is in order of - [min, max, aspect_ratios] - Default value: True - Required: no - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'offset': 85, - } - attrs = { - 'offset': 85, - 'flip': True, - 'clip': True, - 'fixed_size': [32, 64, 128] - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - - :param layer_shape: Shape of layer for which prior boxes are computed. - :param image_shape: Shape of image to which prior boxes are scaled. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - :return: Node representing prior box operation. - """ - requirements = [ - ("offset", True, np.floating, is_non_negative_value), - ("min_size", False, np.floating, is_positive_value), - ("max_size", False, np.floating, is_positive_value), - ("aspect_ratio", False, np.floating, is_positive_value), - ("flip", False, np.bool_, None), - ("clip", False, np.bool_, None), - ("step", False, np.floating, is_non_negative_value), - ("variance", False, np.floating, is_positive_value), - ("scale_all_sizes", False, np.bool_, None), - ("fixed_ratio", False, np.floating, is_positive_value), - ("fixed_size", False, np.floating, is_positive_value), - ("density", False, np.floating, is_positive_value), - ("min_max_aspect_ratios_order", False, np.bool_, None), - ] - - check_valid_attributes("PriorBox", attrs, requirements) - - return _get_node_factory_opset8().create("PriorBox", [layer_shape, as_node(image_shape)], attrs) - - -@nameable_op -def i420_to_bgr( - arg: NodeInput, - arg_u: Optional[NodeInput] = None, - arg_v: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs I420toBGR operation. - - :param arg: The node providing single or Y plane data. - :param arg_u: The node providing U plane data. Required for separate planes. - :param arg_v: The node providing V plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing I420toBGR operation. - """ - if arg_u is None and arg_v is None: - inputs = as_nodes(arg) - elif arg_u is not None and arg_v is not None: - inputs = as_nodes(arg, arg_u, arg_v) - else: - raise UserInputError("Operation I420toBGR must have one (single plane) or three (separate planes) inputs provided.") - - return _get_node_factory_opset8().create("I420toBGR", inputs) - - -@nameable_op -def i420_to_rgb( - arg: NodeInput, - arg_u: Optional[NodeInput] = None, - arg_v: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs I420toRGB operation. 
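As a usage illustration for the i420_to_bgr factory shown above, a hedged sketch of its two accepted input layouts, with made-up tensor shapes (single packed plane versus separate Y, U and V planes):

.. code-block:: python

    import numpy as np
    from ngraph.opset8.ops import i420_to_bgr

    # Single-plane variant: Y, U and V packed into one NHWC tensor of height 1.5 * H.
    yuv = np.zeros((1, 6, 4, 1), dtype=np.float32)
    packed = i420_to_bgr(yuv)

    # Three-plane variant: Y, U and V must all be provided.
    y = np.zeros((1, 4, 4, 1), dtype=np.float32)
    u = np.zeros((1, 2, 2, 1), dtype=np.float32)
    v = np.zeros((1, 2, 2, 1), dtype=np.float32)
    planar = i420_to_bgr(y, u, v)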
- - :param arg: The node providing single or Y plane data. - :param arg_u: The node providing U plane data. Required for separate planes. - :param arg_v: The node providing V plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing I420toRGB operation. - """ - if arg_u is None and arg_v is None: - inputs = as_nodes(arg) - elif arg_u is not None and arg_v is not None: - inputs = as_nodes(arg, arg_u, arg_v) - else: - raise UserInputError("Operation I420toRGB must have one (single plane) or three (separate planes) inputs provided.") - - return _get_node_factory_opset8().create("I420toRGB", inputs) - - -@nameable_op -def nv12_to_bgr( - arg: NodeInput, - arg_uv: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs NV12toBGR operation. - - :param arg: The node providing single or Y plane data. - :param arg_uv: The node providing UV plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing NV12toBGR operation. - """ - if arg_uv is None: - inputs = as_nodes(arg) - else: - inputs = as_nodes(arg, arg_uv) - - return _get_node_factory_opset8().create("NV12toBGR", inputs) - - -@nameable_op -def nv12_to_rgb( - arg: NodeInput, - arg_uv: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs NV12toRGB operation. - - :param arg: The node providing single or Y plane data. - :param arg_uv: The node providing UV plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing NV12toRGB operation. - """ - if arg_uv is None: - inputs = as_nodes(arg) - else: - inputs = as_nodes(arg, arg_uv) - - return _get_node_factory_opset8().create("NV12toRGB", inputs) - - -@nameable_op -def detection_output( - box_logits: NodeInput, - class_preds: NodeInput, - proposals: NodeInput, - attrs: dict, - aux_class_preds: Optional[NodeInput] = None, - aux_box_preds: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Generate the detection output using information on location and confidence predictions. - - Available attributes are: - * background_label_id The background label id. - Range of values: integer value - Default value: 0 - Required: no - * top_k Maximum number of results to be kept per batch after NMS step. - Range of values: integer value - Default value: -1 - Required: no - * variance_encoded_in_target The flag that denotes if variance is encoded in target. - Range of values: {False, True} - Default value: False - Required: no - * keep_top_k Maximum number of bounding boxes per batch to be kept after NMS step. - Range of values: integer values - Default value: None - Required: yes - * code_type The type of coding method for bounding boxes. - Range of values: {'caffe.PriorBoxParameter.CENTER_SIZE', - 'caffe.PriorBoxParameter.CORNER'} - Default value: 'caffe.PriorBoxParameter.CORNER' - Required: no - * share_location The flag that denotes if bounding boxes are shared among different - classes. - Range of values: {True, False} - Default value: True - Required: no - * nms_threshold The threshold to be used in the NMS stage. - Range of values: floating point value - Default value: None - Required: yes - * confidence_threshold Specifies the minimum confidence threshold for detection boxes to be - considered. 
- Range of values: floating point value - Default value: 0 - Required: no - * clip_after_nms The flag that denotes whether to perform clip bounding boxes after - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - * clip_before_nms The flag that denotes whether to perform clip bounding boxes before - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - * decrease_label_id The flag that denotes how to perform NMS. - Range of values: False - perform NMS like in Caffe*. - True - perform NMS like in MxNet*. - Default value: False - Required: no - * normalized The flag that denotes whether input tensors with boxes are normalized. - Range of values: {True, False} - Default value: False - Required: no - * input_height The input image height. - Range of values: positive integer number - Default value: 1 - Required: no - * input_width The input image width. - Range of values: positive integer number - Default value: 1 - Required: no - * objectness_score The threshold to sort out confidence predictions. - Range of values: non-negative float number - Default value: 0 - Required: no - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - } - attrs = { - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - 'normalized': True, - 'clip_before_nms': True, - 'input_height': [32], - 'input_width': [32], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - - :param box_logits: The 2D input tensor with box logits. - :param class_preds: The 2D input tensor with class predictions. - :param proposals: The 3D input tensor with proposals. - :param attrs: The dictionary containing key, value pairs for attributes. - :param aux_class_preds: The 2D input tensor with additional class predictions information. - :param aux_box_preds: The 2D input tensor with additional box predictions information. - :param name: Optional name for the output node. - :return: Node representing DetectionOutput operation. - """ - requirements = [ - ("background_label_id", False, np.integer, None), - ("top_k", False, np.integer, None), - ("variance_encoded_in_target", False, np.bool_, None), - ("keep_top_k", True, np.integer, None), - ("code_type", False, np.str_, None), - ("share_location", False, np.bool_, None), - ("nms_threshold", True, np.floating, None), - ("confidence_threshold", False, np.floating, None), - ("clip_after_nms", False, np.bool_, None), - ("clip_before_nms", False, np.bool_, None), - ("decrease_label_id", False, np.bool_, None), - ("normalized", False, np.bool_, None), - ("input_height", False, np.integer, is_positive_value), - ("input_width", False, np.integer, is_positive_value), - ("objectness_score", False, np.floating, is_non_negative_value), - ] - - check_valid_attributes("DetectionOutput", attrs, requirements) - - inputs = [box_logits, class_preds, proposals] - if aux_class_preds is not None: - inputs.append(aux_class_preds) - if aux_box_preds is not None: - inputs.append(aux_box_preds) - inputs = as_nodes(*inputs) - - return _get_node_factory_opset8().create("DetectionOutput", inputs, attrs) - - -@nameable_op -def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node: - """Apply softmax operation on each element of input tensor. - - :param data: The tensor providing input data. - :param axis: An axis along which Softmax should be calculated. 
Can be positive or negative. - :param name: Optional name for the node - :return: The new node with softmax operation applied on each element. - """ - return _get_node_factory_opset8().create("Softmax", [as_node(data)], {"axis": axis}) diff --git a/src/bindings/python/src/compatibility/ngraph/opset9/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset9/__init__.py deleted file mode 100644 index b967e2c6d0d068..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset9/__init__.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset8.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset9.ops import eye -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset8.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset9.ops import generate_proposals -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset9.ops import grid_sample -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import 
hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset8.ops import if_op -from ngraph.opset1.ops import interpolate -from ngraph.opset9.ops import irdft -from ngraph.opset8.ops import i420_to_bgr -from ngraph.opset8.ops import i420_to_rgb -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset9.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset9.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset8.ops import nv12_to_bgr -from ngraph.opset8.ops import nv12_to_rgb -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset8.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset9.ops import rdft -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset9.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset8.ops import slice -from ngraph.opset8.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset9.ops import softsign -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import 
space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset9/ops.py b/src/bindings/python/src/compatibility/ngraph/opset9/ops.py deleted file mode 100644 index 1c744216e9dadb..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset9/ops.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from functools import partial -from typing import Optional - -import numpy as np -from ngraph.impl import Node -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import nameable_op -from ngraph.utils.types import ( - NodeInput, - as_nodes, - as_node, - make_constant_node, -) - - -_get_node_factory_opset9 = partial(_get_node_factory, "opset9") - - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def eye( - num_rows: NodeInput, - num_columns: NodeInput, - diagonal_index: NodeInput, - output_type: str, - batch_shape: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs eye operation. - - :param num_rows: The node providing row number tensor. - :param num_columns: The node providing column number tensor. - :param diagonal_index: The node providing the index of the diagonal to be populated. - :param output_type: Specifies the output tensor type, supports any numeric types. - :param batch_shape: The node providing the leading batch dimensions of output shape. Optionally. - :param name: The optional new name for output node. - :return: New node performing deformable convolution operation. - """ - if batch_shape is not None: - inputs = as_nodes(num_rows, num_columns, diagonal_index, batch_shape) - else: - inputs = as_nodes(num_rows, num_columns, diagonal_index) - - return _get_node_factory_opset9().create("Eye", inputs, {"output_type": output_type}) - - -def roi_align( - data: NodeInput, - rois: NodeInput, - batch_indices: NodeInput, - pooled_h: int, - pooled_w: int, - sampling_ratio: int, - spatial_scale: float, - mode: str, - aligned_mode: Optional[str] = "asymmetric", - name: Optional[str] = None, -) -> Node: - """Return a node which performs ROIAlign operation. - - :param data: Input data. - :param rois: RoIs (Regions of Interest) to pool over. - :param batch_indices: Tensor with each element denoting the index of - the corresponding image in the batch. - :param pooled_h: Height of the ROI output feature map. - :param pooled_w: Width of the ROI output feature map. - :param sampling_ratio: Number of bins over height and width to use to calculate - each output feature map element. - :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates. - :param mode: Method to perform pooling to produce output feature map elements. 
Avaiable modes are: - - 'max' - maximum pooling - - 'avg' - average pooling - :param aligned_mode: Specifies how to transform the coordinate in original tensor to the resized tensor. - Mode 'asymmetric' is the default value. Optional. Avaiable aligned modes are: - - 'asymmetric' - - 'half_pixel_for_nn' - - 'half_pixel' - :param name: The optional name for the output node - - :return: The new node which performs ROIAlign - """ - inputs = as_nodes(data, rois, batch_indices) - attributes = { - "pooled_h": pooled_h, - "pooled_w": pooled_w, - "sampling_ratio": sampling_ratio, - "spatial_scale": spatial_scale, - "mode": mode, - "aligned_mode": aligned_mode, - } - return _get_node_factory_opset9().create("ROIAlign", inputs, attributes) - - -@nameable_op -def non_max_suppression( - boxes: NodeInput, - scores: NodeInput, - max_output_boxes_per_class: Optional[NodeInput] = None, - iou_threshold: Optional[NodeInput] = None, - score_threshold: Optional[NodeInput] = None, - soft_nms_sigma: Optional[NodeInput] = None, - box_encoding: str = "corner", - sort_result_descending: bool = True, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return a node which performs NonMaxSuppression. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes - to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param soft_nms_sigma: Tensor specifying the sigma parameter for Soft-NMS. - :param box_encoding: Format of boxes data encoding. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected - boxes across batches or not. - :param output_type: Output element type. - :return: The new node which performs NonMaxSuppression - """ - max_output_boxes_per_class = max_output_boxes_per_class if max_output_boxes_per_class is not None else make_constant_node(0, np.int64) - iou_threshold = iou_threshold if iou_threshold is not None else make_constant_node(0, np.float32) - score_threshold = score_threshold if score_threshold is not None else make_constant_node(0, np.float32) - soft_nms_sigma = soft_nms_sigma if soft_nms_sigma is not None else make_constant_node(0, np.float32) - - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, soft_nms_sigma) - - attributes = { - "box_encoding": box_encoding, - "sort_result_descending": sort_result_descending, - "output_type": output_type, - } - - return _get_node_factory_opset9().create("NonMaxSuppression", inputs, attributes) - - -def softsign(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply SoftSign operation on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: The optional name for the output node. - :return: New node with SoftSign operation applied on each element of it. - """ - return _get_node_factory_opset9().create("SoftSign", [as_node(node)], {}) - - -@nameable_op -def rdft( - data: NodeInput, - axes: NodeInput, - signal_size: Optional[NodeInput] = None, -) -> Node: - """Return a node which performs RDFT operation. - - :param data: Tensor with data. - :param axes: Tensor with axes to transform. - :param signal_size: Optional tensor specifying signal size with respect to axes from the input 'axes'. 
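A brief, illustrative sketch of the non_max_suppression factory described above; the box/score shapes and threshold values are assumptions for demonstration, not values taken from the codebase:

.. code-block:: python

    import numpy as np
    from ngraph.opset9.ops import non_max_suppression

    boxes = np.zeros((1, 10, 4), dtype=np.float32)    # [batch, num_boxes, 4]
    scores = np.zeros((1, 1, 10), dtype=np.float32)   # [batch, num_classes, num_boxes]

    # Unspecified thresholds fall back to zero-valued Constant nodes inside the factory.
    nms = non_max_suppression(
        boxes,
        scores,
        max_output_boxes_per_class=np.int64(5),
        iou_threshold=np.float32(0.5),
        score_threshold=np.float32(0.05),
    )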
- :return: The new node which performs RDFT operation on the input data tensor. - """ - if signal_size is None: - inputs = as_nodes(data, axes) - else: - inputs = as_nodes(data, axes, signal_size) - - return _get_node_factory_opset9().create("RDFT", inputs) - - -@nameable_op -def irdft( - data: NodeInput, - axes: NodeInput, - signal_size: Optional[NodeInput] = None, -) -> Node: - """Return a node which performs IRDFT operation. - - :param data: Tensor with data. - :param axes: Tensor with axes to transform. - :param signal_size: Optional tensor specifying signal size with respect to axes from the input 'axes'. - :return: The new node which performs IRDFT operation on the input data tensor. - """ - if signal_size is None: - inputs = as_nodes(data, axes) - else: - inputs = as_nodes(data, axes, signal_size) - - return _get_node_factory_opset9().create("IRDFT", inputs) - - -@nameable_op -def multiclass_nms( - boxes: NodeInput, - scores: NodeInput, - roisnum: Optional[NodeInput] = None, - sort_result_type: Optional[str] = "none", - sort_result_across_batch: Optional[bool] = False, - output_type: Optional[str] = "i64", - iou_threshold: Optional[float] = 0.0, - score_threshold: Optional[float] = 0.0, - nms_top_k: Optional[int] = -1, - keep_top_k: Optional[int] = -1, - background_class: Optional[int] = -1, - nms_eta: Optional[float] = 1.0, - normalized: Optional[bool] = True, -) -> Node: - """Return a node which performs MulticlassNms. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param roisnum: Tensor with roisnum. Specifies the number of rois in each image. Required when - 'scores' is a 2-dimensional tensor. - :param sort_result_type: Specifies order of output elements, possible values: - 'class': sort selected boxes by class id (ascending) - 'score': sort selected boxes by score (descending) - 'none': do not guarantee the order. 
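The rdft and irdft factories above mirror each other; a minimal sketch of chaining them, with an assumed 1x128 input signal:

.. code-block:: python

    import numpy as np
    from ngraph.opset9.ops import rdft, irdft

    signal = np.zeros((1, 128), dtype=np.float32)
    axes = np.array([1], dtype=np.int64)

    spectrum = rdft(signal, axes)      # real-to-complex forward transform
    restored = irdft(spectrum, axes)   # complex-to-real inverse transform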
- :param sort_result_across_batch: Specifies whenever it is necessary to sort selected boxes - across batches or not - :param output_type: Specifies the output tensor type, possible values: - 'i64', 'i32' - :param iou_threshold: Specifies intersection over union threshold - :param score_threshold: Specifies minimum score to consider box for the processing - :param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning - to keep all boxes - :param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1 - meaning to keep all boxes - :param background_class: Specifies the background class id, -1 meaning to keep all classes - :param nms_eta: Specifies eta parameter for adpative NMS, in close range [0, 1.0] - :param normalized: Specifies whether boxes are normalized or not - :return: The new node which performs MuticlassNms - """ - if roisnum is None: - inputs = as_nodes(boxes, scores) - else: - inputs = as_nodes(boxes, scores, roisnum) - - attributes = { - "sort_result_type": sort_result_type, - "sort_result_across_batch": sort_result_across_batch, - "output_type": output_type, - "iou_threshold": iou_threshold, - "score_threshold": score_threshold, - "nms_top_k": nms_top_k, - "keep_top_k": keep_top_k, - "background_class": background_class, - "nms_eta": nms_eta, - "normalized": normalized, - } - - return _get_node_factory_opset9().create("MulticlassNms", inputs, attributes) - - -def generate_proposals( - im_info: NodeInput, - anchors: NodeInput, - deltas: NodeInput, - scores: NodeInput, - min_size: float, - nms_threshold: float, - pre_nms_count: int, - post_nms_count: int, - normalized: bool = True, - nms_eta: float = 1.0, - roi_num_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return a node which performs GenerateProposals operation. - - :param im_info: Input with image info. - :param anchors: Input anchors. - :param deltas: Input deltas. - :param scores: Input scores. - :param min_size: Specifies minimum box width and height. - :param nms_threshold: Specifies threshold to be used in the NMS stage. - :param pre_nms_count: Specifies number of top-n proposals before NMS. - :param post_nms_count: Specifies number of top-n proposals after NMS. - :param normalized: Specifies whether proposal bboxes are normalized or not. Optional attribute, default value is `True`. - :param nms_eta: Specifies eta parameter for adaptive NMS., must be in range `[0.0, 1.0]`. Optional attribute, default value is `1.0`. - :param roi_num_type: Specifies the element type of the third output `rpnroisnum`. Optional attribute, range of values: `i64` (default) or `i32`. - :param name: The optional name for the output node. - :return: New node performing GenerateProposals operation. - """ - inputs = as_nodes(im_info, anchors, deltas, scores) - - attributes = { - "min_size": min_size, - "nms_threshold": nms_threshold, - "pre_nms_count": pre_nms_count, - "post_nms_count": post_nms_count, - "normalized": normalized, - "nms_eta": nms_eta, - "roi_num_type": roi_num_type, - } - - return _get_node_factory_opset9().create("GenerateProposals", inputs, attributes) - - -def grid_sample(data: NodeInput, grid: NodeInput, attributes: dict, name: Optional[str] = None) -> Node: - """Return a node which performs GridSample operation. - - :param data: The input image. - :param grid: Grid values (normalized input coordinates). - :param attributes: A dictionary containing GridSample's attributes. - :param name: Optional name of the node. 
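A hedged sketch of the multiclass_nms factory documented above, using illustrative box/score shapes and threshold values:

.. code-block:: python

    import numpy as np
    from ngraph.opset9.ops import multiclass_nms

    boxes = np.zeros((1, 100, 4), dtype=np.float32)    # [batch, num_boxes, 4]
    scores = np.zeros((1, 5, 100), dtype=np.float32)   # [batch, num_classes, num_boxes]

    node = multiclass_nms(
        boxes,
        scores,
        sort_result_type="score",
        iou_threshold=0.5,
        score_threshold=0.05,
        nms_top_k=200,
        keep_top_k=50,
    )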
- Available attributes: - * align_corners A flag which specifies whether to align the grid extrema values - with the borders or center points of the input tensor's border pixels. - Range of values: true, false - Default value: false - Required: no - * mode Specifies the type of interpolation. - Range of values: bilinear, bicubic, nearest - Default value: bilinear - Required: no - * padding_mode Specifies how the out-of-bounds coordinates should be handled. - Range of values: zeros, border, reflection - Default value: zeros - Required: no - :return: A new GridSample node. - """ - return _get_node_factory_opset9().create("GridSample", as_nodes(data, grid), attributes) diff --git a/src/bindings/python/src/compatibility/ngraph/opset_utils.py b/src/bindings/python/src/compatibility/ngraph/opset_utils.py deleted file mode 100644 index a639dcbe90abd2..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset_utils.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Optional -import numpy as np - -from ngraph.impl import Node -from ngraph.utils.decorators import nameable_op -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.types import ( - as_node, - NodeInput, -) - - -def _get_node_factory(opset_version: Optional[str] = None) -> NodeFactory: - """Return NodeFactory configured to create operators from specified opset version.""" - if opset_version: - return NodeFactory(opset_version) - else: - return NodeFactory() diff --git a/src/bindings/python/src/compatibility/ngraph/utils/__init__.py b/src/bindings/python/src/compatibility/ngraph/utils/__init__.py deleted file mode 100644 index 0375e1394e7a63..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Generic utilities. Factor related functions out to separate files.""" diff --git a/src/bindings/python/src/compatibility/ngraph/utils/broadcasting.py b/src/bindings/python/src/compatibility/ngraph/utils/broadcasting.py deleted file mode 100644 index 7d4bb114d00f0d..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/broadcasting.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging -from typing import List, Optional - -import ngraph as ng -from ngraph.impl import AxisSet, Node -from ngraph.utils.types import NodeInput, TensorShape, get_dtype, make_constant_node - -log = logging.getLogger(__name__) - - -def get_broadcast_axes(output_shape: TensorShape, input_shape: TensorShape, axis: Optional[int] = None) -> AxisSet: - """Generate a list of broadcast axes for ngraph++ broadcast. - - Informally, a broadcast "adds" axes to the input tensor, - replicating elements from the input tensor as needed to fill the new dimensions. - Function calculate which of the output axes are added in this way. - - :param output_shape: The new shape for the output tensor. - :param input_shape: The shape of input tensor. - :param axis: The axis along which we want to replicate elements. - :return: The indices of added axes. 
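A minimal sketch of the grid_sample factory and its attribute dictionary described above; the tensor shapes are placeholders:

.. code-block:: python

    import numpy as np
    from ngraph.opset9.ops import grid_sample

    data = np.zeros((1, 1, 10, 10), dtype=np.float32)  # NCHW input image
    grid = np.zeros((1, 5, 5, 2), dtype=np.float32)    # normalized sampling coordinates

    node = grid_sample(data, grid, {
        "mode": "bilinear",
        "padding_mode": "zeros",
        "align_corners": False,
    })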
- """ - axes_indexes = list(range(0, len(output_shape))) - if axis is None: - output_begin = len(output_shape) - len(input_shape) - else: - output_begin = axis - right_axes_indexes = list(range(output_begin, output_begin + len(input_shape))) - for index in reversed(right_axes_indexes): - del axes_indexes[index] - return AxisSet(set(axes_indexes)) diff --git a/src/bindings/python/src/compatibility/ngraph/utils/decorators.py b/src/bindings/python/src/compatibility/ngraph/utils/decorators.py deleted file mode 100644 index a0b955714a0ba4..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/decorators.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from functools import wraps -from typing import Any, Callable - -from ngraph.impl import Node -from ngraph.utils.types import NodeInput, as_node, as_nodes - - -def _set_node_friendly_name(node: Node, **kwargs: Any) -> Node: - if "name" in kwargs: - node.friendly_name = kwargs["name"] - return node - - -def nameable_op(node_factory_function: Callable) -> Callable: - """Set the name to the ngraph operator returned by the wrapped function.""" - - @wraps(node_factory_function) - def wrapper(*args: Any, **kwargs: Any) -> Node: - node = node_factory_function(*args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) - return node - - return wrapper - - -def unary_op(node_factory_function: Callable) -> Callable: - """Convert the first input value to a Constant Node if a numeric value is detected.""" - - @wraps(node_factory_function) - def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node: - input_node = as_node(input_value) - node = node_factory_function(input_node, *args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) - return node - - return wrapper - - -def binary_op(node_factory_function: Callable) -> Callable: - """Convert the first two input values to Constant Nodes if numeric values are detected.""" - - @wraps(node_factory_function) - def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node: - left, right = as_nodes(left, right) - node = node_factory_function(left, right, *args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) - return node - - return wrapper diff --git a/src/bindings/python/src/compatibility/ngraph/utils/input_validation.py b/src/bindings/python/src/compatibility/ngraph/utils/input_validation.py deleted file mode 100644 index f0e5f61f52fb50..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/input_validation.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Helper functions for validating user input.""" - -import logging -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type - -import numpy as np - -from ngraph.exceptions import UserInputError - -log = logging.getLogger(__name__) - - -def assert_list_of_ints(value_list: Iterable[int], message: str) -> None: - """Verify that the provided value is an iterable of integers.""" - try: - for value in value_list: - if not isinstance(value, int): - raise TypeError - except TypeError: - log.warning(message) - raise UserInputError(message, value_list) - - -def _check_value(op_name, attr_key, value, val_type, cond=None): - # type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool - """Check whether provided value satisfies specified criteria. 
- - :param op_name: The operator name which attributes are checked. - :param attr_key: The attribute name. - :param value: The value to check. - :param val_type: Required value type. - :param cond: The optional function running additional checks. - - :raises UserInputError: - :return: True if attribute satisfies all criterias. Otherwise False. - """ - if not np.issubdtype(type(value), val_type): - raise UserInputError('{} operator attribute "{}" value must by of type {}.'.format(op_name, attr_key, val_type)) - if cond is not None and not cond(value): - raise UserInputError('{} operator attribute "{}" value does not satisfy provided condition.'.format(op_name, attr_key)) - return True - - -def check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False): - # type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool - """Check whether specified attribute satisfies given criteria. - - :param op_name: The operator name which attributes are checked. - :param attr_dict: Dictionary containing key-value attributes to check. - :param attr_key: Key value for validated attribute. - :param val_type: Value type for validated attribute. - :param cond: Any callable wich accept attribute value and returns True or False. - :param required: Whether provided attribute key is not required. This mean it may be missing - from provided dictionary. - - :raises UserInputError: - - :return: True if attribute satisfies all criterias. Otherwise False. - """ - result = True - - if required and attr_key not in attr_dict: - raise UserInputError('Provided dictionary is missing {} operator required attribute "{}"'.format(op_name, attr_key)) - - if attr_key not in attr_dict: - return result - - attr_value = attr_dict[attr_key] - - if np.isscalar(attr_value): - result = result and _check_value(op_name, attr_key, attr_value, val_type, cond) - else: - for v in attr_value: - result = result and _check_value(op_name, attr_key, v, val_type, cond) - - return result - - -def check_valid_attributes( - op_name, # type: str - attributes, # type: Dict[str, Any] - requirements, # type: List[Tuple[str, bool, Type, Optional[Callable]]] -): - # type: (...) -> bool - """Perform attributes validation according to specified type, value criteria. - - :param op_name: The operator name which attributes are checked. - :param attributes: The dictionary with user provided attributes to check. - :param requirements: The list of tuples describing attributes' requirements. The tuple should - contain following values: - (attr_name: str, - is_required: bool, - value_type: Type, - value_condition: Callable) - - :raises UserInputError: - :return: True if all attributes satisfies criterias. Otherwise False. - """ - for attr, required, val_type, cond in requirements: - check_valid_attribute(op_name, attributes, attr, val_type, cond, required) - return True - - -def is_positive_value(x): # type: (Any) -> bool - """Determine whether the specified x is positive value. - - :param x: The value to check. - - :return: True if the specified x is positive value, False otherwise. - """ - return x > 0 - - -def is_non_negative_value(x): # type: (Any) -> bool - """Determine whether the specified x is non-negative value. - - :param x: The value to check. - - :return: True if the specified x is non-negative value, False otherwise. 
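A short sketch of the attribute-validation helpers above; the PriorBox-style requirements tuple and attribute values are illustrative:

.. code-block:: python

    import numpy as np
    from ngraph.utils.input_validation import check_valid_attributes, is_positive_value

    requirements = [
        # (attr_name, is_required, value_type, value_condition)
        ("offset", True, np.floating, None),
        ("min_size", False, np.floating, is_positive_value),
    ]
    attrs = {"offset": 0.5, "min_size": [16.0, 32.0]}

    check_valid_attributes("PriorBox", attrs, requirements)  # True, or raises UserInputError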
- """ - return x >= 0 diff --git a/src/bindings/python/src/compatibility/ngraph/utils/node_factory.py b/src/bindings/python/src/compatibility/ngraph/utils/node_factory.py deleted file mode 100644 index 0e3d2cc09cecc2..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/node_factory.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from functools import partial -from typing import Any, Dict, List, Optional, Union - -from _pyngraph import NodeFactory as _NodeFactory - -from ngraph.impl import Node, Output - -from ngraph.exceptions import UserInputError - -DEFAULT_OPSET = "opset11" - - -class NodeFactory(object): - """Factory front-end to create node objects.""" - - def __init__(self, opset_version: str = DEFAULT_OPSET) -> None: - """Create the NodeFactory object. - - :param opset_version: The opset version the factory will use to produce ops from. - """ - self.factory = _NodeFactory(opset_version) - - def create( - self, - op_type_name: str, - arguments: Optional[List[Union[Node, Output]]] = None, - attributes: Optional[Dict[str, Any]] = None, - ) -> Node: - """Create node object from provided description. - - The user does not have to provide all node's attributes, but only required ones. - - :param op_type_name: The operator type name. - :param arguments: The operator arguments. - :param attributes: The operator attributes. - - returns Node object representing requested operator with attributes set. - """ - if arguments is None and attributes is None: - node = self.factory.create(op_type_name) - node._attr_cache = {} - node._attr_cache_valid = False - return node - - if arguments is None and attributes is not None: - raise UserInputError('Error: cannot create "{}" op without arguments.'.format(op_type_name)) - - if attributes is None: - attributes = {} - - assert arguments is not None - - arguments = self._arguments_as_outputs(arguments) - node = self.factory.create(op_type_name, arguments, attributes) - - # Currently we don't support any attribute getters & setters for TensorIterator node. - if node.get_type_name() == "TensorIterator": - return node - - # Set getters and setters for each node's attribute. - # node.get_attribute_name() - # node.set_attribute_name() - # For compound (with more than one level of nesting) attributes of form ie.: - # node.class_member_name.some_metric.attr_name: - # node.get_some_metric_attr_name() - # node.set_some_metric_attr_name() - # Please see test_dyn_attributes.py for more usage examples. - all_attributes = node.get_attributes() - for attr_name in all_attributes.keys(): - setattr( - node, - self._normalize_attr_name_getter(attr_name), - partial(NodeFactory._get_node_attr_value, node, attr_name), - ) - setattr( - node, - self._normalize_attr_name_setter(attr_name), - partial(NodeFactory._set_node_attr_value, node, attr_name), - ) - - # Setup helper members for caching attribute values. - # The cache would be lazily populated at first access attempt. - node._attr_cache = {} - node._attr_cache_valid = False - - return node - - @staticmethod - def _arguments_as_outputs(arguments: List[Union[Node, Output]]) -> List[Output]: - outputs = [] - for argument in arguments: - if issubclass(type(argument), Output): - outputs.append(argument) - else: - log.warning( - "Op arguments were passed as Node, please avoid passing arguments in " - "this manner, and pass Output(s) instead, because accepting Nodes will " - "be deprecated in a future release." 
- ) - outputs.extend(argument.outputs()) - return outputs - - @staticmethod - def _normalize_attr_name(attr_name: str, prefix: str) -> str: - """Normalize attribute name. - - :param attr_name: The attribute name. - :param prefix: The prefix to attach to attribute name. - - returns The modified attribute name. - """ - # Trim first part of the name if there is only one level of attribute hierarchy. - if attr_name.count(".") == 1: - attr_name = attr_name[attr_name.find(".") + 1:] - return prefix + attr_name.replace(".", "_") - - @classmethod - def _normalize_attr_name_getter(cls, attr_name: str) -> str: - """Normalize atr name to be suitable for getter function name. - - :param attr_name: The attribute name to normalize - - returns The appropriate getter function name. - """ - return cls._normalize_attr_name(attr_name, "get_") - - @classmethod - def _normalize_attr_name_setter(cls, attr_name: str) -> str: - """Normalize attribute name to be suitable for setter function name. - - :param attr_name: The attribute name to normalize - - returns The appropriate setter function name. - """ - return cls._normalize_attr_name(attr_name, "set_") - - @staticmethod - def _get_node_attr_value(node: Node, attr_name: str) -> Any: - """Get provided node attribute value. - - :param node: The node we retrieve attribute value from. - :param attr_name: The attribute name. - - returns The node attribute value. - """ - if not node._attr_cache_valid: - node._attr_cache = node.get_attributes() - node._attr_cache_valid = True - return node._attr_cache[attr_name] - - @staticmethod - def _set_node_attr_value(node: Node, attr_name: str, value: Any) -> None: - """Set the node attribute value. - - :param node: The node we change attribute value for. - :param attr_name: The attribute name. - :param value: The new attribute value. - """ - node.set_attribute(attr_name, value) - node._attr_cache[attr_name] = value diff --git a/src/bindings/python/src/compatibility/ngraph/utils/reduction.py b/src/bindings/python/src/compatibility/ngraph/utils/reduction.py deleted file mode 100644 index 1c1779554c7f15..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/reduction.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Iterable, Optional - -from ngraph.impl import Node - - -def get_reduction_axes(node: Node, reduction_axes: Optional[Iterable[int]]) -> Iterable[int]: - """Get reduction axes if it is None and convert it to set if its type is different. - - If reduction_axes is None we default to reduce all axes. - - :param node: The node we fill reduction axes for. - :param reduction_axes: The collection of indices of axes to reduce. May be None. - :return: Set filled with indices of axes we want to reduce. 
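A minimal sketch of driving NodeFactory directly, as described above; the Softmax example and its axis value are illustrative, and make_constant_node comes from the types helpers removed in this same diff:

.. code-block:: python

    import numpy as np
    from ngraph.utils.node_factory import NodeFactory
    from ngraph.utils.types import make_constant_node

    factory = NodeFactory("opset8")
    data = make_constant_node(np.ones((2, 3), dtype=np.float32))

    softmax = factory.create("Softmax", [data], {"axis": 1})
    print(softmax.get_axis())  # 1 - getter attached dynamically by NodeFactory.create()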
- """ - if reduction_axes is None: - reduction_axes = set(range(len(node.shape))) - - if type(reduction_axes) is not set: - reduction_axes = set(reduction_axes) - return reduction_axes diff --git a/src/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py b/src/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py deleted file mode 100644 index 0f4650ea6f1279..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Helper classes for aggregating TensorIterator input/output desciptor attributes.""" - -from typing import List - -from ngraph.impl import Node -from ngraph.impl.op import Parameter - - -class GraphBody(object): - """Class containing graph parameters and results.""" - - def __init__( - self, - parameters: List[Parameter], - results: List[Node], - ) -> None: - self.parameters = parameters - self.results = results - - def serialize(self) -> dict: - """Serialize GraphBody as a dictionary.""" - return { - "parameters": self.parameters, - "results": self.results, - } - - -class TensorIteratorInputDesc(object): - """Represents a generic input descriptor for TensorIterator operator.""" - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - ) -> None: - self.input_idx = input_idx - self.body_parameter_idx = body_parameter_idx - - def serialize(self) -> dict: - """Serialize TensorIteratorInputDesc as a dictionary.""" - return { - "input_idx": self.input_idx, - "body_parameter_idx": self.body_parameter_idx, - } - - -class TensorIteratorSliceInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input formed from slices of TensorIterator input.""" - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - start: int, - stride: int, - part_size: int, - end: int, - axis: int, - ) -> None: - super().__init__(input_idx, body_parameter_idx) - self.start = start - self.stride = stride - self.part_size = part_size - self.end = end - self.axis = axis - - def serialize(self) -> dict: - """Serialize TensorIteratorSliceInputDesc as a dictionary.""" - output = super().serialize() - output["start"] = self.start - output["stride"] = self.stride - output["part_size"] = self.part_size - output["end"] = self.end - output["axis"] = self.axis - return output - - -class TensorIteratorMergedInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input with initial value in the first iteration. - - Later on, this input value is computed inside graph body. 
- """ - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - body_value_idx: int, - ) -> None: - super().__init__(input_idx, body_parameter_idx) - self.body_value_idx = body_value_idx - - def serialize(self) -> dict: - """Serialize TensorIteratorMergedInputDesc as a dictionary.""" - output = super().serialize() - output["body_value_idx"] = self.body_value_idx - return output - - -class TensorIteratorInvariantInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input that has invariant value during iteration.""" - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - ) -> None: - super().__init__(input_idx, body_parameter_idx) - - -class TensorIteratorOutputDesc(object): - """Represents a generic output descriptor for TensorIterator operator.""" - - def __init__( - self, - body_value_idx: int, - output_idx: int, - ) -> None: - self.body_value_idx = body_value_idx - self.output_idx = output_idx - - def serialize(self) -> dict: - """Serialize TensorIteratorOutputDesc as a dictionary.""" - return { - "body_value_idx": self.body_value_idx, - "output_idx": self.output_idx, - } - - -class TensorIteratorBodyOutputDesc(TensorIteratorOutputDesc): - """Represents an output from a specific iteration.""" - - def __init__( - self, - body_value_idx: int, - output_idx: int, - iteration: int = -1, - ) -> None: - super().__init__(body_value_idx, output_idx) - self.iteration = iteration - - def serialize(self) -> dict: - """Serialize TensorIteratorBodyOutputDesc as a dictionary.""" - output = super().serialize() - output["iteration"] = self.iteration - return output - - -class TensorIteratorConcatOutputDesc(TensorIteratorOutputDesc): - """Represents an output produced by concatenation of output from each iteration.""" - - def __init__( - self, - body_value_idx: int, - output_idx: int, - start: int, - stride: int, - part_size: int, - end: int, - axis: int, - ) -> None: - super().__init__(body_value_idx, output_idx) - self.start = start - self.stride = stride - self.part_size = part_size - self.end = end - self.axis = axis - - def serialize(self) -> dict: - """Serialize TensorIteratorConcatOutputDesc as a dictionary.""" - output = super().serialize() - output["start"] = self.start - output["stride"] = self.stride - output["part_size"] = self.part_size - output["end"] = self.end - output["axis"] = self.axis - return output diff --git a/src/bindings/python/src/compatibility/ngraph/utils/types.py b/src/bindings/python/src/compatibility/ngraph/utils/types.py deleted file mode 100644 index 9556fe2ccf04f2..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/types.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Functions related to converting between Python and numpy types and ngraph types.""" - -import logging -from typing import List, Union, Optional - -import numpy as np - -from ngraph.exceptions import NgraphTypeError -from ngraph.impl import Node, Shape, Output -from ngraph.impl import Type as NgraphType -from ngraph.impl.op import Constant - -log = logging.getLogger(__name__) - -TensorShape = List[int] -NumericData = Union[int, float, np.ndarray] -NumericType = Union[type, np.dtype] -ScalarData = Union[int, float] -NodeInput = Union[Node, NumericData] - -ngraph_to_numpy_types_map = [ - (NgraphType.boolean, bool), - (NgraphType.f16, np.float16), - (NgraphType.f32, np.float32), - (NgraphType.f64, np.float64), - (NgraphType.i8, np.int8), - (NgraphType.i16, 
np.int16), - (NgraphType.i32, np.int32), - (NgraphType.i64, np.int64), - (NgraphType.u8, np.uint8), - (NgraphType.u16, np.uint16), - (NgraphType.u32, np.uint32), - (NgraphType.u64, np.uint64), - (NgraphType.bf16, np.uint16), -] - -ngraph_to_numpy_types_str_map = [ - ("boolean", bool), - ("f16", np.float16), - ("f32", np.float32), - ("f64", np.float64), - ("i8", np.int8), - ("i16", np.int16), - ("i32", np.int32), - ("i64", np.int64), - ("u8", np.uint8), - ("u16", np.uint16), - ("u32", np.uint32), - ("u64", np.uint64), -] - - -def get_element_type(data_type: NumericType) -> NgraphType: - """Return an ngraph element type for a Python type or numpy.dtype.""" - if data_type is int: - log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.") - return NgraphType.i32 - - if data_type is float: - log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.") - return NgraphType.f32 - - ng_type = next((ng_type for (ng_type, np_type) in ngraph_to_numpy_types_map if np_type == data_type), None) - if ng_type: - return ng_type - - raise NgraphTypeError("Unidentified data type %s", data_type) - - -def get_element_type_str(data_type: NumericType) -> str: - """Return an ngraph element type string representation for a Python type or numpy dtype.""" - if data_type is int: - log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.") - return "i32" - - if data_type is float: - log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.") - return "f32" - - ng_type = next( - (ng_type for (ng_type, np_type) in ngraph_to_numpy_types_str_map if np_type == data_type), - None, - ) - if ng_type: - return ng_type - - raise NgraphTypeError("Unidentified data type %s", data_type) - - -def get_dtype(ngraph_type: NgraphType) -> np.dtype: - """Return a numpy.dtype for an ngraph element type.""" - np_type = next( - (np_type for (ng_type, np_type) in ngraph_to_numpy_types_map if ng_type == ngraph_type), - None, - ) - - if np_type: - return np.dtype(np_type) - - raise NgraphTypeError("Unidentified data type %s", ngraph_type) - - -def get_ndarray(data: NumericData) -> np.ndarray: - """Wrap data into a numpy ndarray.""" - if isinstance(data, np.ndarray): - return data - return np.array(data) - - -def get_shape(data: NumericData) -> TensorShape: - """Return a shape of NumericData.""" - if isinstance(data, np.ndarray): - return data.shape # type: ignore - if isinstance(data, list): - return [len(data)] # type: ignore - return [] - - -def make_constant_node(value: NumericData, dtype: Optional[NumericType] = None) -> Constant: - """Return an ngraph Constant node with the specified value.""" - ndarray = get_ndarray(value) - if dtype: - element_type = get_element_type(dtype) - else: - element_type = get_element_type(ndarray.dtype) - - return Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist()) - - -def as_node(input_value: NodeInput) -> Node: - """Return input values as nodes. Scalars will be converted to Constant nodes.""" - if issubclass(type(input_value), Node): - return input_value - if issubclass(type(input_value), Output): - return input_value - return make_constant_node(input_value) - - -def as_nodes(*input_values: NodeInput) -> List[Node]: - """Return input values as nodes. 
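A brief sketch of the type-conversion helpers above; the concrete values are illustrative:

.. code-block:: python

    import numpy as np
    from ngraph.utils.types import as_node, get_element_type_str, make_constant_node

    print(get_element_type_str(np.float32))  # "f32"

    const = make_constant_node(np.array([1, 2, 3]), np.int64)  # explicit element type
    node = as_node(2.5)                                        # scalar wrapped as Constant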
Scalars will be converted to Constant nodes.""" - return [as_node(input_value) for input_value in input_values] diff --git a/src/bindings/python/src/compatibility/openvino/.bandit b/src/bindings/python/src/compatibility/openvino/.bandit deleted file mode 100644 index f7831187e35161..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/.bandit +++ /dev/null @@ -1,2 +0,0 @@ -[bandit] -skips: B101 diff --git a/src/bindings/python/src/compatibility/openvino/CMakeLists.txt b/src/bindings/python/src/compatibility/openvino/CMakeLists.txt deleted file mode 100644 index aa2e7093d41b1b..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/CMakeLists.txt +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# Defines the CMake commands/policies -cmake_minimum_required (VERSION 3.13) - -# Set the project name -project (ie_python_api) - -if(NOT DEFINED OpenVINO_SOURCE_DIR) - find_package(OpenVINODeveloperPackage REQUIRED - PATHS "${InferenceEngineDeveloperPackage_DIR}") -endif() - -# Python API 1.0 will be removed before 2024.0 -ov_disable_deprecated_warnings() - -if(UNIX) - # cython generated files requires public visibility. Force visibility required. - set(CMAKE_CXX_VISIBILITY_PRESET default) - set(CMAKE_C_VISIBILITY_PRESET default) -endif() - -include (cmake/UseCython.cmake) - -if(CYTHON_VERSION VERSION_LESS 0.29) - message(FATAL_ERROR "OpenVINO Python API needs at least Cython version 0.29, found version ${CYTHON_VERSION}") -else() - message(STATUS "Found Cython version ${CYTHON_VERSION}") -endif() - -# Python3_VERSION_MAJOR and Python3_VERSION_MINOR are defined in FindPython3 -set(pyversion python${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}) - -set(PYTHON_COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion}) -if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/openvino) -else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/openvino) -endif() - -function(ov_python_disable_intel_warnings target) - if(UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - # 1292: unknown attribute "fallthrough" - target_compile_options(${target} PRIVATE -diag-disable=1292) - endif() -endfunction() - -set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR}) -add_subdirectory(inference_engine) - -if(TARGET _pyngraph) - add_dependencies(ie_api _pyngraph) -endif() - -# install - -ov_cpack_add_component(${PYTHON_COMPONENT} HIDDEN) diff --git a/src/bindings/python/src/compatibility/openvino/__init__.py b/src/bindings/python/src/compatibility/openvino/__init__.py deleted file mode 100644 index b7dc434f3148cc..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -__path__ = __import__("pkgutil").extend_path(__path__, __name__) - -# Required for Windows OS platforms -# Note: always top-level -try: - from openvino.utils import _add_openvino_libs_to_search_path - _add_openvino_libs_to_search_path() -except ImportError: - pass - -# # -# # API 2.0 -# # This __init__.py forces checking of runtime modules to propagate errors. -# # It is not compared with init files from openvino-dev package. 
-# # -# Import all public modules -from openvino import runtime as runtime -from openvino import frontend as frontend -from openvino import helpers as helpers -from openvino import preprocess as preprocess -from openvino import utils as utils -from openvino.runtime import properties as properties - -# Import most important classes and functions from openvino.runtime -from openvino.runtime import Model -from openvino.runtime import Core -from openvino.runtime import CompiledModel -from openvino.runtime import InferRequest -from openvino.runtime import AsyncInferQueue - -from openvino.runtime import Dimension -from openvino.runtime import Strides -from openvino.runtime import PartialShape -from openvino.runtime import Shape -from openvino.runtime import Layout -from openvino.runtime import Type -from openvino.runtime import Tensor -from openvino.runtime import OVAny - -from openvino.runtime import compile_model -from openvino.runtime import get_batch -from openvino.runtime import set_batch -from openvino.runtime import serialize -from openvino.runtime import shutdown -from openvino.runtime import tensor_from_file -from openvino.runtime import save_model -from openvino.runtime import layout_helpers - -# Set version for openvino package -from openvino.runtime import get_version -__version__ = get_version() - -# Tools -try: - # Model Conversion API - ovc should reside in the main namespace - from openvino.tools.ovc import convert_model -except ImportError: - pass diff --git a/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake b/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake deleted file mode 100644 index 8eeabf849f49c5..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# -# -# Following changes were done on top of original file: -# Add CYTHON_EXECUTABLE searching hints at lines 50 and 51 - -#============================================================================= -# Copyright 2011 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= -# Find the Cython compiler. -# -# This code sets the following variables: -# -# CYTHON_EXECUTABLE -# -# See also UseCython.cmake -# Use the Cython executable that lives next to the Python executable -# if it is a local installation. 
- -function( _find_cython_executable ) - find_host_package(Python3 QUIET COMPONENTS Interpreter) - if( Python3_Interpreter_FOUND ) - get_filename_component( _python_path ${Python3_EXECUTABLE} PATH ) - file(TO_CMAKE_PATH "$ENV{HOME}" ENV_HOME) - find_host_program( CYTHON_EXECUTABLE - NAMES cython cython.exe cython.bat cython3 - HINTS ${_python_path} - ${ENV_HOME}/.local/bin - $ENV{HOMEBREW_OPT}/cython/bin - ${ENV_HOME}/Library/Python/${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}/bin - ${_python_path}/Scripts - ) - else() - find_host_program( CYTHON_EXECUTABLE - NAMES cython cython.bat cython3 - ) - endif() - - set(CYTHON_EXECUTABLE "${CYTHON_EXECUTABLE}" PARENT_SCOPE) -endfunction() - -_find_cython_executable() - -include( FindPackageHandleStandardArgs ) -FIND_PACKAGE_HANDLE_STANDARD_ARGS( Cython REQUIRED_VARS CYTHON_EXECUTABLE ) - -# Find Cython version -execute_process(COMMAND ${CYTHON_EXECUTABLE} -V - ERROR_VARIABLE CYTHON_OUTPUT - OUTPUT_VARIABLE CYTHON_ERROR_MESSAGE - RESULT_VARIABLE CYTHON_EXIT_CODE - OUTPUT_STRIP_TRAILING_WHITESPACE) - -if(CYTHON_EXIT_CODE EQUAL 0) - if(NOT CYTHON_OUTPUT) - set(CYTHON_OUTPUT "${CYTHON_ERROR_MESSAGE}") - endif() - string(REGEX REPLACE "^Cython version ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1" CYTHON_VERSION "${CYTHON_OUTPUT}") -else() - if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY) - if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.15) - set(CYTHON_MESSAGE_MODE TRACE) - else() - set(CYTHON_MESSAGE_MODE WARNING) - endif() - endif() - if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED) - set(CYTHON_MESSAGE_MODE FATAL_ERROR) - endif() - message(${CYTHON_MESSAGE_MODE} "Failed to detect cython version: ${CYTHON_ERROR_MESSAGE}") - unset(CYTHON_MESSAGE_MODE) -endif() - -unset(CYTHON_OUTPUT) -unset(CYTHON_EXIT_CODE) -unset(CYTHON_ERROR_MESSAGE) - -mark_as_advanced( CYTHON_EXECUTABLE CYTHON_VERSION ) diff --git a/src/bindings/python/src/compatibility/openvino/cmake/UseCython.cmake b/src/bindings/python/src/compatibility/openvino/cmake/UseCython.cmake deleted file mode 100644 index 03a208f03c233f..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/cmake/UseCython.cmake +++ /dev/null @@ -1,298 +0,0 @@ -# Define a function to create Cython modules. -# -# For more information on the Cython project, see http://cython.org/. -# "Cython is a language that makes writing C extensions for the Python language -# as easy as Python itself." -# -# This file defines a CMake function to build a Cython Python module. -# To use it, first include this file. -# -# include( UseCython ) -# -# Then call cython_add_module to create a module. -# -# cython_add_module( <module_name> <src1> <src2> ... <srcN> ) -# -# To avoid dependence on Python, set the Python3_LIBRARY cache variable to point -# to a static library. If a MAIN_MODULE source is specified, -# the "if __name__ == '__main__':" from that module is used as the C main() method -# for the executable. If MAIN_MODULE, the source with the same basename as -# <module_name> is assumed to be the MAIN_MODULE. -# -# Where <module_name> is the name of the resulting Python module and -# <src1> <src2> ... are source files to be compiled into the module, e.g. *.pyx, -# *.py, *.c, *.cxx, etc. A CMake target is created with name <module_name>. This can -# be used for target_link_libraries(), etc. -# -# The sample paths set with the CMake include_directories() command will be used -# for include directories to search for *.pxd when running the Cython compiler.
-# -# Cache variables that effect the behavior include: -# -# CYTHON_ANNOTATE -# CYTHON_NO_DOCSTRINGS -# CYTHON_FLAGS -# -# Source file properties that effect the build process are -# -# CYTHON_IS_CXX -# -# If this is set of a *.pyx file with CMake set_source_files_properties() -# command, the file will be compiled as a C++ file. -# -# See also FindCython.cmake - -# Copyright (C) 2018-2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Following changes were done on top of the original file: -# added PRIVATE linking mode for target_link_libraries call at lines 298 and 336 - -#============================================================================= -# Copyright 2011 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Configuration options. -set( CYTHON_ANNOTATE OFF - CACHE BOOL "Create an annotated .html file when compiling *.pyx." ) -set( CYTHON_NO_DOCSTRINGS OFF - CACHE BOOL "Strip docstrings from the compiled module." ) -set( CYTHON_FLAGS "" CACHE STRING - "Extra flags to the cython compiler." ) -mark_as_advanced( CYTHON_ANNOTATE CYTHON_NO_DOCSTRINGS CYTHON_FLAGS ) - -find_package( Cython REQUIRED - PATHS "${CMAKE_CURRENT_SOURCE_DIR}/cmake" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH ) - -find_package(Python3 REQUIRED COMPONENTS Interpreter ${python3_development_component}) - -set( CYTHON_CXX_EXTENSION "cxx" ) -set( CYTHON_C_EXTENSION "c" ) - -# Create a *.c or *.cxx file from a *.pyx file. -# Input the generated file basename. The generate file will put into the variable -# placed in the "generated_file" argument. Finally all the *.py and *.pyx files. -function( compile_pyx _name generated_file ) - # Default to assuming all files are C. - set( cxx_arg "" ) - set( extension ${CYTHON_C_EXTENSION} ) - set( pyx_lang "C" ) - set( comment "Compiling Cython C source for ${_name}..." ) - - set( cython_include_directories "" ) - set( pxd_dependencies "" ) - set( pxi_dependencies "" ) - set( c_header_dependencies "" ) - set( pyx_locations "" ) - - foreach( pyx_file ${ARGN} ) - get_filename_component( pyx_file_basename "${pyx_file}" NAME_WE ) - - # Determine if it is a C or C++ file. - get_source_file_property( property_is_cxx ${pyx_file} CYTHON_IS_CXX ) - if( ${property_is_cxx} ) - set( cxx_arg "--cplus" ) - set( extension ${CYTHON_CXX_EXTENSION} ) - set( pyx_lang "CXX" ) - set( comment "Compiling Cython CXX source for ${_name}..." ) - endif() - - # Get the include directories. 
- get_source_file_property( pyx_location ${pyx_file} LOCATION ) - get_filename_component( pyx_path ${pyx_location} PATH ) - get_directory_property( cmake_include_directories DIRECTORY ${pyx_path} INCLUDE_DIRECTORIES ) - list( APPEND cython_include_directories ${cmake_include_directories} ) - list( APPEND pyx_locations "${pyx_location}" ) - - # Determine dependencies. - # Add the pxd file will the same name as the given pyx file. - unset( corresponding_pxd_file CACHE ) - find_file( corresponding_pxd_file ${pyx_file_basename}.pxd - PATHS "${pyx_path}" ${cmake_include_directories} - NO_DEFAULT_PATH ) - if( corresponding_pxd_file ) - list( APPEND pxd_dependencies "${corresponding_pxd_file}" ) - endif() - - # Look for included pxi files - file(STRINGS "${pyx_file}" include_statements REGEX "include +['\"]([^'\"]+).*") - foreach(statement ${include_statements}) - string(REGEX REPLACE "include +['\"]([^'\"]+).*" "\\1" pxi_file "${statement}") - unset(pxi_location CACHE) - find_file(pxi_location ${pxi_file} - PATHS "${pyx_path}" ${cmake_include_directories} NO_DEFAULT_PATH) - if (pxi_location) - list(APPEND pxi_dependencies ${pxi_location}) - get_filename_component( found_pyi_file_basename "${pxi_file}" NAME_WE ) - get_filename_component( found_pyi_path ${pxi_location} PATH ) - unset( found_pyi_pxd_file CACHE ) - find_file( found_pyi_pxd_file ${found_pyi_file_basename}.pxd - PATHS "${found_pyi_path}" ${cmake_include_directories} NO_DEFAULT_PATH ) - if (found_pyi_pxd_file) - list( APPEND pxd_dependencies "${found_pyi_pxd_file}" ) - endif() - endif() - endforeach() # for each include statement found - - # pxd files to check for additional dependencies. - set( pxds_to_check "${pyx_file}" "${pxd_dependencies}" ) - set( pxds_checked "" ) - set( number_pxds_to_check 1 ) - while( ${number_pxds_to_check} GREATER 0 ) - foreach( pxd ${pxds_to_check} ) - list( APPEND pxds_checked "${pxd}" ) - list( REMOVE_ITEM pxds_to_check "${pxd}" ) - - # check for C header dependencies - file( STRINGS "${pxd}" extern_from_statements - REGEX "cdef[ ]+extern[ ]+from.*$" ) - foreach( statement ${extern_from_statements} ) - # Had trouble getting the quote in the regex - string( REGEX REPLACE "cdef[ ]+extern[ ]+from[ ]+[\"]([^\"]+)[\"].*" "\\1" header "${statement}" ) - unset( header_location CACHE ) - find_file( header_location ${header} PATHS ${cmake_include_directories} ) - if( header_location ) - list( FIND c_header_dependencies "${header_location}" header_idx ) - if( ${header_idx} LESS 0 ) - list( APPEND c_header_dependencies "${header_location}" ) - endif() - endif() - endforeach() - - # check for pxd dependencies - - # Look for cimport statements. - set( module_dependencies "" ) - file( STRINGS "${pxd}" cimport_statements REGEX cimport ) - foreach( statement ${cimport_statements} ) - if( ${statement} MATCHES from ) - string( REGEX REPLACE "from[ ]+([^ ]+).*" "\\1" module "${statement}" ) - else() - string( REGEX REPLACE "cimport[ ]+([^ ]+).*" "\\1" module "${statement}" ) - endif() - list( APPEND module_dependencies ${module} ) - endforeach() - list( REMOVE_DUPLICATES module_dependencies ) - # Add the module to the files to check, if appropriate. 
- foreach( module ${module_dependencies} ) - unset( pxd_location CACHE ) - find_file( pxd_location ${module}.pxd - PATHS "${pyx_path}" ${cmake_include_directories} NO_DEFAULT_PATH ) - if( pxd_location ) - list( FIND pxds_checked ${pxd_location} pxd_idx ) - if( ${pxd_idx} LESS 0 ) - list( FIND pxds_to_check ${pxd_location} pxd_idx ) - if( ${pxd_idx} LESS 0 ) - list( APPEND pxds_to_check ${pxd_location} ) - list( APPEND pxd_dependencies ${pxd_location} ) - endif() # if it is not already going to be checked - endif() # if it has not already been checked - endif() # if pxd file can be found - endforeach() # for each module dependency discovered - endforeach() # for each pxd file to check - list( LENGTH pxds_to_check number_pxds_to_check ) - endwhile() - - - - endforeach() # pyx_file - - # Set additional flags. - if( CYTHON_ANNOTATE ) - set( annotate_arg "--annotate" ) - endif() - - if( CYTHON_NO_DOCSTRINGS ) - set( no_docstrings_arg "--no-docstrings" ) - endif() - - set( cython_debug_arg "$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:--gdb>" ) - - if( Python3_VERSION_MAJOR EQUAL 3 ) - set( version_arg "-3" ) - else() - set( version_arg ) - endif() - - # Include directory arguments. - list( REMOVE_DUPLICATES cython_include_directories ) - set( include_directory_arg "" ) - foreach( _include_dir ${cython_include_directories} ) - set( include_directory_arg ${include_directory_arg} "-I" "${_include_dir}" ) - endforeach() - - # Determining generated file name. - set( _generated_file "${CMAKE_CURRENT_BINARY_DIR}/${_name}.${extension}" ) - set_source_files_properties( ${_generated_file} PROPERTIES GENERATED TRUE ) - set( ${generated_file} ${_generated_file} PARENT_SCOPE ) - - list( REMOVE_DUPLICATES pxd_dependencies ) - list( REMOVE_DUPLICATES c_header_dependencies ) - - # Add the command to run the compiler. - add_custom_command( OUTPUT ${_generated_file} - COMMAND ${CYTHON_EXECUTABLE} - ARGS ${cxx_arg} ${include_directory_arg} ${version_arg} - ${annotate_arg} ${no_docstrings_arg} ${cython_debug_arg} ${CYTHON_FLAGS} - --output-file ${_generated_file} ${pyx_locations} - DEPENDS ${pyx_locations} ${pxd_dependencies} ${pxi_dependencies} - IMPLICIT_DEPENDS ${pyx_lang} ${c_header_dependencies} - COMMENT ${comment} - ) - - # Remove their visibility to the user. - set( corresponding_pxd_file "" CACHE INTERNAL "" ) - set( header_location "" CACHE INTERNAL "" ) - set( pxd_location "" CACHE INTERNAL "" ) -endfunction() - -# cython_add_module( <name> src1 src2 ... srcN ) -# Build the Cython Python module.
-function( cython_add_module _name ) - set( pyx_module_sources "" ) - set( other_module_sources "" ) - foreach( _file ${ARGN} ) - if( ${_file} MATCHES ".*\\.py[x]?$" ) - list( APPEND pyx_module_sources ${_file} ) - else() - list( APPEND other_module_sources ${_file} ) - endif() - endforeach() - compile_pyx( ${_name} generated_file ${pyx_module_sources} ) - python3_add_library ( ${_name} MODULE ${generated_file} ${other_module_sources} ) - # Python3_SOABI is not defined during cross-compilation - if (Python3_SOABI AND NOT PYTHON_MODULE_EXTENSION MATCHES "^\.${Python3_SOABI}.+$") - message(FATAL_ERROR "Python3_SOABI (${Python3_SOABI}) and PYTHON_MODULE_EXTENSION (${PYTHON_MODULE_EXTENSION}) are not matching") - endif() - pybind11_extension( ${_name} ) - if( APPLE ) - set_target_properties( ${_name} PROPERTIES LINK_FLAGS "-undefined dynamic_lookup" ) - else() - target_link_libraries( ${_name} PRIVATE ${Python3_LIBRARIES} ) - endif() -endfunction() diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt deleted file mode 100644 index dd83bad5f367a0..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set(TARGET_NAME "ie_api") - -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) -set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) -set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) - -file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx - ${CMAKE_CURRENT_SOURCE_DIR}/*.pxd - ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) - -file(GLOB PYX_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.pyx) -set_source_files_properties(${PYX_SOURCES} PROPERTIES CYTHON_IS_CXX ON) - -if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # 'argument': conversion from 'size_t' to 'int', possible loss of data - ov_add_compiler_flags(/wd4267) - ov_add_compiler_flags(/wd4244) - ov_add_compiler_flags(/wd4551) -endif() -if(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX) - ov_add_compiler_flags(-Wno-undef) - if(OV_COMPILER_IS_CLANG) - ov_add_compiler_flags(-Wno-parentheses-equality) - endif() -endif() -if(UNUSED_BUT_SET_VARIABLE_SUPPORTED) - ov_add_compiler_flags(-Wno-unused-but-set-variable) -endif() - -# create target - -cython_add_module(${TARGET_NAME} ${SOURCES}) -ov_python_disable_intel_warnings(${TARGET_NAME}) -set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) - -set(INSTALLED_TARGETS ${TARGET_NAME}) -list(REMOVE_ITEM PYX_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx") - -foreach(PYX_FILE IN LISTS PYX_SOURCES) - get_filename_component(PYX_NAME "${PYX_FILE}" NAME_WE) - cython_add_module(${PYX_NAME} ${PYX_FILE}) - ov_python_disable_intel_warnings(${PYX_NAME}) - add_dependencies(${TARGET_NAME} ${PYX_NAME}) - target_include_directories(${PYX_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") - target_link_libraries(${PYX_NAME} PRIVATE openvino::runtime) - list(APPEND INSTALLED_TARGETS ${PYX_NAME}) - ov_python_minimal_api(${PYX_NAME}) - set_target_properties(${PYX_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) -endforeach() - -if(COMMAND ov_add_vs_version_file) - 
foreach(target IN LISTS INSTALLED_TARGETS) - ov_add_vs_version_file(NAME ${target} - FILEDESCRIPTION "Inference Engine Python library") - endforeach() -endif() - -function(python_ov_disable_deprecated_warnings) - ov_disable_deprecated_warnings() - set(pyx_file "${CMAKE_CURRENT_BINARY_DIR}/ie_api.cxx" "${CMAKE_CURRENT_BINARY_DIR}/constants.cxx") - set_source_files_properties(${pyx_file} PROPERTIES COMPILE_OPTIONS ${ov_c_cxx_deprecated}) -endfunction() - -python_ov_disable_deprecated_warnings() -ov_python_minimal_api(${TARGET_NAME}) - -target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") -target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) - -# Compatibility with python 2.7 which has deprecated "register" specifier -if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - target_compile_options(${TARGET_NAME} PRIVATE "-Wno-error=register") -endif() - -# perform copy -add_custom_command(TARGET ${TARGET_NAME} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py -) - -foreach(target IN LISTS INSTALLED_TARGETS) - ov_set_install_rpath(${target} ${OV_CPACK_PYTHONDIR}/openvino/inference_engine ${OV_CPACK_RUNTIMEDIR}) -endforeach() - -# install - -install(TARGETS ${INSTALLED_TARGETS} - RUNTIME DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/inference_engine - COMPONENT ${PYTHON_COMPONENT} ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} - LIBRARY DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/inference_engine - COMPONENT ${PYTHON_COMPONENT} ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - -install(PROGRAMS __init__.py - DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/inference_engine - COMPONENT ${PYTHON_COMPONENT} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - -ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} - EXCLUDE_PATTERNS ".*\\.cxx;.*\\.pxd;.*\\.pyx") diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/__init__.py b/src/bindings/python/src/compatibility/openvino/inference_engine/__init__.py deleted file mode 100644 index b7ece2fcbbd817..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import sys -import warnings - - -if sys.platform == "win32": - # Installer, yum, pip installs openvino dlls to the different directories - # and those paths need to be visible to the openvino modules - # - # If you're using a custom installation of openvino, - # add the location of openvino dlls to your system PATH. - # - # looking for the libs in the pip installation path by default. - openvino_libs = [os.path.join(os.path.dirname(__file__), "..", "..", "openvino", "libs")] - # setupvars.bat script set all libs paths to OPENVINO_LIB_PATHS environment variable. - openvino_libs_installer = os.getenv("OPENVINO_LIB_PATHS") - if openvino_libs_installer: - openvino_libs.extend(openvino_libs_installer.split(";")) - for lib in openvino_libs: - lib_path = os.path.join(os.path.dirname(__file__), lib) - if os.path.isdir(lib_path): - # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH. 
- if (3, 8) <= sys.version_info: - os.add_dll_directory(os.path.abspath(lib_path)) - else: - os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"] - -from .ie_api import * - -warnings.warn( - message="OpenVINO Inference Engine Python API is deprecated and will be removed in 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html", - category=FutureWarning, - stacklevel=2, -) - -__all__ = ["IENetwork", "TensorDesc", "IECore", "Blob", "PreProcessInfo", "get_version"] -__version__ = get_version() # type: ignore diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/constants.pyx b/src/bindings/python/src/compatibility/openvino/inference_engine/constants.pyx deleted file mode 100644 index d1ef004e86f853..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/constants.pyx +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -#cython: language_level=3 - -from .cimport ie_api_impl_defs as C - -import numpy as np -from enum import Enum - -supported_precisions = ['FP32', 'FP64', 'FP16', 'I64', 'U64', 'I32', 'U32', - 'I16', 'I4', 'I8', 'U16', 'U4', 'U8', 'BOOL', 'BIN', 'BF16'] - -known_plugins = ['CPU', 'GPU', 'HETERO', 'MULTI'] - -layout_int_to_str_map = {0: 'ANY', 1: 'NCHW', 2: 'NHWC', 3: 'NCDHW', 4: 'NDHWC', 64: 'OIHW', 95: 'SCALAR', 96: 'C', - 128: 'CHW', 192: 'HW', 193: 'NC', 194: 'CN', 200: 'BLOCKED'} - -format_map = {'FP32' : np.float32, - 'FP64' : np.float64, - 'FP16' : np.float16, - 'I64' : np.int64, - 'U64' : np.uint64, - 'I32' : np.int32, - 'U32' : np.uint32, - 'I16' : np.int16, - 'U16' : np.uint16, - 'I4' : np.int8, - 'I8' : np.int8, - 'U4' : np.int8, - 'U8' : np.uint8, - 'BOOL' : np.uint8, - 'BIN' : np.int8, - 'BF16' : np.float16, - } - -layout_str_to_enum = {'ANY': C.Layout.ANY, - 'NHWC': C.Layout.NHWC, - 'NCHW': C.Layout.NCHW, - 'NCDHW': C.Layout.NCDHW, - 'NDHWC': C.Layout.NDHWC, - 'OIHW': C.Layout.OIHW, - 'GOIHW': C.Layout.GOIHW, - 'OIDHW': C.Layout.OIDHW, - 'GOIDHW': C.Layout.GOIDHW, - 'SCALAR': C.Layout.SCALAR, - 'C': C.Layout.C, - 'CHW': C.Layout.CHW, - 'HW': C.Layout.HW, - 'NC': C.Layout.NC, - 'CN': C.Layout.CN, - 'BLOCKED': C.Layout.BLOCKED - } - - -class MeanVariant(Enum): - MEAN_IMAGE = 0 - MEAN_VALUE = 1 - NONE = 2 - - -class ResizeAlgorithm(Enum): - NO_RESIZE = 0 - RESIZE_BILINEAR = 1 - RESIZE_AREA = 2 - - -class ColorFormat(Enum): - RAW = 0 - RGB = 1 - BGR = 2 - RGBX = 3 - BGRX = 4 - - -cpdef enum StatusCode: - OK = 0 - GENERAL_ERROR = -1 - NOT_IMPLEMENTED = -2 - NETWORK_NOT_LOADED = -3 - PARAMETER_MISMATCH = -4 - NOT_FOUND = -5 - OUT_OF_BOUNDS = -6 - UNEXPECTED = -7 - REQUEST_BUSY = -8 - RESULT_NOT_READY = -9 - NOT_ALLOCATED = -10 - INFER_NOT_STARTED = -11 - NETWORK_NOT_READ = -12 - - -cpdef enum WaitMode: - RESULT_READY = -1 - STATUS_ONLY = 0 diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pxd b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pxd deleted file mode 100644 index fd884b701800c1..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pxd +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -#cython: language_level=3 - -from .cimport ie_api_impl_defs as C -from .ie_api_impl_defs cimport CBlob, CTensorDesc, InputInfo, CPreProcessChannel, CPreProcessInfo, 
CExecutableNetwork, CVariableState - -import os - -from libcpp.string cimport string -from libcpp.vector cimport vector -from libcpp cimport bool -from libcpp.memory cimport unique_ptr, shared_ptr - -cdef class Blob: - cdef CBlob.Ptr _ptr - cdef object _is_const - cdef public object _array_data - cdef public object _initial_shape - -cdef class BlobBuffer: - cdef CBlob.Ptr ptr - cdef char*format - cdef vector[Py_ssize_t] shape - cdef vector[Py_ssize_t] strides - cdef reset(self, CBlob.Ptr &, vector[size_t] representation_shape = ?) - cdef char*_get_blob_format(self, const CTensorDesc & desc) - - cdef public: - total_stride, item_size - -cdef class InferRequest: - cdef C.InferRequestWrap *impl - - cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name) - - cpdef infer(self, inputs = ?) - cpdef async_infer(self, inputs = ?) - cpdef wait(self, timeout = ?) - cpdef get_perf_counts(self) - cdef void user_callback(self, int status) with gil - cdef public: - _inputs_list, _outputs_list, _py_callback, _py_data, _user_blobs - -cdef class IENetwork: - cdef C.IENetwork impl - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class ExecutableNetwork: - cdef unique_ptr[C.IEExecNetwork] impl - cpdef wait(self, num_requests = ?, timeout = ?) - cpdef get_idle_request_id(self) - cdef public: - _requests, _infer_requests - -cdef class IECore: - cdef C.IECore impl - cpdef IENetwork read_network(self, model : [str, bytes, os.PathLike], - weights : [str, bytes, os.PathLike] = ?, bool init_from_buffer = ?) - cpdef ExecutableNetwork load_network(self, network: [IENetwork, str], - device_name = ?, config = ?, int num_requests = ?) - cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config = ?, int num_requests = ?) - - -cdef class DataPtr: - cdef C.DataPtr _ptr - cdef C.IENetwork * _ptr_network - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class CDataPtr: - cdef C.CDataPtr _ptr - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class TensorDesc: - cdef C.CTensorDesc impl - -cdef class InputInfoPtr: - cdef InputInfo.Ptr _ptr - cdef C.IENetwork * _ptr_network - -cdef class InputInfoCPtr: - cdef InputInfo.CPtr _ptr - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class PreProcessInfo: - cdef CPreProcessInfo* _ptr - cdef const CPreProcessInfo* _cptr - cdef object _user_data - -cdef class PreProcessChannel: - cdef CPreProcessChannel.Ptr _ptr - -cdef class VariableState: - cdef C.CVariableState impl diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pyx b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pyx deleted file mode 100644 index 7dade4aa4d871c..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pyx +++ /dev/null @@ -1,1854 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -#distutils: language=c++ -#cython: embedsignature=True -#cython: language_level=3 - -from cython.operator cimport dereference as deref -from libcpp.string cimport string -from libcpp.vector cimport vector -from libcpp cimport bool -from libcpp.map cimport map -from libcpp.memory cimport unique_ptr -from libc.stdlib cimport malloc, free -from libc.stdint cimport int64_t, uint8_t, int8_t, int32_t, uint16_t, int16_t, uint32_t, uint64_t -from libc.stddef cimport size_t -from libc.string cimport memcpy - -import os -from fnmatch import fnmatch -import threading -import warnings -from copy import deepcopy -from collections import namedtuple 
- -from .cimport ie_api_impl_defs as C -from .ie_api_impl_defs cimport SizeVector, Precision -from .constants import WaitMode, StatusCode, MeanVariant, layout_str_to_enum, format_map, layout_int_to_str_map,\ - known_plugins, supported_precisions, ResizeAlgorithm, ColorFormat - -import numpy as np - -warnings.filterwarnings(action="module", category=DeprecationWarning) - -cdef extern from "<utility>" namespace "std" nogil: - cdef unique_ptr[C.IEExecNetwork] move(unique_ptr[C.IEExecNetwork]) - -cdef to_py_string(const string & std_string): - return bytes(std_string).decode() - -cdef dict_to_c_map(py_dict): - cdef map[string, string] c_map - for k, v in py_dict.items(): - if type(k) != str or type(v) != str: - raise TypeError("Only string keys and values are allowed!") - c_map[k.encode()] = v.encode() - return c_map - -cdef c_map_to_dict(map[string, string] c_map): - py_dict = {} - for v in c_map: - py_dict[v.first.decode()] = v.second.decode() - return py_dict - - -def get_version(): - return C.get_version().decode() - - -def read_network(path_to_xml : str, path_to_bin : str): - cdef IENetwork net = IENetwork() - net.impl = C.read_network(path_to_xml.encode(), path_to_bin.encode()) - return net - - -cdef class VariableState: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class manages data for reset operations - """ - - def reset(self): - """ - Reset internal variable state for relevant infer request - to a value specified as default for according ReadValue node - """ - self.impl.reset() - - - @property - def state(self): - """ - Returns the value of the variable state. - """ - blob = Blob() - blob._ptr = self.impl.getState() - blob._is_const = True - return blob - - @state.setter - def state(self, blob : Blob): - self.impl.setState(blob._ptr) - - @property - def name(self): - """ - A string representing a state name - """ - return to_py_string(self.impl.getName()) - - -cdef class TensorDesc: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class defines Tensor description - """ - - def __eq__(self, other : TensorDesc): - return self.layout == other.layout and self.precision == other.precision and self.dims == other.dims - - def __ne__(self, other : TensorDesc): - return self.layout != other.layout or self.precision != other.precision or self.dims != other.dims - - def __deepcopy__(self, memodict={}): - return TensorDesc(deepcopy(self.precision, memodict), deepcopy(self.dims, memodict), deepcopy(self.layout, memodict)) - - - def __cinit__(self, precision : str, dims : [list, tuple], layout : str): - """Class constructor - - :param precision: target memory precision - :param dims: target memory dimensions - :param layout: target memory layout - :return: Instance of defines class - """ - if precision not in supported_precisions: - raise ValueError(f"Unsupported precision {precision}! 
List of supported precisions: {supported_precisions}") - self.impl = C.CTensorDesc(C.Precision.FromStr(precision.encode()), dims, layout_str_to_enum[layout]) - - - @property - def dims(self): - """ - Shape (dimensions) of the :class:`TensorDesc` object - """ - return self.impl.getDims() - - @dims.setter - def dims(self, dims_array : [list, tuple]): - self.impl.setDims(dims_array) - - - @property - def precision(self): - """ - Precision of the :class:`TensorDesc` object - """ - return self.impl.getPrecision().name().decode() - - @precision.setter - def precision(self, precision : str): - if precision not in supported_precisions: - raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}") - self.impl.setPrecision(C.Precision.FromStr(precision.encode())) - - - @property - def layout(self): - """ - Layout of the :class:`TensorDesc` object - """ - return layout_int_to_str_map[self.impl.getLayout()] - - @layout.setter - def layout(self, layout : str): - if layout not in layout_str_to_enum.keys(): - raise ValueError(f"Unsupported layout {layout}! " - f"List of supported layouts: {list(layout_str_to_enum.keys())}") - self.impl.setLayout(layout_str_to_enum[layout]) - - -cdef class Blob: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class represents Blob - """ - - def __cinit__(self, TensorDesc tensor_desc = None, array : np.ndarray = None): - """Class constructor - - :param tensor_desc: :class:`TensorDesc` object describing creating Blob object. - :param array: numpy.ndarray with data to fill blob memory, The array have to have same elements count - as specified in tensor_desc.dims attribute and same elements precision corresponding to - tensor_desc.precision. 
If array isn't provided empty numpy.ndarray will be created accorsing - to parameters of tensor_desc - :return: Instance of Blob class - """ - cdef CTensorDesc c_tensor_desc - cdef float[::1] fp32_array_memview - cdef double[::1] fp64_array_memview - cdef int16_t[::1] I16_array_memview - cdef uint16_t[::1] U16_array_memview - cdef uint8_t[::1] U8_array_memview - cdef int8_t[::1] I8_array_memview - cdef int32_t[::1] I32_array_memview - cdef int64_t[::1] I64_array_memview - cdef uint32_t[::1] U32_array_memview - cdef uint64_t[::1] U64_array_memview - - self._is_const = False - self._array_data = array - self._initial_shape = array.shape if array is not None else None - - if self._array_data is not None: - if np.isfortran(self._array_data): - self._array_data = self._array_data.ravel(order="F") - else: - self._array_data = self._array_data.ravel(order="C") - if self._array_data is None and tensor_desc is not None: - c_tensor_desc = tensor_desc.impl - precision = tensor_desc.precision - if precision == "FP32": - self._ptr = C.make_shared_blob[float](c_tensor_desc) - elif precision == "FP64": - self._ptr = C.make_shared_blob[double](c_tensor_desc) - elif precision == "FP16" or precision == "I16" or precision == "BF16": - self._ptr = C.make_shared_blob[int16_t](c_tensor_desc) - elif precision == "Q78" or precision == "U16": - self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc) - elif precision == "U8" or precision == "BOOL": - self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc) - elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4": - self._ptr = C.make_shared_blob[int8_t](c_tensor_desc) - elif precision == "I32": - self._ptr = C.make_shared_blob[int32_t](c_tensor_desc) - elif precision == "U32": - self._ptr = C.make_shared_blob[uint32_t](c_tensor_desc) - elif precision == "I64": - self._ptr = C.make_shared_blob[int64_t](c_tensor_desc) - elif precision == "U64": - self._ptr = C.make_shared_blob[uint64_t](c_tensor_desc) - else: - raise AttributeError(f"Unsupported precision {precision} for blob") - deref(self._ptr).allocate() - elif tensor_desc is not None and self._array_data is not None: - c_tensor_desc = tensor_desc.impl - precision = tensor_desc.precision - size_td = C.product(c_tensor_desc.getDims()) - if array.size != size_td: - raise AttributeError(f"Number of elements in provided numpy array {array.size} and " - f"required by TensorDesc {size_td} are not equal") - if self._array_data.dtype != format_map[precision]: - raise ValueError(f"Data type {self._array_data.dtype} of provided numpy array " - f"doesn't match to TensorDesc precision {precision}") - if not self._array_data.flags['C_CONTIGUOUS']: - self._array_data = np.ascontiguousarray(self._array_data) - if precision == "FP32": - fp32_array_memview = self._array_data - self._ptr = C.make_shared_blob[float](c_tensor_desc, &fp32_array_memview[0], fp32_array_memview.shape[0]) - elif precision == "FP64": - fp64_array_memview = self._array_data - self._ptr = C.make_shared_blob[double](c_tensor_desc, &fp64_array_memview[0], fp64_array_memview.shape[0]) - elif precision == "FP16" or precision == "BF16": - I16_array_memview = self._array_data.view(dtype=np.int16) - self._ptr = C.make_shared_blob[int16_t](c_tensor_desc, &I16_array_memview[0], I16_array_memview.shape[0]) - elif precision == "I16": - I16_array_memview = self._array_data - self._ptr = C.make_shared_blob[int16_t](c_tensor_desc, &I16_array_memview[0], I16_array_memview.shape[0]) - elif precision == "Q78" or precision == "U16": - 
U16_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc, &U16_array_memview[0], U16_array_memview.shape[0]) - elif precision == "U8" or precision == "BOOL": - U8_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc, &U8_array_memview[0], U8_array_memview.shape[0]) - elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4": - I8_array_memview = self._array_data - self._ptr = C.make_shared_blob[int8_t](c_tensor_desc, &I8_array_memview[0], I8_array_memview.shape[0]) - elif precision == "I32": - I32_array_memview = self._array_data - self._ptr = C.make_shared_blob[int32_t](c_tensor_desc, &I32_array_memview[0], I32_array_memview.shape[0]) - elif precision == "U32": - U32_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint32_t](c_tensor_desc, &U32_array_memview[0], U32_array_memview.shape[0]) - elif precision == "I64": - I64_array_memview = self._array_data - self._ptr = C.make_shared_blob[int64_t](c_tensor_desc, &I64_array_memview[0], I64_array_memview.shape[0]) - elif precision == "U64": - U64_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint64_t](c_tensor_desc, &U64_array_memview[0], U64_array_memview.shape[0]) - else: - raise AttributeError(f"Unsupported precision {precision} for blob") - - def __deepcopy__(self, memodict): - res = Blob(deepcopy(self.tensor_desc, memodict), deepcopy(self._array_data, memodict)) - res.buffer[:] = deepcopy(self.buffer[:], memodict) - return res - - @property - def buffer(self): - """ - Blob's memory as :class:`numpy.ndarray` representation - """ - representation_shape = self._initial_shape if self._initial_shape is not None else [] - cdef BlobBuffer buffer = BlobBuffer() - buffer.reset(self._ptr, representation_shape) - return buffer.to_numpy(self._is_const) - - - @property - def tensor_desc(self): - """ - :class:`TensorDesc` of created Blob - """ - cdef CTensorDesc c_tensor_desc = deref(self._ptr).getTensorDesc() - precision = c_tensor_desc.getPrecision().name().decode() - layout = c_tensor_desc.getLayout() - dims = c_tensor_desc.getDims() - tensor_desc = TensorDesc(precision, dims, layout_int_to_str_map[layout]) - return tensor_desc - - def set_shape(self, new_shape): - self._initial_shape = new_shape - deref(self._ptr).setShape(new_shape) - - -## This class represents an Inference Engine entity and allows you to manipulate with plugins using unified interfaces. -cdef class IECore: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class represents an Inference Engine entity and allows you to manipulate with plugins using unified interfaces. - """ - - def __cinit__(self, xml_config_file: str = ""): - """Class constructor - - :param xml_config_file: A full path to `.xml` file containing plugins configuration. - If the parameter is not specified, the default configuration is handled automatically. 
- :return: Instance of IECore class - """ - cdef string c_xml_config_file = xml_config_file.encode() - with nogil: - self.impl = C.IECore(c_xml_config_file) - - - def get_versions(self, device_name: str): - """Get a :class:`collections.namedtuple` object with versions of the plugin specified - - :param device_name: Name of the the registered plugin - :return: Dictionary mapping a plugin name and `Versions` :class:`collections.namedtuple` object with the following fields: - - * `major` - major plugin integer version - * `minor` - minor plugin integer version - * `build_number` - plugin build number string - * `description` - plugin description string - """ - cdef map[string, C.Version] versions_ - versions_ = self.impl.getVersions(device_name.encode()) - versions = {} - for v in versions_: - device = v.first.decode() - ver = v.second - versions[device] = namedtuple("Versions", ["major", "minor", "build_number", "description"]) - versions[device].build_number = ver.buildNumber.decode() - versions[device].description = ver.description.decode() - versions[device].minor = ver.apiVersion.minor - versions[device].major = ver.apiVersion.major - return versions - - ## Reads a network from Intermediate Representation (IR) or ONNX formats and creates an `IENetwork`. - # @param model: A `.xml` or `.onnx` model file or string with IR. - # @param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or - # bytes with file content. - # @param init_from_buffer: Defines the way of how `model` and `weights` attributes are interpreted. - # If `False`, attributes are interpreted as strings with paths to .xml and .bin files - # of IR. If `True`, they are interpreted as Python `bytes` object with .xml and .bin files content. - # @return An `IENetwork` object - # - # Usage example:\n - # ```python - # ie = IECore() - # net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - # ``` - cpdef IENetwork read_network(self, model: [str, bytes, os.PathLike], weights: [str, bytes, os.PathLike] = "", init_from_buffer: bool = False): - """Reads a network from Intermediate Representation (IR) or ONNX formats and creates an :class:`IENetwork`. - - :param model: A `.xml`, `.onnx`or `.prototxt` model file or string with IR. - :param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or - bytes with file content. - :param init_from_buffer: Defines the way of how `model` and `weights` attributes are interpreted. - If `False`, attributes are interpreted as strings with paths to `.xml` and `.bin` files - of IR. If `True`, they are interpreted as Python `bytes` object with `.xml` and `.bin` files content. - - :return: An :class:`IENetwork` object - - Usage example: - - .. 
code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - """ - cdef uint8_t*bin_buffer - cdef string weights_ - cdef string model_ - cdef IENetwork net = IENetwork() - cdef size_t bin_size - if init_from_buffer: - model_ = bytes(model) - bin_buffer = weights - bin_size = len(weights) - with nogil: - net.impl = self.impl.readNetwork(model_, bin_buffer, bin_size) - else: - weights_ = "".encode() - model = os.fspath(model) - if not os.path.isfile(model): - raise Exception(f"Path to the model {model} doesn't exist or it's a directory") - model_ = model.encode() - - if not (fnmatch(model, "*.onnx") or fnmatch(model, "*.prototxt")) and weights: - weights = os.fspath(weights) - if not os.path.isfile(weights): - raise Exception(f"Path to the weights {weights} doesn't exist or it's a directory") - weights_ = weights.encode() - with nogil: - net.impl = self.impl.readNetwork(model_, weights_) - return net - - cpdef ExecutableNetwork load_network(self, network: [IENetwork, str], device_name=None, config=None, int num_requests=1): - """Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name - and creates an :class:`ExecutableNetwork` object of the :class:`IENetwork` class. - You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware - resources). - - :param network: A valid :class:`IENetwork` instance. Model file name .xml, .onnx can also be passed as argument - :param device_name: A device name of a target plugin, if no device_name is set then it will use AUTO device as default. - :param config: A dictionary of plugin configuration keys and their values - :param num_requests: A positive integer value of infer requests to be created. - Number of infer requests is limited by device capabilities. Value `0` indicates that optimal number of infer requests will be created. - - :return: An :class:`ExecutableNetwork` object - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2) - """ - cdef ExecutableNetwork exec_net = ExecutableNetwork() - cdef map[string, string] c_config - cdef string c_device_name - cdef string c_network_path - if num_requests < 0: - raise ValueError(f"Incorrect number of requests specified: {num_requests}. 
Expected positive integer number " - "or zero for auto detection") - if config: - c_config = dict_to_c_map(config) - if device_name: - c_device_name = device_name.encode() - if isinstance(network, str): - c_network_path = network.encode() - if device_name: - with nogil: - exec_net.impl = move(self.impl.loadNetworkFromFile(c_network_path, c_device_name, c_config, num_requests)) - else: - with nogil: - exec_net.impl = move(self.impl.loadNetworkFromFile(c_network_path, c_config, num_requests)) - else: - if device_name: - with nogil: - exec_net.impl = move(self.impl.loadNetwork((network).impl, c_device_name, c_config, num_requests)) - else: - with nogil: - exec_net.impl = move(self.impl.loadNetwork((network).impl, c_config, num_requests)) - return exec_net - - cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config=None, int num_requests=1): - """Creates an executable network from a previously exported network - - :param device_name: Name of device load executable network on - :param model_file: Full path to the location of the exported file - :param config: A dictionary of plugin configuration keys and their values - :param num_requests: A positive integer value of infer requests to be created. Number of infer requests is limited - by device capabilities. - Value `0` indicates that optimal number of infer requests will be created. - - :return: An :class:`ExecutableNetwork` object - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2) - # export executable network - exec_net.export(path_to_file_to_save) - # import previously exported executable network - exec_net_imported = ie.import_network(model_file=path_to_file_to_save, device_name="CPU") - """ - cdef ExecutableNetwork exec_net = ExecutableNetwork() - cdef map[string, string] c_config - if num_requests < 0: - raise ValueError(f"Incorrect number of requests specified: {num_requests}. Expected positive integer number " - "or zero for auto detection") - if config: - c_config = dict_to_c_map(config) - exec_net.impl = move(self.impl.importNetwork(model_file.encode(), device_name.encode(), c_config, num_requests)) - return exec_net - - - def query_network(self, IENetwork network, str device_name, config=None): - """Queries the plugin with specified device name what network layers are supported in the current configuration. - Please note that layers support depends on plugin configuration and loaded extensions. - - :param network: A valid :class:`IENetwork` instance - :param device_name: A device name of a target plugin - :param config: A dictionary of plugin configuration keys and their values - :return: A dictionary mapping layers and device names on which they are supported - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU") - """ - cdef map[string, string] c_config - if config: - c_config = dict_to_c_map(config) - res = self.impl.queryNetwork(network.impl, device_name.encode(), c_config) - return c_map_to_dict(res) - - - def set_config(self, config: dict, device_name: str): - """Sets a configuration for a plugin - - .. note:: When specifying a key value of a config, the "KEY_" prefix is omitted. 
- - :param config: a dictionary of configuration parameters as keys and their values - :param device_name: a device name of a target plugin - :return: None - - Usage examples:\n - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - ie.set_config(config={"PERF_COUNT": "YES"}, device_name="CPU") - """ - cdef map[string, string] c_config = dict_to_c_map(config) - self.impl.setConfig(c_config, device_name.encode()) - - - def register_plugin(self, plugin_name: str, device_name: str = ""): - """Register a new device and plugin that enables this device inside OpenVINO Runtime. - - :param plugin_name: A path (absolute or relative) or name of a plugin. Depending on platform, - `plugin` is wrapped with shared library suffix and prefix to identify library full name - :param device_name: A target device name for the plugin. If not specified, the method registers - a plugin with the default name. - - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.register_plugin(plugin_name="openvino_intel_cpu_plugin", device_name="MY_NEW_PLUGIN") - """ - self.impl.registerPlugin(plugin_name.encode(), device_name.encode()) - - - def register_plugins(self, xml_config_file: str): - """Registers plugins specified in an `.xml` configuration file - - :param xml_config_file: A full path to `.xml` file containing plugins configuration - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.register_plugins("/localdisk/plugins/my_custom_cfg.xml") - """ - self.impl.registerPlugins(xml_config_file.encode()) - - - def unregister_plugin(self, device_name: str): - """Unregisters a plugin with a specified device name - - :param device_name: A device name of the plugin to unregister - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.unregister_plugin(device_name="GPU") - """ - self.impl.unregisterPlugin(device_name.encode()) - - - def add_extension(self, extension_path: str, device_name: str): - """Loads extension library to the plugin with a specified device name - - :param extension_path: Path to the extensions library file to load to a plugin - :param device_name: A device name of a plugin to load the extensions to - :return: None - - Usage example:\n - - .. code-block:: python - - ie = IECore() - ie.add_extension(extension_path="/some_dir/libcpu_extension_avx2.so", device_name="CPU") - """ - self.impl.addExtension(extension_path.encode(), device_name.encode()) - - def get_metric(self, device_name: str, metric_name: str): - """ - Gets a general runtime metric for dedicated hardware. Enables to request common device properties, - which are :class:`ExecutableNetwork` agnostic, such as device name, temperature, and other devices-specific values. - - :param device_name: A name of a device to get a metric value. - :param metric_name: A metric name to request. - :return: A metric value corresponding to a metric key. - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.get_metric(metric_name="SUPPORTED_METRICS", device_name="CPU") - """ - return self.impl.getMetric(device_name.encode(), metric_name.encode()) - - - def get_config(self, device_name: str, config_name: str): - """Gets a configuration dedicated to device behavior. The method targets to extract information - which can be set via set_config method. - - .. note:: When specifying a key value of a config, the "KEY_" prefix is omitted. - - :param device_name: A name of a device to get a config value. 
- :param config_name: A config name to request. - :return: A config value corresponding to a config key. - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.get_config(device_name="CPU", config_name="CPU_BIND_THREAD") - """ - return self.impl.getConfig(device_name.encode(), config_name.encode()) - - ## A list of devices. The devices are returned as \[CPU, GPU.0, GPU.1\]. - # If there are more than one device of a specific type, they all are listed followed by a dot and a number. - @property - def available_devices(self): - """ - A list of devices. The devices are returned as \[CPU, GPU.0, GPU.1\]. - If there are more than one device of a specific type, they all are listed followed by a dot and a number. - """ - cdef vector[string] c_devices = self.impl.getAvailableDevices() - return [d.decode() for d in c_devices] - -cdef class PreProcessChannel: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This structure stores info about pre-processing of network inputs (scale, mean image, ...) - """ - - property mean_value: - def __get__(self): - return deref(self._ptr).meanValue - - def __set__(self, float mean_value): - deref(self._ptr).meanValue = mean_value - property std_scale: - def __get__(self): - return deref(self._ptr).stdScale - - def __set__(self, float std_scale): - deref(self._ptr).stdScale = std_scale - property mean_data: - def __get__(self): - blob = Blob() - blob._ptr = deref(self._ptr).meanData - return blob - - def __set__(self, Blob mean_data): - deref(self._ptr).meanData = mean_data._ptr - - -cdef class PreProcessInfo: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class stores pre-process information for the input - """ - - def __cinit__(self): - self._ptr = new CPreProcessInfo() - self._cptr = self._ptr - self._user_data = True - - def __dealloc__(self): - if self._user_data: - del self._ptr - - def __getitem__(self, size_t index): - cdef CPreProcessChannel.Ptr c_channel = deref(self._cptr)[index] - channel = PreProcessChannel() - channel._ptr = c_channel - return channel - - - def get_number_of_channels(self): - """ - Returns a number of channels to preprocess - """ - return deref(self._cptr).getNumberOfChannels() - - - def init(self, const size_t number_of_channels): - """ - Initializes with given number of channels - """ - if not self._ptr: - raise TypeError("Cannot initialized when created from constant") - deref(self._ptr).init(number_of_channels) - - - def set_mean_image(self, Blob mean_image): - """ - Sets mean image values if operation is applicable. - Also sets the mean type to MEAN_IMAGE for all channels - """ - if not self._ptr: - raise TypeError("Cannot set mean image when called from constant") - deref(self._ptr).setMeanImage(mean_image._ptr) - - - def set_mean_image_for_channel(self, Blob mean_image, size_t channel): - """ - Sets mean image values if operation is applicable. 
- Also sets the mean type to MEAN_IMAGE for a particular channel - """ - if not self._ptr: - raise TypeError("Cannot set mean image for channel when called from constant") - deref(self._ptr).setMeanImageForChannel(mean_image._ptr, channel) - - @property - def mean_variant(self): - """Mean Variant to be applied for input before inference if needed. - - Usage example: - - .. code-block:: python - - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - net.input_info['data'].preprocess_info.mean_variant = MeanVariant.MEAN_IMAGE - """ - return MeanVariant(deref(self._cptr).getMeanVariant()) - - @mean_variant.setter - def mean_variant(self, variant : MeanVariant): - if not self._ptr: - raise TypeError("Cannot set mean image when called from constant") - deref(self._ptr).setVariant(variant.value) - - - @property - def resize_algorithm(self): - """ - Resize Algorithm to be applied for input before inference if needed. - .. note:: - - It's need to set your input via the set_blob method. - - Usage example: - - .. code-block:: python - - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - net.input_info['data'].preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR - exec_net = ie_core.load_network(net, 'CPU') - tensor_desc = ie.TensorDesc("FP32", [1, 3, image.shape[2], image.shape[3]], "NCHW") - img_blob = ie.Blob(tensor_desc, image) - request = exec_net.requests[0] - request.set_blob('data', img_blob) - request.infer() - """ - return ResizeAlgorithm(deref(self._cptr).getResizeAlgorithm()) - - @resize_algorithm.setter - def resize_algorithm(self, alg : ResizeAlgorithm): - if not self._ptr: - raise TypeError("Cannot set resize algorithm when called from constant") - deref(self._ptr).setResizeAlgorithm(alg.value) - - - @property - def color_format(self): - """ - Color format to be used in on-demand color conversions applied to input before inference - - Usage example: - - .. code-block:: python - - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - net.input_info['data'].preprocess_info.color_format = ColorFormat.BGR - """ - return ColorFormat(deref(self._cptr).getColorFormat()) - - @color_format.setter - def color_format(self, fmt : ColorFormat): - if not self._ptr: - raise TypeError("Cannot set color format when called from constant") - deref(self._ptr).setColorFormat(fmt.value) - - -cdef class InputInfoPtr: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class contains information about each input of the network - """ - - @property - def name(self): - """ - Name of this input - """ - return deref(self._ptr).name().decode() - - @property - def precision(self): - """ - Precision of this input - """ - return deref(self._ptr).getPrecision().name().decode() - - @precision.setter - def precision(self, precision : str): - if precision not in supported_precisions: - raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}") - deref(self._ptr).setPrecision(C.Precision.FromStr(precision.encode())) - - @property - def layout(self): - """ - Layout of this input - """ - return layout_int_to_str_map[deref(self._ptr).getLayout()] - - @layout.setter - def layout(self, layout : str): - if layout not in layout_str_to_enum.keys(): - raise ValueError(f"Unsupported layout {layout}! 
" - f"List of supported layouts: {list(layout_str_to_enum.keys())}") - deref(self._ptr).setLayout(layout_str_to_enum[layout]) - - - @property - def preprocess_info(self): - """Gets pre-process info for the input - - Usage example: - - .. code-block:: python - - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - net.input_info['data'].preprocess_info.color_format = ColorFormat.BGR - """ - cdef CPreProcessInfo* c_preprocess_info = &deref(self._ptr).getPreProcess() - preprocess_info = PreProcessInfo() - del preprocess_info._ptr - preprocess_info._user_data = False - preprocess_info._ptr = c_preprocess_info - preprocess_info._cptr = c_preprocess_info - return preprocess_info - - @property - def tensor_desc(self): - cdef CTensorDesc c_tensor_desc = deref(self._ptr).getTensorDesc() - precision = c_tensor_desc.getPrecision().name().decode() - layout = c_tensor_desc.getLayout() - dims = c_tensor_desc.getDims() - tensor_desc = TensorDesc(precision, dims, layout_int_to_str_map[layout]) - tensor_desc.impl = c_tensor_desc - return tensor_desc - - @property - def input_data(self): - """ - Get access to DataPtr object - """ - cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData() - data_ptr = DataPtr() - data_ptr._ptr_network = self._ptr_network - data_ptr._ptr = c_data_ptr - return data_ptr - - @input_data.setter - def input_data(self, input_ptr : DataPtr): - deref(self._ptr).setInputData(input_ptr._ptr) - - -cdef class InputInfoCPtr: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class contains const information about each input of the network. - Provides same interface as InputInfoPtr object except properties setters - """ - - @property - def name(self): - """ - Name of this input - """ - return deref(self._ptr).name().decode() - - @property - def precision(self): - """ - Precision of this input - """ - return deref(self._ptr).getPrecision().name().decode() - - @property - def input_data(self): - """ - Get access to DataPtr object - """ - cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData() - data_ptr = DataPtr() - data_ptr._ptr = c_data_ptr - data_ptr._ptr_plugin = self._ptr_plugin - return data_ptr - - @property - def tensor_desc(self): - """ - tensor_desc of this input - """ - cdef CTensorDesc c_tensor_desc = deref(self._ptr).getTensorDesc() - precision = c_tensor_desc.getPrecision().name().decode() - layout = c_tensor_desc.getLayout() - dims = c_tensor_desc.getDims() - tensor_desc = TensorDesc(precision, dims, layout_int_to_str_map[layout]) - tensor_desc.impl = c_tensor_desc - return tensor_desc - - -cdef class DataPtr: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class is the layer data representation. 
- """ - - def __init__(self): - """ - Default constructor - """ - self._ptr_network = NULL - - @property - def name(self): - """ - Name of the data object - """ - return deref(self._ptr).getName().decode() - - @property - def precision(self): - """ - Precision of the data object - """ - return deref(self._ptr).getPrecision().name().decode() - - @precision.setter - def precision(self, precision): - if precision not in supported_precisions: - raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}") - deref(self._ptr).setPrecision(C.Precision.FromStr(precision.encode())) - - @property - def shape(self): - """ - Shape (dimensions) of the data object - """ - return deref(self._ptr).getDims() - - @property - def layout(self): - """ - Layout of the data object - """ - return layout_int_to_str_map[deref(self._ptr).getLayout()] - - @layout.setter - def layout(self, layout): - if layout not in layout_str_to_enum.keys(): - raise ValueError(f"Unsupported layout {layout}! " - f"List of supported layouts: {list(layout_str_to_enum.keys())}") - deref(self._ptr).setLayout(layout_str_to_enum[layout]) - - @property - def initialized(self): - """ - Checks if the current data object is resolved - """ - return deref(self._ptr).isInitialized() - - -cdef class CDataPtr: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class is the layer constant data representation. Provides same interface as DataPtr object except properties setters - """ - - @property - def name(self): - """ - Name of the data object - """ - return deref(self._ptr).getName().decode() - - @property - def precision(self): - """ - Precision of the data object - """ - return deref(self._ptr).getPrecision().name().decode() - - @property - def shape(self): - """ - Shape (dimensions) of the data object - """ - return deref(self._ptr).getDims() - - @property - def layout(self): - """ - Layout of the data object - """ - return layout_int_to_str_map[deref(self._ptr).getLayout()] - - @property - def initialized(self): - """ - Checks if the current data object is resolved - """ - return deref(self._ptr).isInitialized() - - -cdef class ExecutableNetwork: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class represents a network instance loaded to plugin and ready for inference. - """ - - def __init__(self): - """ - There is no explicit class constructor. To make a valid instance of :class:`ExecutableNetwork`, - use :func:`IECore.load_network` method of the :class:`IECore` class. - """ - self._infer_requests = [] - - def infer(self, inputs=None): - """Starts synchronous inference for the first infer request of the executable network and returns output data. - Wraps :func:`InferRequest.infer` method of the :class:`InferRequest` class - - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects of proper shape with - input data for the layer - :return: A dictionary that maps output layer names to :class:`numpy.ndarray` objects with output data of the layer - - Usage example: - - .. 
code-block:: python - - ie_core = IECore() - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - res = exec_net.infer({'data': img}) - res - {'prob': array([[[[2.83426580e-08]], - [[2.40166020e-08]], - [[1.29469613e-09]], - [[2.95946148e-08]] - ...... - ]])} - """ - current_request = self.requests[0] - current_request.infer(inputs) - res = {} - for name, value in current_request.output_blobs.items(): - res[name] = deepcopy(value.buffer) - return res - - def start_async(self, request_id, inputs=None): - """ - Starts asynchronous inference for specified infer request. - Wraps :func:`InferRequest.async_infer` method of the :class:`InferRequest` class. - - :param request_id: Index of infer request to start inference - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects of proper - shape with input data for the layer - :return: A handler of specified infer request, which is an instance of the :class:`InferRequest` class. - - Usage example: - - .. code-block:: python - - infer_request_handle = exec_net.start_async(request_id=0, inputs={input_blob: image}) - infer_status = infer_request_handle.wait() - res = infer_request_handle.output_blobs[out_blob_name] - """ - if request_id not in list(range(len(self.requests))): - raise ValueError("Incorrect request_id specified!") - current_request = self.requests[request_id] - current_request.async_infer(inputs) - return current_request - - - @property - def requests(self): - """ - A tuple of :class:`InferRequest` instances - """ - cdef int c_infer_requests_size = deref(self.impl).infer_requests.size() - if len(self._infer_requests) == 0: - for i in range(c_infer_requests_size): - infer_request = InferRequest() - infer_request.impl = &(deref(self.impl).infer_requests[i]) - infer_request._inputs_list = list(self.input_info.keys()) - infer_request._outputs_list = list(self.outputs.keys()) - self._infer_requests.append(infer_request) - - if len(self._infer_requests) != c_infer_requests_size: - raise Exception("Mismatch of infer requests number!") - - return self._infer_requests - - @property - def input_info(self): - """ - A dictionary that maps input layer names to InputInfoCPtr objects - """ - cdef map[string, C.InputInfo.CPtr] c_inputs = deref(self.impl).getInputsInfo() - inputs = {} - cdef InputInfoCPtr input_info_ptr - for in_ in c_inputs: - input_info_ptr = InputInfoCPtr() - input_info_ptr._ptr = in_.second - input_info_ptr._ptr_plugin = deref(self.impl).getPluginLink() - inputs[in_.first.decode()] = input_info_ptr - return inputs - - ## A dictionary that maps output layer names to CDataPtr objects - @property - def outputs(self): - """ - A dictionary that maps output layer names to CDataPtr objects - """ - cdef map[string, C.CDataPtr] c_outputs = deref(self.impl).getOutputs() - outputs = {} - cdef CDataPtr data_ptr - for in_ in c_outputs: - data_ptr = CDataPtr() - data_ptr._ptr = in_.second - data_ptr._ptr_plugin = deref(self.impl).getPluginLink() - outputs[in_.first.decode()] = data_ptr - return outputs - - - def get_exec_graph_info(self): - """Gets executable graph information from a device - - :return: An instance of :class:`IENetwork` - - Usage example: - - .. 
code-block:: python - - ie_core = IECore() - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie_core.load_network(net, device, num_requests=2) - exec_graph = exec_net.get_exec_graph_info() - """ - ie_network = IENetwork() - ie_network.impl = deref(self.impl).GetExecGraphInfo() - ie_network._ptr_plugin = deref(self.impl).getPluginLink() - return ie_network - - - def get_metric(self, metric_name: str): - """Gets general runtime metric for an executable network. It can be network name, actual device ID on - which executable network is running or all other properties which cannot be changed dynamically. - - :param metric_name: A metric name to request. - :return: A metric value corresponding to a metric key. - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie.load_network(net, "CPU") - exec_net.get_metric("NETWORK_NAME") - """ - return deref(self.impl).getMetric(metric_name.encode()) - - - def get_config(self, config_name: str): - """Gets configuration for current executable network. The method is responsible to extract information - which affects executable network execution - - :param config_name: A configuration parameter name to request. - :return: A configuration value corresponding to a configuration key. - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie.load_network(net, "CPU") - config = exec_net.get_config("CPU_BIND_THREAD") - """ - return deref(self.impl).getConfig(config_name.encode()) - - ## Sets configuration for current executable network. - # - # @param config: a dictionary of configuration parameters as keys and their values - # @return None - # - # Usage example:\n - # ```python - # ie = IECore() - # net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - # exec_net = ie.load_network(net, "GNA") - # config = exec_net.set_config({"DEVICE_MODE" : "GNA_SW_EXACT"}) - # ``` - def set_config(self, config: dict): - cdef map[string, string] c_config = dict_to_c_map(config) - deref(self.impl).setConfig(c_config) - - ## Exports the current executable network. - # @param model_file Full path to the target exported file location - # @return None - # - # ```python - # ie = IECore() - # net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - # exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2) - # exec_net.export(path_to_file_to_save) - # ``` - def export(self, model_file: str): - """Exports the current executable network. - - :param model_file: Full path to the target exported file location - :return: None - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2) - exec_net.export(path_to_file_to_save) - """ - deref(self.impl).exportNetwork(model_file.encode()) - - cpdef wait(self, num_requests=None, timeout=None): - """Waits when the result from any request becomes available. Blocks until specified timeout elapses or the result. - - :param num_requests: Number of idle requests for which wait. - If not specified, `num_requests` value is set to number of requests by default. - :param timeout: Time to wait in milliseconds or special (0, -1) cases described above. - If not specified, `timeout` value is set to -1 by default. 
- :return: Request status code: `OK` or `RESULT_NOT_READY` - """ - cdef int status_code - cdef int64_t c_timeout - cdef int c_num_requests - if num_requests is None: - num_requests = len(self.requests) - c_num_requests = num_requests - if timeout is None: - timeout = WaitMode.RESULT_READY - c_timeout = timeout - with nogil: - status_code = deref(self.impl).wait(c_num_requests, c_timeout) - return status_code - - - cpdef get_idle_request_id(self): - """ - Get idle request ID - - :return: Request index - """ - return deref(self.impl).getIdleRequestId() - -ctypedef extern void (*cb_type)(void*, int) with gil - - -cdef class InferRequest: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class provides an interface to infer requests of :class:`ExecutableNetwork` and serves - to handle infer requests execution and to set and get output data. - """ - - def __init__(self): - """ - There is no explicit class constructor. To make a valid :class:`InferRequest` instance, use :func:`IECore.load_network` - method of the :class:`IECore` class with specified number of requests to get :class:`ExecutableNetwork` instance - which stores infer requests. - """ - self._user_blobs = {} - self._inputs_list = [] - self._outputs_list = [] - self._py_callback = lambda *args, **kwargs: None - self._py_data = None - - cdef void user_callback(self, int status) with gil: - if self._py_callback: - self._py_callback(status, self._py_data) - - def set_completion_callback(self, py_callback, py_data = None): - """Description: Sets a callback function that is called on success or failure of an asynchronous request - - :param py_callback: Any defined or lambda function - :param py_data: Data that is passed to the callback function - :return: None - - Usage example: - - .. 
code-block:: python - - callback = lambda status, py_data: print(f"Request with id {py_data} finished with status {status}") - ie = IECore() - net = ie.read_network(model="./model.xml", weights="./model.bin") - exec_net = ie.load_network(net, "CPU", num_requests=4) - for id, req in enumerate(exec_net.requests): - req.set_completion_callback(py_callback=callback, py_data=id) - - for req in exec_net.requests: - req.async_infer({"data": img}) - """ - self._py_callback = py_callback - self._py_data = py_data - deref(self.impl).setCyCallback( self.user_callback, self) - - cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name): - cdef BlobBuffer buffer = BlobBuffer() - cdef CBlob.Ptr blob_ptr - blob_ptr = deref(self.impl).getBlobPtr(blob_name) - buffer.reset(blob_ptr) - return buffer - - - @property - def input_blobs(self): - """ - Dictionary that maps input layer names to corresponding Blobs - """ - input_blobs = {} - for input in self._inputs_list: - # TODO: will not work for setting data via .inputs['data'][:] - if input in self._user_blobs: - input_blobs[input] = self._user_blobs[input] - else: - blob = Blob() - blob._ptr = deref(self.impl).getBlobPtr(input.encode()) - input_blobs[input] = blob - return input_blobs - - @property - def output_blobs(self): - """ - Dictionary that maps output layer names to corresponding Blobs - """ - output_blobs = {} - for output in self._outputs_list: - blob = Blob() - blob._ptr = deref(self.impl).getBlobPtr(output.encode()) - output_blobs[output] = deepcopy(blob) - return output_blobs - - @property - def preprocess_info(self): - """ - Dictionary that maps input layer names to corresponding preprocessing information - """ - preprocess_info = {} - for input_blob in self.input_blobs.keys(): - preprocess = PreProcessInfo() - del preprocess._ptr - preprocess._user_data = False - preprocess._ptr = NULL - preprocess._cptr = &deref(self.impl).getPreProcess(input_blob.encode()) - preprocess_info[input_blob] = preprocess - return preprocess_info - - def query_state(self): - """Gets state control interface for given infer request - State control essential for recurrent networks - :return: A vector of Memory State objects - """ - cdef vector[C.CVariableState] c_mem_state_vec = deref(self.impl).queryState() - mem_state_vec = [] - for ms in c_mem_state_vec: - state = VariableState() - state.impl = ms - mem_state_vec.append(state) - return mem_state_vec - - def set_blob(self, blob_name : str, blob : Blob): - """Sets user defined Blob for the infer request - - :param blob_name: A name of input blob - :param blob: Blob object to set for the infer request - :param preprocess_info: PreProcessInfo object to set for the infer request. - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - net = IENetwork("./model.xml", "./model.bin") - exec_net = ie.load_network(net, "CPU", num_requests=2) - td = TensorDesc("FP32", (1, 3, 224, 224), "NCHW") - blob_data = np.ones(shape=(1, 3, 224, 224), dtype=np.float32) - blob = Blob(td, blob_data) - exec_net.requests[0].set_blob(blob_name="input_blob_name", blob=blob), - """ - deref(self.impl).setBlob(blob_name.encode(), blob._ptr) - self._user_blobs[blob_name] = blob - - cpdef infer(self, inputs=None): - """Starts synchronous inference of the infer request and fill outputs array - - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects of proper shape with - input data for the layer - :return: None - - Usage example: - - .. 
code-block:: python - - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - exec_net.requests[0].infer({input_blob: image}) - res = exec_net.requests[0].output_blobs['prob'] - np.flip(np.sort(np.squeeze(res)),0) - - # array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01, - # 5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03, - # 2.26027006e-03, 2.12283316e-03 ...]) - """ - if inputs is not None: - self._fill_inputs(inputs) - deref(self.impl).infer() - - cpdef async_infer(self, inputs=None): - """Starts asynchronous inference of the infer request and fill outputs array - - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects - of proper shape with input data for the layer - :return: None - - Usage example: - - .. code-block:: python - - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - exec_net.requests[0].async_infer({input_blob: image}) - request_status = exec_net.requests[0].wait() - res = exec_net.requests[0].output_blobs['prob'] - """ - if inputs is not None: - self._fill_inputs(inputs) - deref(self.impl).infer_async() - - cpdef wait(self, timeout=None): - """Waits for the result to become available. Blocks until specified timeout elapses or the result - becomes available, whichever comes first. - - :param timeout: Time to wait in milliseconds or special (0, -1) cases described above. - If not specified, `timeout` value is set to -1 by default. - :return: Request status code. - - .. note:: - - There are special values of the timeout parameter: - - * 0 - Immediately returns the inference status. It does not block or interrupt execution. - To find statuses meaning, please refer to :ref:`enum_InferenceEngine_StatusCode` in Inference Engine C++ documentation - * -1 - Waits until inference result becomes available (default value) - - Usage example: See :func:`InferRequest.async_infer` method of the the :class:`InferRequest` class. - """ - cdef int status - cdef int64_t c_timeout - if timeout is None: - timeout = WaitMode.RESULT_READY - c_timeout = timeout - with nogil: - status = deref(self.impl).wait(c_timeout) - return status - - - cpdef get_perf_counts(self): - """Queries performance measures per layer to get feedback of what is the most time consuming layer. - - .. note:: Performance counters data and format depends on the plugin - - :return: Dictionary containing per-layer execution information. - - Usage example: - - .. code-block:: python - - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - exec_net.requests[0].infer({input_blob: image}) - exec_net.requests[0].get_perf_counts() - # {'Conv2D': {'exec_type': 'jit_avx2_1x1', - # 'real_time': 154, - # 'cpu_time': 154, - # 'status': 'EXECUTED', - # 'layer_type': 'Convolution'}, - # 'Relu6': {'exec_type': 'undef', - # 'real_time': 0, - # 'cpu_time': 0, - # 'status': 'NOT_RUN', - # 'layer_type': 'Clamp'} - # ... - # } - """ - cdef map[string, C.ProfileInfo] c_profile = deref(self.impl).getPerformanceCounts() - profile = {} - for line in c_profile: - info = line.second - # TODO: add execution index. Check if unsigned int is properly converted to int in python. 
- profile[line.first.decode()] = {"status": info.status.decode(), "exec_type": info.exec_type.decode(), - "layer_type": info.layer_type.decode(), "real_time": info.real_time, - "cpu_time": info.cpu_time, "execution_index": info.execution_index} - return profile - - ## Current infer request inference time in milliseconds - @property - def latency(self): - """ - Current infer request inference time in milliseconds - """ - return self.impl.exec_time - - - def _fill_inputs(self, inputs): - for k, v in inputs.items(): - assert k in self._inputs_list, f"No input with name {k} found in network" - if self.input_blobs[k].tensor_desc.precision == "FP16": - self.input_blobs[k].buffer[:] = v.view(dtype=np.int16) - else: - self.input_blobs[k].buffer[:] = v - - -cdef class IENetwork: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - """ - ## Class constructor - # - # @param model: A PyCapsule containing smart pointer to nGraph function. - # - # @return Instance of IENetwork class - # - # Usage example:\n - # Initializing `IENetwork` object from IR files: - # ```python - # func = Function([relu], [param], 'test') - # caps = Function.to_capsule(func) - # net = IENetwork(caps) - # ``` - def __cinit__(self, model = None): - # Try to create Inference Engine network from capsule - if model is not None: - self.impl = C.IENetwork(model) - else: - with nogil: - self.impl = C.IENetwork() - - @property - def name(self): - """ - Name of the loaded network - """ - name = bytes(self.impl.name) - return name.decode() - - @property - def input_info(self): - """ - A dictionary that maps input layer names to InputInfoPtr objects. - """ - cdef map[string, C.InputInfo.Ptr] c_inputs = self.impl.getInputsInfo() - inputs = {} - cdef InputInfoPtr input_info_ptr - for input in c_inputs: - input_info_ptr = InputInfoPtr() - input_info_ptr._ptr = input.second - input_info_ptr._ptr_network = &self.impl - inputs[input.first.decode()] = input_info_ptr - return inputs - - ## A dictionary that maps output layer names to DataPtr objects - @property - def outputs(self): - """ - A dictionary that maps output layer names to DataPtr objects - """ - cdef map[string, C.DataPtr] c_outputs = self.impl.getOutputs() - outputs = {} - cdef DataPtr data_ptr - for output in c_outputs: - data_ptr = DataPtr() - data_ptr._ptr_network = &self.impl - data_ptr._ptr = output.second - outputs[output.first.decode()] = data_ptr - return outputs - - - @property - def batch_size(self): - """Batch size of the network. Provides getter and setter interfaces to get and modify the - network batch size. For example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - print(net.batch_size) - net.batch_size = 4 - print(net.batch_size) - print(net.input_info['data'].input_data.shape) - """ - return self.impl.getBatch() - - @batch_size.setter - def batch_size(self, batch: int): - if batch <= 0: - raise AttributeError(f"Invalid batch size {batch}! Batch size should be positive integer value") - self.impl.setBatch(batch) - - def add_outputs(self, outputs): - """Marks any intermediate layer as output layer to retrieve the inference results from the specified layers. - - :param outputs: List of layers to be set as model outputs. 
The list can contain strings with layer names to be set - as outputs or tuples with layer name as first element and output port id as second element. - In case of setting one layer as output, string or tuple with one layer can be provided. - - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - net.add_outputs(["conv5_1', conv2_1', (split_2, 1)])] - """ - if not isinstance(outputs, list): - outputs = [outputs] - for i, line in enumerate(outputs): - if isinstance(line, str): - self.impl.addOutput(line.encode(), 0) - elif isinstance(line, tuple) and len(line) == 2: - self.impl.addOutput(line[0].encode(), line[1]) - else: - raise TypeError(f"Incorrect type {type(line)} for layer to add at index {i}. " - "Expected string with layer name or tuple with two elements: layer name as " - "first element and port id as second") - - def serialize(self, path_to_xml, path_to_bin: str = ""): - """Serializes the network and stores it in files. - - :param path_to_xml: Path to a file, where a serialized model will be stored - :param path_to_bin: Path to a file, where serialized weights will be stored - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml, weights=path_to_bin) - net.serialize(path_to_xml, path_to_bin) - """ - self.impl.serialize(path_to_xml.encode(), path_to_bin.encode()) - - def reshape(self, input_shapes: dict): - """Reshapes the network to change spatial dimensions, batch size, or any dimension. - - :param input_shapes: A dictionary that maps input layer names to tuples with the target shape - :return: None - - .. note:: - - Before using this method, make sure that the target shape is applicable for the network. - Changing the network shape to an arbitrary value may lead to unpredictable behaviour. - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - input_layer = next(iter(net.input_info)) - n, c, h, w = net.input_info[input_layer].input_data.shape - net.reshape({input_layer: (n, c, h*2, w*2)}) - """ - cdef map[string, vector[size_t]] c_input_shapes - cdef vector[size_t] c_shape - net_inputs = self.input_info - for input, shape in input_shapes.items(): - c_shape = [] - if input not in net_inputs: - raise AttributeError(f"Specified '{input}' layer not in network inputs '{net_inputs}'! ") - for v in shape: - try: - c_shape.push_back(v) - except OverflowError: - raise ValueError(f"Detected dynamic dimension in the shape {shape} of the `{input}` input. Dynamic shapes are supported since OpenVINO Runtime API 2022.1.") - - c_input_shapes[input.encode()] = c_shape - self.impl.reshape(c_input_shapes) - - def _get_function_capsule(self): - return self.impl.getFunction() - - def get_ov_name_for_tensor(self, orig_name: str): - name = bytes(orig_name, 'utf-8') - return self.impl.getOVNameForTensor(name).decode('utf-8') - -cdef class BlobBuffer: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. 
For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - Copy-less accessor for Inference Engine Blob - """ - - cdef reset(self, CBlob.Ptr & ptr, vector[size_t] representation_shape = []): - self.ptr = ptr - cdef CTensorDesc desc = deref(ptr).getTensorDesc() - cdef SizeVector shape - if len(representation_shape) == 0: - shape = desc.getDims() - if layout_int_to_str_map[desc.getLayout()] == 'SCALAR': - shape = [1] - else: - shape = representation_shape - cdef Py_ssize_t itemsize = deref(ptr).element_size() - self.strides.resize(shape.size()) - self.shape.resize(shape.size()) - - total_stride = itemsize - # dims are in row major (C - style), - # thence strides are computed starting from latest dimension - for i in reversed(range(shape.size())): - self.strides[i] = total_stride - self.shape[i] = shape[i] - total_stride *= shape[i] - - self.total_stride = total_stride - self.format = self._get_blob_format(desc) - self.item_size = itemsize - - def __getbuffer__(self, Py_buffer *buffer, int flags): - buffer.buf = C.get_buffer[char](deref(self.ptr)) - buffer.format = self.format - buffer.internal = NULL - buffer.itemsize = self.item_size - buffer.len = self.total_stride - buffer.ndim = self.shape.size() - buffer.obj = self - buffer.readonly = 0 - buffer.shape = self.shape.data() - buffer.strides = self.strides.data() - buffer.suboffsets = NULL - - cdef char*_get_blob_format(self, const CTensorDesc & desc): - cdef Precision precision = desc.getPrecision() - name = bytes(precision.name()).decode() - # todo: half floats - precision_to_format = { - 'FP32': 'f', # float - 'FP64': 'd', # double - 'FP16': 'h', # signed short - 'U8': 'B', # unsigned char - 'U16': 'H', # unsigned short - 'I8': 'b', # signed char - 'I16': 'h', # signed short - 'I32': 'i', # signed int - 'U32': 'I', # unsigned int - 'I64': 'q', # signed long int - 'U64': 'Q', # unsigned long int - 'BOOL': 'B', # unsigned char - 'BF16': 'h', # signed short - 'BIN': 'b', # signed char - } - if name not in precision_to_format: - raise ValueError(f"Unknown Blob precision: {name}") - - return precision_to_format[name].encode() - - def to_numpy(self, is_const= False): - precision = deref(self.ptr).getTensorDesc().getPrecision() - name = bytes(precision.name()).decode() - arr = np.asarray(self) - if is_const: - arr.flags.writeable = False - if name == "FP16": - return arr.view(dtype=np.float16) - else: - return arr diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.cpp b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.cpp deleted file mode 100644 index 6f8a94b8854824..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.cpp +++ /dev/null @@ -1,680 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_api_impl.hpp" - -#include "ie_plugin_config.hpp" -#include "openvino/op/util/framework_node.hpp" - -const std::string EXPORTED_NETWORK_NAME = "undefined"; -std::map precision_map = {{"FP32", InferenceEngine::Precision::FP32}, - {"FP64", InferenceEngine::Precision::FP64}, - {"FP16", InferenceEngine::Precision::FP16}, - {"I8", InferenceEngine::Precision::I8}, - {"I16", InferenceEngine::Precision::I16}, - {"I32", InferenceEngine::Precision::I32}, - {"I64", InferenceEngine::Precision::I64}, - {"U8", InferenceEngine::Precision::U8}, - {"U16", InferenceEngine::Precision::U16}, - {"U32", 
InferenceEngine::Precision::U32}, - {"U64", InferenceEngine::Precision::U64}}; - -std::map layout_map = {{"ANY", InferenceEngine::Layout::ANY}, - {"NCHW", InferenceEngine::Layout::NCHW}, - {"NHWC", InferenceEngine::Layout::NHWC}, - {"OIHW", InferenceEngine::Layout::OIHW}, - {"C", InferenceEngine::Layout::C}, - {"CHW", InferenceEngine::Layout::CHW}, - {"HW", InferenceEngine::Layout::HW}, - {"NC", InferenceEngine::Layout::NC}, - {"CN", InferenceEngine::Layout::CN}, - {"NCDHW", InferenceEngine::Layout::NCDHW}, - {"BLOCKED", InferenceEngine::Layout::BLOCKED}}; -#define stringify(name) #name -#define IE_CHECK_CALL(expr) \ - { \ - auto ret = (expr); \ - if (ret != InferenceEngine::StatusCode::OK) { \ - IE_THROW() << response.msg; \ - } \ - } - -static uint32_t getOptimalNumberOfRequests(const InferenceEngine::ExecutableNetwork& actual) { - try { - auto parameter_value = actual.GetMetric(METRIC_KEY(SUPPORTED_METRICS)); - auto supported_metrics = parameter_value.as>(); - const std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS); - if (std::find(supported_metrics.begin(), supported_metrics.end(), key) != supported_metrics.end()) { - parameter_value = actual.GetMetric(key); - if (parameter_value.is()) - return parameter_value.as(); - else - IE_THROW() << "Unsupported format for " << key << "!" - << " Please specify number of infer requests directly!"; - } else { - IE_THROW() << "Can't load network: " << key << " is not supported!" - << " Please specify number of infer requests directly!"; - } - } catch (const std::exception& ex) { - IE_THROW() << "Can't load network: " << ex.what() << " Please specify number of infer requests directly!"; - } -} - -static PyObject* parse_parameter(const InferenceEngine::Parameter& param) { - // Check for std::string - if (param.is()) { - return PyUnicode_FromString(param.as().c_str()); - } - // Check for int - else if (param.is()) { - auto val = param.as(); - return PyLong_FromLong((long)val); - } - // Check for unsigned int - else if (param.is()) { - auto val = param.as(); - return PyLong_FromLong((unsigned long)val); - } - // Check for uint64_t - else if (param.is()) { - auto val = param.as(); - return PyLong_FromLong((unsigned long)val); - } - // Check for float - else if (param.is()) { - auto val = param.as(); - return PyFloat_FromDouble((double)val); - } - // Check for bool - else if (param.is()) { - auto val = param.as(); - return val ? 
Py_True : Py_False; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyObject* str_val = PyUnicode_InternFromString(it.c_str()); - PyList_Append(list, str_val); - } - return list; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyLong_FromLong(it)); - } - return list; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyLong_FromLong(it)); - } - return list; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyFloat_FromDouble((double)it)); - } - return list; - } - // Check for std::tuple - else if (param.is>()) { - auto val = param.as>(); - PyObject* tuple = PyTuple_New(2); - PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); - PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); - return tuple; - } - // Check for std::tuple - else if (param.is>()) { - auto val = param.as>(); - PyObject* tuple = PyTuple_New(3); - PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); - PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); - PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long)std::get<2>(val))); - return tuple; - } - // Check for std::map - else if (param.is>()) { - auto val = param.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str())); - } - return dict; - } - // Check for std::map - else if (param.is>()) { - auto val = param.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second)); - } - return dict; - } else if (param.is>()) { - auto val = param.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - std::stringstream s; - s << it.first; - PyDict_SetItemString(dict, s.str().c_str(), PyFloat_FromDouble((double)it.second)); - } - return dict; - } else if (param.is()) { - auto val = param.as(); - using namespace InferenceEngine; - std::stringstream s; - s << val; - return PyUnicode_FromString(s.str().c_str()); - } else { - PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!"); - return (PyObject*)NULL; - } -} - -/* FrameworkNodeExtension is a temporary extension that is needed to enable FrameworkNode usage - * in IRReader for all unknown opsets and operations. 
To have a connection between Extension and - * IRReader we register extensions with specific version equal to "framework_node_ext" which - * triggers FrameworkNode usage - */ -class FrameworkNodeExtension : public InferenceEngine::IExtension { -public: - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override { - static InferenceEngine::Version ExtensionDescription = {{1, 0}, "1.0", "framework_node_ext"}; - - versionInfo = &ExtensionDescription; - } - - std::map getOpSets() override { - std::map opsets; - ngraph::OpSet opset; - opset.insert(); - opsets["util"] = opset; - return opsets; - } - - void Unload() noexcept override {} -}; - -InferenceEnginePython::IENetwork InferenceEnginePython::read_network(std::string path_to_xml, std::string path_to_bin) { - InferenceEngine::Core core; - core.AddExtension(std::make_shared()); - auto net = core.ReadNetwork(path_to_xml, path_to_bin); - return InferenceEnginePython::IENetwork(std::make_shared(net)); -} - -InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr& cnn_network) - : actual(cnn_network) { - if (actual == nullptr) - IE_THROW() << "IENetwork was not initialized."; - name = actual->getName(); - batch_size = actual->getBatchSize(); -} - -InferenceEnginePython::IENetwork::IENetwork(PyObject* network) { - auto* capsule_ptr = PyCapsule_GetPointer(network, "ngraph_function"); - auto* function_sp = static_cast*>(capsule_ptr); - if (function_sp == nullptr) - IE_THROW() << "Cannot create CNNNetwork from capsule! Capsule doesn't " - "contain nGraph function!"; - - InferenceEngine::CNNNetwork cnnNetwork(*function_sp); - actual = std::make_shared(cnnNetwork); - name = actual->getName(); - batch_size = actual->getBatchSize(); -} - -void InferenceEnginePython::IENetwork::serialize(const std::string& path_to_xml, const std::string& path_to_bin) { - actual->serialize(path_to_xml, path_to_bin); -} - -PyObject* InferenceEnginePython::IENetwork::getFunction() { - const char* py_capsule_name = "ngraph_function"; - auto ngraph_func_ptr = actual->getFunction(); - // create a shared pointer on the heap before putting it in the capsule - // this secures the lifetime of the object transferred by the capsule - auto* sp_copy = new std::shared_ptr(ngraph_func_ptr); - - // a destructor callback that will delete the heap allocated shared_ptr - // when the capsule is destructed - auto sp_deleter = [](PyObject* capsule) { - auto* capsule_ptr = PyCapsule_GetPointer(capsule, "ngraph_function"); - auto* function_sp = static_cast*>(capsule_ptr); - if (function_sp) { - delete function_sp; - } - }; - if (ngraph_func_ptr) { - // return PyCapsule_New(&ngraph_func_ptr, py_capsule_name, NULL); - return PyCapsule_New(sp_copy, py_capsule_name, sp_deleter); - } else { - return nullptr; - } -} - -const std::map InferenceEnginePython::IENetwork::getInputsInfo() { - std::map inputs; - const InferenceEngine::InputsDataMap& inputsInfo = actual->getInputsInfo(); - for (auto& in : inputsInfo) { - inputs[in.first] = in.second; - } - return inputs; -} - -const std::map InferenceEnginePython::IENetwork::getOutputs() { - std::map outputs; - const InferenceEngine::OutputsDataMap& outputsInfo = actual->getOutputsInfo(); - for (auto& out : outputsInfo) { - outputs[out.first] = out.second; - } - return outputs; -} - -std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::string& orig_name) { - return actual->getOVNameForTensor(orig_name); -} - -void InferenceEnginePython::IENetwork::addOutput(const std::string& out_layer, size_t 
port_id) { - actual->addOutput(out_layer, port_id); -} - -void InferenceEnginePython::IENetwork::setBatch(const size_t size) { - actual->setBatchSize(size); -} - -size_t InferenceEnginePython::IENetwork::getBatch() { - return actual->getBatchSize(); -} - -void InferenceEnginePython::IENetwork::reshape(const std::map>& input_shapes) { - actual->reshape(input_shapes); -} - -InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string& name, size_t num_requests) - : infer_requests(num_requests), - name(name) { - request_queue_ptr = std::make_shared(); -} - -void InferenceEnginePython::IEExecNetwork::infer() { - InferRequestWrap& request = infer_requests[0]; - request.infer(); -} - -InferenceEnginePython::IENetwork InferenceEnginePython::IEExecNetwork::GetExecGraphInfo() { - return IENetwork(std::make_shared(actual->GetExecGraphInfo())); -} - -PyObject* InferenceEnginePython::IEExecNetwork::getMetric(const std::string& metric_name) { - return parse_parameter(actual->GetMetric(metric_name)); -} - -PyObject* InferenceEnginePython::IEExecNetwork::getConfig(const std::string& name) { - return parse_parameter(actual->GetConfig(name)); -} - -void InferenceEnginePython::IEExecNetwork::setConfig(const std::map& config) { - std::map newConfig; - for (const auto& item : config) { - newConfig[item.first] = InferenceEngine::Parameter(item.second); - } - actual->SetConfig(newConfig); -} - -void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string& model_file) { - actual->Export(model_file); -} - -std::map InferenceEnginePython::IEExecNetwork::getInputsInfo() { - InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo(); - std::map pyInputs; - for (const auto& item : inputsDataMap) { - pyInputs[item.first] = item.second; - } - return pyInputs; -} - -std::map InferenceEnginePython::IEExecNetwork::getOutputs() { - InferenceEngine::ConstOutputsDataMap outputsDataMap = actual->GetOutputsInfo(); - std::map pyOutputs; - for (const auto& item : outputsDataMap) { - pyOutputs[item.first] = item.second; - } - return pyOutputs; -} - -std::shared_ptr InferenceEnginePython::IEExecNetwork::getPluginLink() { - return actual; -} - -void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, - const InferenceEngine::Blob::Ptr& blob_ptr) { - request_ptr.SetBlob(blob_name.c_str(), blob_ptr); -} - -const InferenceEngine::PreProcessInfo& InferenceEnginePython::InferRequestWrap::getPreProcess( - const std::string& blob_name) { - return request_ptr.GetPreProcess(blob_name.c_str()); -} - -InferenceEngine::Blob::Ptr InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string& blob_name) { - return request_ptr.GetBlob(blob_name.c_str()); -} - -std::vector InferenceEnginePython::InferRequestWrap::queryState() { - auto queryStateVec = request_ptr.QueryState(); - std::vector memoryStates; - for (const auto& state : queryStateVec) { - InferenceEnginePython::CVariableState st; - st.variableState = state; - memoryStates.push_back(st); - } - return memoryStates; -} - -void InferenceEnginePython::InferRequestWrap::setCyCallback(cy_callback callback, void* data) { - user_callback = callback; - user_data = data; -} - -void InferenceEnginePython::InferRequestWrap::infer() { - start_time = Time::now(); - request_ptr.Infer(); - auto end_time = Time::now(); - auto execTime = std::chrono::duration_cast(end_time - start_time); - exec_time = static_cast(execTime.count()) * 0.000001; -} - -void InferenceEnginePython::InferRequestWrap::infer_async() { - 
request_queue_ptr->setRequestBusy(index); - start_time = Time::now(); - request_ptr.StartAsync(); -} - -int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) { - InferenceEngine::StatusCode code = request_ptr.Wait(timeout); - if (code != InferenceEngine::RESULT_NOT_READY) { - request_queue_ptr->setRequestIdle(index); - } - return static_cast(code); -} - -std::map -InferenceEnginePython::InferRequestWrap::getPerformanceCounts() { - std::map perf_counts = request_ptr.GetPerformanceCounts(); - std::map perf_map; - - for (auto it : perf_counts) { - InferenceEnginePython::ProfileInfo profile_info; - switch (it.second.status) { - case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: - profile_info.status = "EXECUTED"; - break; - case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: - profile_info.status = "NOT_RUN"; - break; - case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: - profile_info.status = "OPTIMIZED_OUT"; - break; - default: - profile_info.status = "UNKNOWN"; - } - profile_info.exec_type = it.second.exec_type; - profile_info.layer_type = it.second.layer_type; - profile_info.cpu_time = it.second.cpu_uSec; - profile_info.real_time = it.second.realTime_uSec; - profile_info.execution_index = it.second.execution_index; - perf_map[it.first] = profile_info; - } - return perf_map; -} - -std::string InferenceEnginePython::get_version() { - auto version = InferenceEngine::GetInferenceEngineVersion(); - return version->buildNumber; -} - -InferenceEnginePython::IECore::IECore(const std::string& xmlConfigFile) { - actual = InferenceEngine::Core(xmlConfigFile); -} - -std::map InferenceEnginePython::IECore::getVersions( - const std::string& deviceName) { - return actual.GetVersions(deviceName); -} - -int InferenceEnginePython::IEExecNetwork::wait(int num_requests, int64_t timeout) { - return request_queue_ptr->wait(num_requests, timeout); -} - -int InferenceEnginePython::IEExecNetwork::getIdleRequestId() { - return request_queue_ptr->getIdleRequestId(); -} - -int InferenceEnginePython::IdleInferRequestQueue::wait(int num_requests, int64_t timeout) { - std::unique_lock lock(mutex); - if (timeout > 0) { - if (!cv.wait_for(lock, std::chrono::milliseconds(timeout), [this, num_requests]() { - return static_cast(idle_ids.size()) >= num_requests; - })) - return static_cast(InferenceEngine::StatusCode::RESULT_NOT_READY); - } else - cv.wait(lock, [this, num_requests]() { - return static_cast(idle_ids.size()) >= num_requests; - }); - return static_cast(InferenceEngine::StatusCode::OK); -} - -void InferenceEnginePython::IdleInferRequestQueue::setRequestIdle(int index) { - std::unique_lock lock(mutex); - idle_ids.emplace_back(index); - cv.notify_all(); -} - -void InferenceEnginePython::IdleInferRequestQueue::setRequestBusy(int index) { - std::lock_guard lock(mutex); - idle_ids.remove(index); -} - -int InferenceEnginePython::IdleInferRequestQueue::getIdleRequestId() { - std::lock_guard lock(mutex); - return idle_ids.size() ? 
idle_ids.front() : -1; -} - -void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests) { - if (0 == num_requests) { - num_requests = getOptimalNumberOfRequests(*actual); - } - infer_requests.resize(num_requests); - - for (int i = 0; i < num_requests; ++i) { - InferRequestWrap& infer_request = infer_requests[i]; - infer_request.index = i; - request_queue_ptr->setRequestIdle(i); - infer_request.request_queue_ptr = request_queue_ptr; - infer_request.request_ptr = actual->CreateInferRequest(); - - infer_request.request_ptr - .SetCompletionCallback>( - [&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode code) { - if (code != InferenceEngine::StatusCode::OK) { - IE_EXCEPTION_SWITCH(code, - ExceptionType, - InferenceEngine::details::ThrowNow{IE_LOCATION_PARAM} <<= - std::stringstream{}); - } - - auto end_time = Time::now(); - auto execTime = std::chrono::duration_cast(end_time - infer_request.start_time); - infer_request.exec_time = static_cast(execTime.count()) * 0.000001; - if (infer_request.user_callback) { - infer_request.user_callback(infer_request.user_data, code); - } - infer_request.request_queue_ptr->setRequestIdle(infer_request.index); - }); - } -} - -InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, - const std::string& binPath) { - InferenceEngine::CNNNetwork net = actual.ReadNetwork(modelPath, binPath); - return IENetwork(std::make_shared(net)); -} - -InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& model, - const uint8_t* bin, - size_t bin_size) { - InferenceEngine::MemoryBlob::Ptr weights_blob; - if (bin_size != 0) { - InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, {bin_size}, InferenceEngine::Layout::C); - weights_blob = InferenceEngine::make_shared_blob(tensorDesc); - weights_blob->allocate(); - memcpy(weights_blob->rwmap().as(), bin, bin_size); - } - InferenceEngine::CNNNetwork net = actual.ReadNetwork(model, weights_blob); - return IENetwork(std::make_shared(net)); -} - -std::unique_ptr InferenceEnginePython::IECore::loadNetwork( - IENetwork network, - const std::string& deviceName, - const std::map& config, - int num_requests) { - auto exec_network = - InferenceEnginePython::make_unique(network.name, num_requests); - exec_network->actual = - std::make_shared(actual.LoadNetwork(*network.actual, deviceName, config)); - exec_network->createInferRequests(num_requests); - - return exec_network; -} - -std::unique_ptr InferenceEnginePython::IECore::loadNetwork( - IENetwork network, - const std::map& config, - int num_requests) { - auto exec_network = - InferenceEnginePython::make_unique(network.name, num_requests); - exec_network->actual = - std::make_shared(actual.LoadNetwork(*network.actual, config)); - exec_network->createInferRequests(num_requests); - - return exec_network; -} - -std::unique_ptr InferenceEnginePython::IECore::loadNetworkFromFile( - const std::string& modelPath, - const std::string& deviceName, - const std::map& config, - int num_requests) { - auto exec_network = - InferenceEnginePython::make_unique(modelPath, num_requests); - exec_network->actual = - std::make_shared(actual.LoadNetwork(modelPath, deviceName, config)); - exec_network->createInferRequests(num_requests); - - return exec_network; -} - -std::unique_ptr InferenceEnginePython::IECore::loadNetworkFromFile( - const std::string& modelPath, - const std::map& config, - int num_requests) { - auto exec_network = - 
InferenceEnginePython::make_unique(modelPath, num_requests); - exec_network->actual = std::make_shared(actual.LoadNetwork(modelPath, config)); - exec_network->createInferRequests(num_requests); - - return exec_network; -} - -std::unique_ptr InferenceEnginePython::IECore::importNetwork( - const std::string& modelFIle, - const std::string& deviceName, - const std::map& config, - int num_requests) { - auto exec_network = - InferenceEnginePython::make_unique(EXPORTED_NETWORK_NAME, num_requests); - exec_network->actual = - std::make_shared(actual.ImportNetwork(modelFIle, deviceName, config)); - exec_network->createInferRequests(num_requests); - - return exec_network; -} - -std::map InferenceEnginePython::IECore::queryNetwork( - InferenceEnginePython::IENetwork network, - const std::string& deviceName, - const std::map& config) { - auto res = actual.QueryNetwork(*network.actual, deviceName, config); - return res.supportedLayersMap; -} - -void InferenceEnginePython::IECore::setConfig(const std::map& config, - const std::string& deviceName) { - actual.SetConfig(config, deviceName); -} - -void InferenceEnginePython::IECore::registerPlugin(const std::string& pluginName, const std::string& deviceName) { - actual.RegisterPlugin(pluginName, deviceName); -} - -void InferenceEnginePython::IECore::unregisterPlugin(const std::string& deviceName) { - actual.UnregisterPlugin(deviceName); -} - -void InferenceEnginePython::IECore::registerPlugins(const std::string& xmlConfigFile) { - actual.RegisterPlugins(xmlConfigFile); -} - -void InferenceEnginePython::IECore::addExtension(const std::string& ext_lib_path, const std::string& deviceName) { - auto extension_ptr = std::make_shared(ext_lib_path); - auto extension = std::dynamic_pointer_cast(extension_ptr); - actual.AddExtension(extension, deviceName); -} - -std::vector InferenceEnginePython::IECore::getAvailableDevices() { - return actual.GetAvailableDevices(); -} - -PyObject* InferenceEnginePython::IECore::getMetric(const std::string& deviceName, const std::string& name) { - InferenceEngine::Parameter param = actual.GetMetric(deviceName, name); - return parse_parameter(param); -} - -PyObject* InferenceEnginePython::IECore::getConfig(const std::string& deviceName, const std::string& name) { - InferenceEngine::Parameter param = actual.GetConfig(deviceName, name); - return parse_parameter(param); -} - -void InferenceEnginePython::CVariableState::reset() { - variableState.Reset(); -} - -std::string InferenceEnginePython::CVariableState::getName() { - return variableState.GetName(); -} - -InferenceEngine::Blob::Ptr InferenceEnginePython::CVariableState::getState() { - InferenceEngine::Blob::CPtr c_blob = variableState.GetState(); - return std::const_pointer_cast(c_blob); -} - -void InferenceEnginePython::CVariableState::setState(InferenceEngine::Blob::Ptr state) { - variableState.SetState(state); -} - -const size_t InferenceEnginePython::product(const InferenceEngine::SizeVector& dims) { - return std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies{}); -} diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.hpp b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.hpp deleted file mode 100644 index 3c350e6508ad28..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.hpp +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "Python.h" -#include "ie_core.hpp" - -typedef std::chrono::high_resolution_clock Time; -typedef std::chrono::nanoseconds ns; - -namespace InferenceEnginePython { - -struct ProfileInfo { - std::string status; - std::string exec_type; - std::string layer_type; - int64_t real_time; - int64_t cpu_time; - unsigned execution_index; -}; - -struct CVariableState { - InferenceEngine::VariableState variableState; - void reset(); - std::string getName(); - InferenceEngine::Blob::Ptr getState(); - void setState(InferenceEngine::Blob::Ptr state); -}; - -struct IENetwork { - std::shared_ptr actual; - std::string name; - std::size_t batch_size; - PyObject* getFunction(); - - void setBatch(const size_t size); - - size_t getBatch(); - - void addOutput(const std::string& out_layer, size_t port_id); - - const std::map getInputsInfo(); - - const std::map getOutputs(); - - void reshape(const std::map>& input_shapes); - - void serialize(const std::string& path_to_xml, const std::string& path_to_bin); - - IENetwork(const std::shared_ptr& cnn_network); - - IENetwork(PyObject* network); - - IENetwork() = default; - - void convertToOldRepresentation(); - - std::string getOVNameForTensor(const std::string& orig_name); -}; - -struct IdleInferRequestQueue { - std::list idle_ids; - std::mutex mutex; - std::condition_variable cv; - - void setRequestIdle(int index); - void setRequestBusy(int index); - - int wait(int num_requests, int64_t timeout); - - int getIdleRequestId(); - - using Ptr = std::shared_ptr; -}; - -struct InferRequestWrap { - int index; - using cy_callback = void (*)(void*, int); - - InferenceEngine::InferRequest request_ptr; - Time::time_point start_time; - double exec_time; - cy_callback user_callback; - void* user_data; - IdleInferRequestQueue::Ptr request_queue_ptr; - - void infer(); - - void infer_async(); - - int wait(int64_t timeout); - - void setCyCallback(cy_callback callback, void* data); - - InferenceEngine::Blob::Ptr getBlobPtr(const std::string& blob_name); - - void setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr); - - const InferenceEngine::PreProcessInfo& getPreProcess(const std::string& blob_name); - - std::map getPerformanceCounts(); - - std::vector queryState(); -}; - -struct IEExecNetwork { - std::shared_ptr actual; - std::vector infer_requests; - std::string name; - IdleInferRequestQueue::Ptr request_queue_ptr; - - IEExecNetwork(const std::string& name, size_t num_requests); - - IENetwork GetExecGraphInfo(); - - void infer(); - void exportNetwork(const std::string& model_file); - - std::map getInputsInfo(); - std::map getOutputs(); - - PyObject* getMetric(const std::string& metric_name); - PyObject* getConfig(const std::string& name); - void setConfig(const std::map& config); - - int wait(int num_requests, int64_t timeout); - int getIdleRequestId(); - - void createInferRequests(int num_requests); - - // binds plugin to InputInfo and Data, so that they can be destroyed before plugin (ussue 28996) - std::shared_ptr getPluginLink(); -}; - -struct IECore { - InferenceEngine::Core actual; - explicit IECore(const std::string& xmlConfigFile = std::string()); - std::map getVersions(const std::string& deviceName); - InferenceEnginePython::IENetwork readNetwork(const std::string& modelPath, const std::string& binPath); - InferenceEnginePython::IENetwork readNetwork(const std::string& model, const uint8_t* bin, size_t bin_size); - 
std::unique_ptr loadNetwork(IENetwork network, - const std::string& deviceName, - const std::map& config, - int num_requests); - std::unique_ptr loadNetwork(IENetwork network, - const std::map& config, - int num_requests); - std::unique_ptr loadNetworkFromFile( - const std::string& modelPath, - const std::string& deviceName, - const std::map& config, - int num_requests); - std::unique_ptr loadNetworkFromFile( - const std::string& modelPath, - const std::map& config, - int num_requests); - std::unique_ptr importNetwork( - const std::string& modelFIle, - const std::string& deviceName, - const std::map& config, - int num_requests); - std::map queryNetwork(IENetwork network, - const std::string& deviceName, - const std::map& config); - void setConfig(const std::map& config, const std::string& deviceName = std::string()); - void registerPlugin(const std::string& pluginName, const std::string& deviceName); - void unregisterPlugin(const std::string& deviceName); - void registerPlugins(const std::string& xmlConfigFile); - void addExtension(const std::string& ext_lib_path, const std::string& deviceName); - std::vector getAvailableDevices(); - PyObject* getMetric(const std::string& deviceName, const std::string& name); - PyObject* getConfig(const std::string& deviceName, const std::string& name); -}; - -template -T* get_buffer(InferenceEngine::Blob& blob) { - return blob.buffer().as(); -} - -template -std::unique_ptr make_unique(Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); -} - -std::string get_version(); - -InferenceEnginePython::IENetwork read_network(std::string path_to_xml, std::string path_to_bin); - -const size_t product(const InferenceEngine::SizeVector& dims); - -}; // namespace InferenceEnginePython diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl_defs.pxd b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl_defs.pxd deleted file mode 100644 index f2eb928321c832..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl_defs.pxd +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from libc.stddef cimport size_t -from libcpp cimport bool -from libcpp.string cimport string -from libcpp.vector cimport vector -from libcpp.map cimport map -from libcpp.memory cimport unique_ptr, shared_ptr, weak_ptr -from libc.stdint cimport int64_t, uint8_t - - -cdef extern from "" namespace "InferenceEngine": - ctypedef vector[size_t] SizeVector - - cdef cppclass CExecutableNetwork "InferenceEngine::ExecutableNetwork" - - cdef cppclass TBlob[T]: - ctypedef shared_ptr[TBlob[T]] Ptr - - cdef cppclass CBlob "InferenceEngine::Blob": - ctypedef shared_ptr[CBlob] Ptr - const CTensorDesc& getTensorDesc() except + - size_t element_size() except + - void allocate() - void setShape(const SizeVector& dims) except + - - cdef TBlob[Type].Ptr make_shared_blob[Type](const CTensorDesc& tensorDesc) - - cdef TBlob[Type].Ptr make_shared_blob[Type](const CTensorDesc& tensorDesc, Type* ptr, size_t size) - - cdef cppclass CTensorDesc "InferenceEngine::TensorDesc": - CTensorDesc() except + - CTensorDesc(const Precision& precision, SizeVector dims, Layout layout) except + - SizeVector& getDims() except + - void setDims(const SizeVector& dims) except + - Layout getLayout() except + - void setLayout(Layout l) except + - const Precision& getPrecision() except + - void setPrecision(const Precision& p) except + - - - cdef cppclass Data: - 
const Precision getPrecision() const - void setPrecision(const Precision& precision) const - const SizeVector getDims() except + - const string& getName() except + - const Layout getLayout() except + - void setLayout(Layout layout) except + - const bool isInitialized() except + - - ctypedef shared_ptr[Data] DataPtr - ctypedef weak_ptr[Data] DataWeakPtr - ctypedef shared_ptr[const Data] CDataPtr - - cdef cppclass InputInfo: - ctypedef shared_ptr[InputInfo] Ptr - ctypedef shared_ptr[const InputInfo] CPtr - Precision getPrecision() const - void setPrecision(Precision p) - Layout getLayout() - void setLayout(Layout l) - const string& name() const - DataPtr getInputData() const - CPreProcessInfo& getPreProcess() - const CTensorDesc& getTensorDesc() const - void setInputData(DataPtr inputPtr) - - - cdef cppclass CPreProcessChannel "InferenceEngine::PreProcessChannel": - ctypedef shared_ptr[CPreProcessChannel] Ptr - CBlob.Ptr meanData - float stdScale - float meanValue - - cdef cppclass CPreProcessInfo "InferenceEngine::PreProcessInfo": - CPreProcessChannel.Ptr& operator[](size_t index) - size_t getNumberOfChannels() const - void init(const size_t numberOfChannels) - void setMeanImage(const CBlob.Ptr& meanImage) - void setMeanImageForChannel(const CBlob.Ptr& meanImage, const size_t channel) - vector[CPreProcessChannel.Ptr] _channelsInfo - ColorFormat getColorFormat() const - void setColorFormat(ColorFormat fmt) - ResizeAlgorithm getResizeAlgorithm() const - void setResizeAlgorithm(const ResizeAlgorithm& alg) - MeanVariant getMeanVariant() const - void setVariant(const MeanVariant& variant) - - ctypedef map[string, InputInfo.CPtr] InputsDataMap - - cdef cppclass Precision: - const char*name() const - @staticmethod - const Precision FromStr(const string& str) - - cdef struct apiVersion: - int minor - int major - - cdef cppclass Version: - const char *buildNumber - const char *description - apiVersion apiVersion - - cpdef enum MeanVariant: - pass - - cpdef enum ResizeAlgorithm: - pass - - cpdef enum ColorFormat: - pass - - cdef enum Layout: - ANY - NCHW - NHWC - NCDHW - NDHWC - OIHW - GOIHW - OIDHW - GOIDHW - SCALAR - C - CHW - HW - NC - CN - BLOCKED - - -cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython": - - cdef cppclass CVariableState: - void reset() except + - string getName() except + - CBlob.Ptr getState() except + - void setState(CBlob.Ptr state) except + - - cdef cppclass ProfileInfo: - string status - string exec_type - string layer_type - long long real_time - long long cpu_time - unsigned int execution_index - - cdef cppclass WeightsInfo: - CBlob.Ptr & weights; - CBlob.Ptr & biases; - map[string, CBlob.Ptr] custom_blobs; - - cdef cppclass IEExecNetwork: - vector[InferRequestWrap] infer_requests - IENetwork GetExecGraphInfo() except + - map[string, CDataPtr] getOutputs() except + - map[string, InputInfo.CPtr] getInputsInfo() - void exportNetwork(const string & model_file) except + - object getMetric(const string & metric_name) except + - object getConfig(const string & metric_name) except + - void setConfig(const map[string, string]& config) except + - int wait(int num_requests, int64_t timeout) nogil - int getIdleRequestId() - shared_ptr[CExecutableNetwork] getPluginLink() except + - - cdef cppclass IENetwork: - IENetwork() nogil except + - IENetwork(object) except + - string name - size_t batch_size - string precision - map[string, vector[size_t]] inputs - const map[string, InputInfo.Ptr] getInputsInfo() except + - map[string, DataPtr] getOutputs() except + - void 
addOutput(string &, size_t) except + - void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except + - void setBatch(size_t size) except + - size_t getBatch() except + - void setLayerParams(map[string, map[string, string]] params_map) except + - void serialize(const string& path_to_xml, const string& path_to_bin) except + - void reshape(map[string, vector[size_t]] input_shapes) except + - object getFunction() except + - void convertToOldRepresentation() except + - string getOVNameForTensor(const string &) except + - - cdef cppclass InferRequestWrap: - double exec_time; - int index; - CBlob.Ptr getBlobPtr(const string & blob_name) except + - void setBlob(const string & blob_name, const CBlob.Ptr & blob_ptr) except + - void setBlob(const string &blob_name, const CBlob.Ptr &blob_ptr, CPreProcessInfo& info) except + - const CPreProcessInfo& getPreProcess(const string& blob_name) except + - map[string, ProfileInfo] getPerformanceCounts() except + - void infer() except + - void infer_async() except + - int wait(int64_t timeout) nogil except + - void setBatch(int size) except + - void setCyCallback(void (*)(void*, int), void *) except + - vector[CVariableState] queryState() except + - - cdef cppclass IECore: - IECore() nogil except + - IECore(const string & xml_config_file) nogil except + - map[string, Version] getVersions(const string & deviceName) except + - IENetwork readNetwork(const string& modelPath, const string& binPath) nogil except + - IENetwork readNetwork(const string& modelPath,uint8_t*bin, size_t bin_size) nogil except + - unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, const string deviceName, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] loadNetworkFromFile(const string & modelPath, const string & deviceName, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] loadNetworkFromFile(const string & modelPath, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] importNetwork(const string & modelFIle, const string & deviceName, - const map[string, string] & config, int num_requests) except + - map[string, string] queryNetwork(IENetwork network, const string deviceName, - const map[string, string] & config) except + - void setConfig(const map[string, string] & config, const string & deviceName) except + - void registerPlugin(const string & pluginName, const string & deviceName) except + - void unregisterPlugin(const string & deviceName) except + - void registerPlugins(const string & xmlConfigFile) except + - void addExtension(const string & ext_lib_path, const string & deviceName) except + - vector[string] getAvailableDevices() except + - object getMetric(const string & deviceName, const string & name) except + - object getConfig(const string & deviceName, const string & name) except + - - cdef T*get_buffer[T](CBlob &) - - cdef string get_version() - - cdef IENetwork read_network(string path_to_xml, string path_to_bin) - - cdef const size_t product(const SizeVector& dims) diff --git a/src/bindings/python/src/compatibility/openvino/requirements-dev.txt b/src/bindings/python/src/compatibility/openvino/requirements-dev.txt deleted file mode 100644 index cc35217dcbebf6..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/requirements-dev.txt +++ 
/dev/null @@ -1 +0,0 @@ -cython>=3.0.2 diff --git a/src/bindings/python/src/compatibility/openvino/setup.cfg b/src/bindings/python/src/compatibility/openvino/setup.cfg deleted file mode 100644 index af37819e5f3566..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/setup.cfg +++ /dev/null @@ -1,24 +0,0 @@ -[flake8] -# D104 - Missing docstring in public package -inline-quotes = double -filename = *.py, *.pyx -max-line-length = 160 -ignore = E203,D104 -max-parameters-amount = 8 -show_source = True -docstring-convention = google -enable-extensions = G -per-file-ignores = - *.pyx: E225, E226, E251, E999, E800, E265, E203, E266, E227, E211 - *__init__.py: F403, F405, F401 - -[pydocstyle] -convention = google - -[mypy] -ignore_missing_imports = True -disable_error_code = attr-defined -show_column_numbers = True -show_error_context = True -show_absolute_path = True -pretty = True diff --git a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt deleted file mode 100644 index 8d3ac1ab0c02a0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -cmake_minimum_required (VERSION 3.13) - -project (pyngraph) - -if(NOT DEFINED OpenVINO_SOURCE_DIR) - find_package(OpenVINO REQUIRED) - find_package(OpenVINODeveloperPackage QUIET - PATHS "${InferenceEngineDeveloperPackage_DIR}") -endif() - -# Python3_VERSION_MAJOR and Python3_VERSION_MINOR are defined in FindPython3 -set(pyversion python${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}) - -if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/) -else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/) -endif() - -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - -# compile options - -if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # disable warning: This operator was deprecated and will be removed with v0 operation. 
- add_compile_options(/wd4996) -elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - add_compile_options(-Wno-deprecated-register -Wno-range-loop-analysis) -elseif(OV_COMPILER_IS_APPLECLANG) - add_link_options(-stdlib=libc++) - add_compile_options(-Wno-unused-value -Wno-range-loop-analysis) -elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - # WA for GCC 7.5 "PYBIND11_NOINLINE inline" warning - add_compile_options(-Wno-error=attributes) -endif() - -if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # for proper fix need to update pybind to version which does not use PyEval_InitThreads() - add_compile_options(-Wno-deprecated-declarations -Wno-undef) -endif() - -# create target - -file(GLOB_RECURSE SOURCES *.cpp) - -pybind11_add_module(_${PROJECT_NAME} MODULE NO_EXTRAS ${SOURCES}) - -target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../") - -target_link_libraries(_${PROJECT_NAME} PRIVATE openvino::runtime openvino::core::dev) - -set_target_properties(_${PROJECT_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) - -# perform copy -add_custom_command(TARGET _${PROJECT_NAME} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/../ngraph ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/ngraph -) - -ov_set_install_rpath(_${PROJECT_NAME} ${OV_CPACK_PYTHONDIR} ${OV_CPACK_RUNTIMEDIR}) - -# Install - -ov_python_minimal_api(_${PROJECT_NAME}) -ov_add_clang_format_target(_${PROJECT_NAME}_clang FOR_TARGETS _${PROJECT_NAME}) - -ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} HIDDEN) - -install(TARGETS _${PROJECT_NAME} - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - -install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../ngraph - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} - USE_SOURCE_PERMISSIONS) diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_set.cpp b/src/bindings/python/src/compatibility/pyngraph/axis_set.cpp deleted file mode 100644 index 0aa59b9a055e27..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_set.cpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/axis_set.hpp" // ngraph::AxisSet - -#include -#include - -#include -#include -#include - -#include "pyngraph/axis_set.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_AxisSet(py::module m) { - py::class_> axis_set(m, "AxisSet", py::module_local()); - axis_set.doc() = "ngraph.impl.AxisSet wraps ngraph::AxisSet"; - axis_set.def(py::init&>(), py::arg("axes")); - axis_set.def(py::init&>(), py::arg("axes")); - axis_set.def(py::init&>(), py::arg("axes")); - axis_set.def(py::init(), py::arg("axes")); - - axis_set.def("__len__", [](const ngraph::AxisSet& v) { - return v.size(); - }); - - axis_set.def( - "__iter__", - [](ngraph::AxisSet& v) { - return py::make_iterator(v.begin(), v.end()); - }, - py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ - - axis_set.def("__repr__", [](const ngraph::AxisSet& self) -> std::string { - std::stringstream data_ss; - std::copy(self.begin(), self.end(), std::ostream_iterator(data_ss, ", ")); - std::string data_str = data_ss.str(); - return ""; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_set.hpp b/src/bindings/python/src/compatibility/pyngraph/axis_set.hpp deleted file mode 
100644 index e7232ec17806a3..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_set.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_AxisSet(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_vector.cpp b/src/bindings/python/src/compatibility/pyngraph/axis_vector.cpp deleted file mode 100644 index f8b133625ab288..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_vector.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/axis_vector.hpp" // ngraph::AxisVector - -#include -#include - -#include "pyngraph/axis_vector.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_AxisVector(py::module m) { - py::class_> axis_vector(m, - "AxisVector", - py::module_local()); - axis_vector.doc() = "ngraph.impl.AxisVector wraps ngraph::AxisVector"; - axis_vector.def(py::init&>(), py::arg("axes")); - axis_vector.def(py::init&>(), py::arg("axes")); - axis_vector.def(py::init(), py::arg("axes")); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_vector.hpp b/src/bindings/python/src/compatibility/pyngraph/axis_vector.hpp deleted file mode 100644 index 74c452474340df..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_vector.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_AxisVector(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate.cpp b/src/bindings/python/src/compatibility/pyngraph/coordinate.cpp deleted file mode 100644 index faa965d63de808..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/core/coordinate.hpp" // ov::Coordinate - -#include -#include - -#include "openvino/core/shape.hpp" -#include "pyngraph/coordinate.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Coordinate(py::module m) { - py::class_> coordinate(m, "Coordinate", py::module_local()); - coordinate.doc() = "ngraph.impl.Coordinate wraps ov::Coordinate"; - coordinate.def(py::init&>()); - coordinate.def(py::init()); - coordinate.def(py::init&>()); - coordinate.def(py::init()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate.hpp b/src/bindings/python/src/compatibility/pyngraph/coordinate.hpp deleted file mode 100644 index b9bf9f6574e99b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Coordinate(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp b/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp deleted file mode 100644 index 6c91879ad3ca60..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/coordinate_diff.hpp" 
// ngraph::CoordinateDiff - -#include -#include - -#include -#include -#include - -#include "pyngraph/coordinate_diff.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_CoordinateDiff(py::module m) { - py::class_> coordinate_diff(m, - "CoordinateDiff", - py::module_local()); - coordinate_diff.doc() = "ngraph.impl.CoordinateDiff wraps ngraph::CoordinateDiff"; - coordinate_diff.def(py::init&>()); - coordinate_diff.def(py::init&>()); - coordinate_diff.def(py::init()); - - coordinate_diff.def("__str__", [](const ngraph::CoordinateDiff& self) -> std::string { - std::stringstream stringstream; - std::copy(self.begin(), self.end(), std::ostream_iterator(stringstream, ", ")); - std::string string = stringstream.str(); - return string.substr(0, string.size() - 2); - }); - - coordinate_diff.def("__repr__", [](const ngraph::CoordinateDiff& self) -> std::string { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::string shape_str = py::cast(self).attr("__str__")().cast(); - return "<" + class_name + ": (" + shape_str + ")>"; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp b/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp deleted file mode 100644 index b5ec670888266d..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_CoordinateDiff(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp b/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp deleted file mode 100644 index e83206afde05d6..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// These are not used here, but needed in order to not violate ODR, since -// these are included in other translation units, and specialize some types. 
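The deleted axis_set.cpp and coordinate_diff.cpp bindings above repeat one pybind11 pattern: register the C++ container with a `std::shared_ptr` holder and `py::module_local()`, then add the Python protocol methods (`__len__`, `__iter__`, `__repr__`) as lambdas. A self-contained sketch of that pattern for a generic `std::set<size_t>`; the class and module names are illustrative, not the originals:

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <cstddef>
#include <memory>
#include <set>
#include <sstream>
#include <vector>

namespace py = pybind11;

PYBIND11_MODULE(example_bindings, m) {
    using IndexSet = std::set<std::size_t>;

    py::class_<IndexSet, std::shared_ptr<IndexSet>> index_set(m, "IndexSet", py::module_local());
    index_set.doc() = "Illustrative wrapper around std::set<size_t>";

    // Construct from a Python list of ints, mirroring the vector-based init overloads above.
    index_set.def(py::init([](const std::vector<std::size_t>& values) {
                      return std::make_shared<IndexSet>(values.begin(), values.end());
                  }),
                  py::arg("values"));

    index_set.def("__len__", [](const IndexSet& v) {
        return v.size();
    });

    // keep_alive<0, 1> keeps the set alive for as long as the returned iterator exists.
    index_set.def(
        "__iter__",
        [](IndexSet& v) {
            return py::make_iterator(v.begin(), v.end());
        },
        py::keep_alive<0, 1>());

    // Join the elements into a readable repr, as the deleted __str__/__repr__ lambdas did.
    index_set.def("__repr__", [](const IndexSet& v) {
        std::ostringstream ss;
        for (auto i : v)
            ss << i << ", ";
        return "<IndexSet: {" + ss.str() + "}>";
    });
}
```

`py::module_local()` matters here because the same helper types were bound in several extension modules; it keeps each module's registration private and avoids clashes.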
-// Related: https://github.com/pybind/pybind11/issues/1055 -#include "dict_attribute_visitor.hpp" - -#include -#include - -#include "ngraph/op/loop.hpp" -#include "ngraph/op/util/sub_graph_base.hpp" - -namespace py = pybind11; - -util::DictAttributeDeserializer::DictAttributeDeserializer( - const py::dict& attributes, - std::unordered_map>& variables) - : m_attributes(attributes), - m_variables(variables) {} - -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - if (const auto& a = ov::as_type< - ngraph::AttributeAdapter>>>( - &adapter)) { - std::vector> input_descs; - const py::dict& input_desc = m_attributes[name.c_str()].cast(); - - if (input_desc.contains("slice_input_desc") && !input_desc["slice_input_desc"].is_none()) { - for (py::handle h : input_desc["slice_input_desc"].cast()) { - const py::dict& desc = h.cast(); - auto slice_in = std::make_shared( - desc["input_idx"].cast(), - desc["body_parameter_idx"].cast(), - desc["start"].cast(), - desc["stride"].cast(), - desc["part_size"].cast(), - desc["end"].cast(), - desc["axis"].cast()); - input_descs.push_back(slice_in); - } - } - - if (input_desc.contains("merged_input_desc") && !input_desc["merged_input_desc"].is_none()) { - for (py::handle h : input_desc["merged_input_desc"].cast()) { - const py::dict& desc = h.cast(); - auto merged_in = std::make_shared( - desc["input_idx"].cast(), - desc["body_parameter_idx"].cast(), - desc["body_value_idx"].cast()); - input_descs.push_back(merged_in); - } - } - - if (input_desc.contains("invariant_input_desc") && !input_desc["invariant_input_desc"].is_none()) { - for (py::handle h : input_desc["invariant_input_desc"].cast()) { - const py::dict& desc = h.cast(); - auto invariant_in = std::make_shared( - desc["input_idx"].cast(), - desc["body_parameter_idx"].cast()); - input_descs.push_back(invariant_in); - } - } - a->set(input_descs); - } else if (const auto& a = ov::as_type>>>(&adapter)) { - std::vector> output_descs; - const py::dict& output_desc = m_attributes[name.c_str()].cast(); - if (output_desc.contains("body_output_desc") && !output_desc["body_output_desc"].is_none()) { - for (py::handle h : output_desc["body_output_desc"].cast()) { - const py::dict& desc = h.cast(); - auto body_output = std::make_shared( - desc["body_value_idx"].cast(), - desc["output_idx"].cast(), - desc["iteration"].cast()); - output_descs.push_back(body_output); - } - } - - if (output_desc.contains("concat_output_desc") && !output_desc["concat_output_desc"].is_none()) { - for (py::handle h : output_desc["concat_output_desc"].cast()) { - const py::dict& desc = h.cast(); - auto concat_output = std::make_shared( - desc["body_value_idx"].cast(), - desc["output_idx"].cast(), - desc["start"].cast(), - desc["stride"].cast(), - desc["part_size"].cast(), - desc["end"].cast(), - desc["axis"].cast()); - output_descs.push_back(concat_output); - } - } - a->set(output_descs); - } else if (const auto& a = - ov::as_type>(&adapter)) { - ngraph::op::v5::Loop::SpecialBodyPorts special_body_ports; - const py::dict& special_ports_dict = m_attributes[name.c_str()].cast(); - special_body_ports.body_condition_output_idx = - special_ports_dict["body_condition_output_idx"].cast(); - special_body_ports.current_iteration_input_idx = - special_ports_dict["current_iteration_input_idx"].cast(); - a->set(special_body_ports); - } else if (const auto& a = ov::as_type>>(&adapter)) { - std::string variable_id = m_attributes[name.c_str()].cast(); - if 
(!m_variables.count(variable_id)) { - m_variables[variable_id] = std::make_shared( - ngraph::VariableInfo{ngraph::PartialShape::dynamic(), ngraph::element::dynamic, variable_id}); - } - a->set(m_variables[variable_id]); - } else { - NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); - } - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - 
adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} - -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - if (name == "body" || name == "then_body" || name == "else_body") { - const py::dict& body_attrs = m_attributes[name.c_str()].cast(); - const auto& body_outputs = as_output_vector(body_attrs["results"].cast()); - const auto& body_parameters = body_attrs["parameters"].cast(); - auto body = std::make_shared(body_outputs, body_parameters); - adapter.set(body); - } else { - NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); - } - } -} - -util::DictAttributeSerializer::DictAttributeSerializer(const std::shared_ptr& node) { - node->visit_attributes(*this); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); - } -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - 
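The `DictAttributeDeserializer`/`DictAttributeSerializer` pair above is an attribute-visitor bridge: the serializer copies each visited attribute into a `py::dict`, and the deserializer writes dictionary values back through the adapter. A hedged, ngraph-free sketch of the same idea; every type and function name below is illustrative, and the real code dispatches on `ngraph::ValueAccessor<T>` for many more value types:

```cpp
#include <cstdint>
#include <map>
#include <string>

// Minimal stand-in for a typed attribute accessor.
template <typename T>
struct ValueAccessor {
    virtual T get() const = 0;
    virtual void set(const T& value) = 0;
    virtual ~ValueAccessor() = default;
};

struct DictSerializer {
    std::map<std::string, std::int64_t> attributes;

    // Called once per visited attribute: copy the current value out into the dict.
    void on_adapter(const std::string& name, ValueAccessor<std::int64_t>& adapter) {
        attributes[name] = adapter.get();
    }
};

struct DictDeserializer {
    const std::map<std::string, std::int64_t>& attributes;

    // Called once per visited attribute: overwrite the value if the dict provides one.
    void on_adapter(const std::string& name, ValueAccessor<std::int64_t>& adapter) {
        auto it = attributes.find(name);
        if (it != attributes.end())
            adapter.set(it->second);
    }
};
```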
m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp b/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp deleted file mode 100644 index 6cad47a10d0599..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/function.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/util/variable.hpp" - -#include - -namespace py = pybind11; - -namespace util -{ - class DictAttributeDeserializer : public ngraph::AttributeVisitor - { - public: - DictAttributeDeserializer( - const py::dict& attributes, - std::unordered_map>& variables); - - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& 
name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - - protected: - const py::dict& m_attributes; - std::unordered_map>& m_variables; - }; - - class DictAttributeSerializer : public ngraph::AttributeVisitor - { - public: - explicit DictAttributeSerializer(const std::shared_ptr& node); - - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) 
override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - - template - T get_attribute(const std::string& name) - { - NGRAPH_CHECK(m_attributes.contains(name), - "Couldn't find attribute \"", - name, - "\" in serialized node attribute dictionary."); - return m_attributes[name.c_str()].cast(); - } - - py::dict get_attributes() const { return m_attributes; } - - protected: - py::dict m_attributes; - }; -} // namespace util diff --git a/src/bindings/python/src/compatibility/pyngraph/dimension.cpp b/src/bindings/python/src/compatibility/pyngraph/dimension.cpp deleted file mode 100644 index 37b7210644eef6..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/dimension.cpp +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/core/dimension.hpp" // ov::Dimension - -#include -#include - -#include -#include -#include - -#include "pyngraph/dimension.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Dimension(py::module m) { - using value_type = ov::Dimension::value_type; - - py::class_> dim(m, "Dimension", py::module_local()); - dim.doc() = "ngraph.impl.Dimension wraps ov::Dimension"; - dim.def(py::init<>()); - dim.def(py::init(), - py::arg("dimension"), - R"( - Construct a static dimension. - - :param dimension: Value of the dimension. - :type dimension: int - )"); - dim.def(py::init(), - py::arg("min_dimension"), - py::arg("max_dimension"), - R"( - Construct a dynamic dimension with bounded range. - - :param min_dimension: The lower inclusive limit for the dimension. - :type min_dimension: int - :param max_dimension: inclusive limit for the dimension. - :type max_dimension: The upper inclusive limit for the dimension. - )"); - - dim.def_static("dynamic", &ov::Dimension::dynamic); - - dim.def_property_readonly("is_dynamic", - &ov::Dimension::is_dynamic, - R"( - Check if Dimension is dynamic. - - :return: True if dynamic, else False. - :rtype: bool - )"); - dim.def_property_readonly("is_static", - &ov::Dimension::is_static, - R"( - Check if Dimension is static. - - :return: True if static, else False. - :rtype: bool - )"); - - dim.def( - "__eq__", - [](const ov::Dimension& a, const ov::Dimension& b) { - return a == b; - }, - py::is_operator()); - dim.def( - "__eq__", - [](const ov::Dimension& a, const int64_t& b) { - return a == b; - }, - py::is_operator()); - - dim.def("__len__", &ov::Dimension::get_length); - dim.def("get_length", - &ov::Dimension::get_length, - R"( - Return this dimension as integer. - This dimension must be static and non-negative. - - :return Value of the dimension. - :rtype: int - )"); - dim.def("get_min_length", - &ov::Dimension::get_min_length, - R"( - Return this dimension's min_dimension as integer. 
- This dimension must be dynamic and non-negative. - - :return: Value of the dimension. - :rtype: int - )"); - dim.def("get_max_length", - &ov::Dimension::get_max_length, - R"( - Return this dimension's max_dimension as integer. - This dimension must be dynamic and non-negative. - - :return: Value of the dimension. - :rtype: int - )"); - - dim.def("same_scheme", - &ov::Dimension::same_scheme, - py::arg("dim"), - R"( - Return this dimension's max_dimension as integer. - This dimension must be dynamic and non-negative. - - :param dim: The other dimension to compare this dimension to. - :type dim: Dimension - :return: True if this dimension and dim are both dynamic, - or if they are both static and equal, otherwise False. - :rtype: bool - )"); - dim.def("compatible", - &ov::Dimension::compatible, - py::arg("d"), - R"( - Check whether this dimension is capable of being merged - with the argument dimension. - - :param d: The dimension to compare this dimension with. - :type d: Dimension - :return: True if this dimension is compatible with d, else False. - :rtype: bool - )"); - dim.def("relaxes", - &ov::Dimension::relaxes, - py::arg("d"), - R"( - Check whether this dimension is a relaxation of the argument. - This dimension relaxes (or is a relaxation of) d if: - - (1) this and d are static and equal - (2) this dimension contains d dimension - - this.relaxes(d) is equivalent to d.refines(this). - - :param d: The dimension to compare this dimension with. - :type d: Dimension - :return: True if this dimension relaxes d, else False. - :rtype: bool - )"); - dim.def("refines", - &ov::Dimension::refines, - py::arg("d"), - R"( - Check whether this dimension is a refinement of the argument. - This dimension refines (or is a refinement of) d if: - - (1) this and d are static and equal - (2) d dimension contains this dimension - - this.refines(d) is equivalent to d.relaxes(this). - - :param d: The dimension to compare this dimension with. - :type d: Dimension - :return: True if this dimension refines d, else False. 
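The relaxes/refines/compatible docstrings in the deleted dimension.cpp describe containment between dimension ranges. A short usage sketch against the public `ov::Dimension` API (the header path is the one included by the deleted file; the specific values are illustrative):

```cpp
#include <openvino/core/dimension.hpp>

#include <cassert>

int main() {
    ov::Dimension stat(3);    // static dimension of exactly 3
    ov::Dimension dyn(1, 8);  // dynamic dimension bounded to [1, 8]

    assert(stat.is_static());
    assert(dyn.is_dynamic());

    // 3 lies inside [1, 8], so the two dimensions can be merged.
    assert(stat.compatible(dyn));

    // Per the docstrings above: [1, 8] contains 3, so dyn relaxes stat
    // and, equivalently, stat refines dyn.
    assert(dyn.relaxes(stat));
    assert(stat.refines(dyn));
    return 0;
}
```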
- :rtype: bool - )"); - - dim.def("__str__", [](const ov::Dimension& self) -> std::string { - std::stringstream ss; - ss << self; - return ss.str(); - }); - - dim.def("__repr__", [](const ov::Dimension& self) -> std::string { - return "() + ">"; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/dimension.hpp b/src/bindings/python/src/compatibility/pyngraph/dimension.hpp deleted file mode 100644 index a0a5ec80f67d4d..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/dimension.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Dimension(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.cpp b/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.cpp deleted file mode 100644 index 9c7df295f0a39d..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/discrete_type_info.hpp" - -#include -#include -#include - -#include "openvino/core/type.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_DiscreteTypeInfo(py::module m) { - py::class_> discrete_type_info(m, - "DiscreteTypeInfo", - py::module_local()); - discrete_type_info.doc() = "ngraph.impl.DiscreteTypeInfo wraps ov::DiscreteTypeInfo"; - - // operator overloading - discrete_type_info.def(py::self < py::self); - discrete_type_info.def(py::self <= py::self); - discrete_type_info.def(py::self > py::self); - discrete_type_info.def(py::self >= py::self); - discrete_type_info.def(py::self == py::self); - discrete_type_info.def(py::self != py::self); - - discrete_type_info.def_readonly("name", &ov::DiscreteTypeInfo::name); - discrete_type_info.def_readonly("version_id", &ov::DiscreteTypeInfo::version_id); - discrete_type_info.def_readonly("parent", &ov::DiscreteTypeInfo::parent); - - discrete_type_info.def("__repr__", [](const ov::DiscreteTypeInfo& self) { - std::string name = std::string(self.name); - std::string version = std::string(self.version_id); - if (self.parent != nullptr) { - std::string parent_version = std::string(self.parent->version_id); - std::string parent_name = self.parent->name; - return ""; - } - return ""; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.hpp b/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.hpp deleted file mode 100644 index ec80f48f9a2a67..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_DiscreteTypeInfo(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/function.cpp b/src/bindings/python/src/compatibility/pyngraph/function.cpp deleted file mode 100644 index a64dd0a36dcc5b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/function.cpp +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/function.hpp" // ngraph::Function - -#include -#include - -#include "ngraph/op/parameter.hpp" // ngraph::op::Parameter -#include "ngraph/op/sink.hpp" -#include 
"pyngraph/function.hpp" - -namespace py = pybind11; - -static const char* CAPSULE_NAME = "ngraph_function"; - -void regclass_pyngraph_Function(py::module m) { - py::class_> function(m, "Function", py::module_local()); - function.doc() = "ngraph.impl.Function wraps ngraph::Function"; - - function.def(py::init([](const ngraph::ResultVector& res, - const std::vector>& nodes, - const ngraph::ParameterVector& params, - const std::string& name) { - ngraph::SinkVector sinks; - for (const auto& node : nodes) { - auto sink = std::dynamic_pointer_cast(node); - NGRAPH_CHECK(sink != nullptr, "Node {} is not instance of Sink"); - sinks.push_back(sink); - } - return std::make_shared(res, sinks, params, name); - }), - py::arg("results"), - py::arg("sinks"), - py::arg("parameters"), - py::arg("name"), - R"( - Create user-defined Function which is a representation of a model. - - Parameters - ---------- - results : List[op.Result] - List of results. - - sinks : List[Node] - List of Nodes to be used as Sinks (e.g. Assign ops). - - parameters : List[op.Parameter] - List of parameters. - - name : str - String to set as function's friendly name. - )"); - - function.def(py::init>&, - const std::vector>&, - const std::string&>(), - py::arg("results"), - py::arg("parameters"), - py::arg("name"), - R"( - Create user-defined Function which is a representation of a model. - - Parameters - ---------- - results : List[Node] - List of Nodes to be used as results. - - parameters : List[op.Parameter] - List of parameters. - - name : str - String to set as function's friendly name. - )"); - - function.def(py::init&, - const std::vector>&, - const std::string&>(), - py::arg("result"), - py::arg("parameters"), - py::arg("name"), - R"( - Create user-defined Function which is a representation of a model. - - Parameters - ---------- - results : Node - Node to be used as result. - - parameters : List[op.Parameter] - List of parameters. - - name : str - String to set as function's friendly name. - )"); - function.def("get_output_size", - &ngraph::Function::get_output_size, - R"( - Return the number of outputs for the function. - - Returns - ---------- - get_output_size : int - Number of outputs. - )"); - function.def("get_ops", - &ngraph::Function::get_ops, - R"( - Return ops used in the function. - - Returns - ---------- - get_ops : List[Node] - List of Nodes representing ops used in function. - )"); - function.def("get_ordered_ops", - &ngraph::Function::get_ordered_ops, - R"( - Return ops used in the function in topological order. - - Returns - ---------- - get_ordered_ops : List[Node] - List of sorted Nodes representing ops used in function. 
- )"); - function.def("get_output_op", - &ngraph::Function::get_output_op, - py::arg("i"), - R"( - Return the op that generates output i - - Parameters - ---------- - i : int - output index - - Returns - ---------- - get_output_op : Node - Node object that generates output i - )"); - function.def("get_output_element_type", - &ngraph::Function::get_output_element_type, - py::arg("i"), - R"( - Return the element type of output i - - Parameters - ---------- - i : int - output index - - Returns - ---------- - get_output_op : Type - Type object of output i - )"); - function.def("get_output_shape", - &ngraph::Function::get_output_shape, - py::arg("i"), - R"( - Return the shape of element i - - Parameters - ---------- - i : int - element index - - Returns - ---------- - get_output_shape : Shape - Shape object of element i - )"); - function.def("get_output_partial_shape", - &ngraph::Function::get_output_partial_shape, - py::arg("i"), - R"( - Return the partial shape of element i - - Parameters - ---------- - i : int - element index - - Returns - ---------- - get_output_partial_shape : PartialShape - PartialShape object of element i - )"); - function.def("get_parameters", - &ngraph::Function::get_parameters, - R"( - Return the function parameters. - - Returns - ---------- - get_parameters : ParameterVector - ParameterVector containing function parameters. - )"); - function.def("get_results", - &ngraph::Function::get_results, - R"( - Return a list of function outputs. - - Returns - ---------- - get_results : ResultVector - ResultVector containing function parameters. - )"); - function.def("get_result", - &ngraph::Function::get_result, - R"( - Return single result. - - Returns - ---------- - get_result : Node - Node object representing result. - )"); - function.def("get_name", - &ngraph::Function::get_name, - R"( - Get the unique name of the function. - - Returns - ---------- - get_name : str - String with a name of the function. - )"); - function.def("get_friendly_name", - &ngraph::Function::get_friendly_name, - R"( - Gets the friendly name for a function. If no - friendly name has been set via set_friendly_name - then the function's unique name is returned. - - Returns - ---------- - get_friendly_name : str - String with a friendly name of the function. - )"); - function.def("set_friendly_name", - &ngraph::Function::set_friendly_name, - py::arg("name"), - R"( - Sets a friendly name for a function. This does - not overwrite the unique name of the function and - is retrieved via get_friendly_name(). Used mainly - for debugging. - - Parameters - ---------- - name : str - String to set as the friendly name. - )"); - function.def("is_dynamic", - &ngraph::Function::is_dynamic, - R"( - Returns true if any of the op's defined in the function - contains partial shape. 
- - Returns - ---------- - is_dynamic : bool - )"); - function.def("__repr__", [](const ngraph::Function& self) { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::stringstream shapes_ss; - for (size_t i = 0; i < self.get_output_size(); ++i) { - if (i > 0) { - shapes_ss << ", "; - } - shapes_ss << self.get_output_partial_shape(i); - } - return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>"; - }); - function.def_static("from_capsule", [](py::object* capsule) { - // get the underlying PyObject* which is a PyCapsule pointer - auto* pybind_capsule_ptr = capsule->ptr(); - // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME - auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME); - - auto* ngraph_function = static_cast*>(capsule_ptr); - if (ngraph_function && *ngraph_function) { - return *ngraph_function; - } else { - throw std::runtime_error("The provided capsule does not contain an ngraph::Function"); - } - }); - function.def_static("to_capsule", [](std::shared_ptr& ngraph_function) { - // create a shared pointer on the heap before putting it in the capsule - // this secures the lifetime of the object transferred by the capsule - auto* sp_copy = new std::shared_ptr(ngraph_function); - - // a destructor callback that will delete the heap allocated shared_ptr - // when the capsule is destructed - auto sp_deleter = [](PyObject* capsule) { - auto* capsule_ptr = PyCapsule_GetPointer(capsule, CAPSULE_NAME); - auto* function_sp = static_cast*>(capsule_ptr); - if (function_sp) { - delete function_sp; - } - }; - - // put the shared_ptr in a new capsule under the same name as in "from_capsule" - auto pybind_capsule = py::capsule(sp_copy, CAPSULE_NAME, sp_deleter); - - return pybind_capsule; - }); - - function.def_property_readonly("name", &ngraph::Function::get_name); - function.def_property("friendly_name", &ngraph::Function::get_friendly_name, &ngraph::Function::set_friendly_name); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/function.hpp b/src/bindings/python/src/compatibility/pyngraph/function.hpp deleted file mode 100644 index 7bfb8328004242..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/function.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Function(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node.cpp b/src/bindings/python/src/compatibility/pyngraph/node.cpp deleted file mode 100644 index f696a4297cad7f..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node.cpp +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/node.hpp" - -#include -#include -#include - -#include "dict_attribute_visitor.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/divide.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/subtract.hpp" -#include "pyngraph/node.hpp" -#include "pyngraph/rt_map.hpp" -#include "pyngraph/variant.hpp" - -class PyNode : public ngraph::Node { -public: - std::shared_ptr clone_with_new_inputs(const ngraph::OutputVector& inputs) const override { - PYBIND11_OVERRIDE_PURE(std::shared_ptr, ngraph::Node, clone_with_new_inputs, inputs); - } - - const type_info_t& get_type_info() const override { - PYBIND11_OVERRIDE_PURE(type_info_t&, 
ngraph::Node, get_type_info, ); - } -}; - -namespace impl { -namespace { -py::dict get_attributes(const std::shared_ptr& node) { - util::DictAttributeSerializer dict_serializer(node); - return dict_serializer.get_attributes(); -} - -void set_attribute(std::shared_ptr& node, const std::string& atr_name, py::object value) { - py::dict attr_dict; - attr_dict[atr_name.c_str()] = value; - std::unordered_map> variables; - util::DictAttributeDeserializer dict_deserializer(attr_dict, variables); - node->visit_attributes(dict_deserializer); -} -} // namespace -} // namespace impl - -namespace py = pybind11; - -using PyRTMap = ngraph::Node::RTMap; - -PYBIND11_MAKE_OPAQUE(PyRTMap); - -void regclass_pyngraph_Node(py::module m) { - py::class_, PyNode> node(m, - "Node", - py::dynamic_attr(), - py::module_local()); - node.doc() = "ngraph.impl.Node wraps ngraph::Node"; - node.def( - "__add__", - [](const std::shared_ptr& a, const std::shared_ptr b) { - return std::make_shared(a, b); - }, - py::is_operator()); - node.def( - "__sub__", - [](const std::shared_ptr& a, const std::shared_ptr b) { - return std::make_shared(a, b); - }, - py::is_operator()); - node.def( - "__mul__", - [](const std::shared_ptr& a, const std::shared_ptr b) { - return std::make_shared(a, b); - }, - py::is_operator()); - node.def( - "__div__", - [](const std::shared_ptr& a, const std::shared_ptr b) { - return std::make_shared(a, b); - }, - py::is_operator()); - node.def( - "__truediv__", - [](const std::shared_ptr& a, const std::shared_ptr b) { - return std::make_shared(a, b); - }, - py::is_operator()); - - node.def("__repr__", [](const ngraph::Node& self) { - std::string type_name = self.get_type_name(); - std::stringstream shapes_ss; - for (size_t i = 0; i < self.get_output_size(); ++i) { - if (i > 0) { - shapes_ss << ", "; - } - shapes_ss << self.get_output_partial_shape(i); - } - return "<" + type_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>"; - }); - - node.def("get_element_type", - &ngraph::Node::get_element_type, - R"( - Checks that there is exactly one output and returns it's element type. - - Returns - ---------- - get_element_type : Type - Type of the output. - )"); - node.def("get_output_size", - &ngraph::Node::get_output_size, - R"( - Returns the number of outputs from the node. - - Returns - ---------- - get_element_type : int - Number of outputs. - )"); - node.def("get_output_element_type", - &ngraph::Node::get_output_element_type, - py::arg("i"), - R"( - Returns the element type for output i - - Parameters - ---------- - i : int - Index of the output. - - Returns - ---------- - get_output_element_type : Type - Type of the output i - )"); - node.def("get_output_shape", - &ngraph::Node::get_output_shape, - py::arg("i"), - R"( - Returns the shape for output i - - Parameters - ---------- - i : int - Index of the output. - - Returns - ---------- - get_output_shape : Shape - Shape of the output i - )"); - node.def("get_output_partial_shape", - &ngraph::Node::get_output_partial_shape, - py::arg("i"), - R"( - Returns the partial shape for output i - - Parameters - ---------- - i : int - Index of the output. - - Returns - ---------- - get_output_partial_shape : PartialShape - PartialShape of the output i - )"); - node.def("get_type_name", - &ngraph::Node::get_type_name, - R"( - Returns Type's name from the node. - - Returns - ---------- - get_type_name : str - String repesenting Type's name. 
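The __add__/__sub__/__mul__/__truediv__ lambdas above map Python arithmetic operators onto graph-building ops. The general pybind11 pattern, shown here on a hypothetical Vec2 value type that is not part of this patch:

#include <pybind11/pybind11.h>

namespace py = pybind11;

struct Vec2 {
    double x, y;
    Vec2(double x_, double y_) : x(x_), y(y_) {}
};

PYBIND11_MODULE(example, m) {
    py::class_<Vec2>(m, "Vec2")
        .def(py::init<double, double>())
        // Python's  a + b  dispatches here; py::is_operator() makes the binding
        // return NotImplemented on an unsupported right-hand type instead of raising.
        .def("__add__",
             [](const Vec2& a, const Vec2& b) { return Vec2(a.x + b.x, a.y + b.y); },
             py::is_operator());
}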
- )"); - node.def("get_name", - &ngraph::Node::get_name, - R"( - Get the unique name of the node - - Returns - ---------- - get_name : str - Unique name of the node. - )"); - node.def("get_friendly_name", - &ngraph::Node::get_friendly_name, - R"( - Gets the friendly name for a node. If no friendly name has - been set via set_friendly_name then the node's unique name - is returned. - - Returns - ---------- - get_name : str - Friendly name of the node. - )"); - node.def("get_type_info", &ngraph::Node::get_type_info); - node.def("set_friendly_name", - &ngraph::Node::set_friendly_name, - py::arg("name"), - R"( - Sets a friendly name for a node. This does not overwrite the unique name - of the node and is retrieved via get_friendly_name(). Used mainly for - debugging. The friendly name may be set exactly once. - - Parameters - ---------- - name : str - Friendly name to set. - )"); - node.def("input", - (ngraph::Input(ngraph::Node::*)(size_t)) & ngraph::Node::input, - py::arg("input_index"), - R"( - A handle to the input_index input of this node. - - Parameters - ---------- - input_index : int - Index of Input. - - Returns - ---------- - input : Input - Input of this node. - )"); - node.def("inputs", - (std::vector>(ngraph::Node::*)()) & ngraph::Node::inputs, - R"( - A list containing a handle for each of this node's inputs, in order. - - Returns - ---------- - inputs : List[Input] - List of node's inputs. - )"); - node.def("output", - (ngraph::Output(ngraph::Node::*)(size_t)) & ngraph::Node::output, - py::arg("output_index"), - R"( - A handle to the output_index output of this node. - - Parameters - ---------- - output_index : int - Index of Output. - - Returns - ---------- - input : Output - Output of this node. - )"); - node.def("outputs", - (std::vector>(ngraph::Node::*)()) & ngraph::Node::outputs, - R"( - A list containing a handle for each of this node's outputs, in order. - - Returns - ---------- - inputs : List[Output] - List of node's outputs. - )"); - node.def("get_rt_info", - (PyRTMap & (ngraph::Node::*)()) & ngraph::Node::get_rt_info, - py::return_value_policy::reference_internal, - R"( - Returns PyRTMap which is a dictionary of user defined runtime info. - - Returns - ---------- - get_rt_info : PyRTMap - A dictionary of user defined data. 
- )"); - - node.def("set_argument", &ngraph::Node::set_argument); - node.def("set_arguments", [](const std::shared_ptr& self, const ngraph::NodeVector& args) { - self->set_arguments(args); - }); - node.def("set_arguments", [](const std::shared_ptr& self, const ngraph::OutputVector& args) { - self->set_arguments(args); - }); - - node.def_property_readonly("shape", &ngraph::Node::get_shape); - node.def_property_readonly("name", &ngraph::Node::get_name); - node.def_property_readonly("rt_info", - (PyRTMap & (ngraph::Node::*)()) & ngraph::Node::get_rt_info, - py::return_value_policy::reference_internal); - node.def_property_readonly("type_info", &ngraph::Node::get_type_info); - node.def_property("friendly_name", &ngraph::Node::get_friendly_name, &ngraph::Node::set_friendly_name); - - node.def("get_attributes", &impl::get_attributes); - node.def("set_attribute", &impl::set_attribute); - // for backwards compatibility, this is how this method was named until 2021.4 - node.def("_get_attributes", &impl::get_attributes); - // for backwards compatibility, this is how this method was named until 2021.4 - node.def("_set_attribute", &impl::set_attribute); - node.def("set_arguments", [](const std::shared_ptr& self, const ngraph::OutputVector& arguments) { - return self->set_arguments(arguments); - }); - node.def("validate", [](const std::shared_ptr& self) { - return self->constructor_validate_and_infer_types(); - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node.hpp b/src/bindings/python/src/compatibility/pyngraph/node.hpp deleted file mode 100644 index 03734ae1f98504..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Node(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node_factory.cpp b/src/bindings/python/src/compatibility/pyngraph/node_factory.cpp deleted file mode 100644 index 65e1646f742ae4..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_factory.cpp +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "node_factory.hpp" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dict_attribute_visitor.hpp" -#include "ngraph/check.hpp" -#include "ngraph/except.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/op/util/variable.hpp" -#include "ngraph/opsets/opset.hpp" -#include "openvino/opsets/opset.hpp" - -namespace py = pybind11; - -namespace { -class NodeFactory { -public: - NodeFactory() {} - NodeFactory(const std::string& opset_name) : m_opset(get_opset(opset_name)) {} - - std::shared_ptr create(const std::string op_type_name, - const ngraph::OutputVector& arguments, - const py::dict& attributes = py::dict()) { - std::shared_ptr op_node = std::shared_ptr(m_opset.create(op_type_name)); - - NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name); - NGRAPH_CHECK(!ngraph::op::is_constant(op_node), - "Currently NodeFactory doesn't support Constant node: ", - op_type_name); - - util::DictAttributeDeserializer visitor(attributes, m_variables); - - op_node->set_arguments(arguments); - op_node->visit_attributes(visitor); - op_node->constructor_validate_and_infer_types(); - - return op_node; - } - - 
std::shared_ptr create(const std::string op_type_name) { - std::shared_ptr op_node = std::shared_ptr(m_opset.create(op_type_name)); - - NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name); - NGRAPH_CHECK(!ngraph::op::is_constant(op_node), - "Currently NodeFactory doesn't support Constant node: ", - op_type_name); - - return op_node; - } - -private: - const ngraph::OpSet& get_opset(std::string opset_ver) { - std::locale loc; - std::transform(opset_ver.begin(), opset_ver.end(), opset_ver.begin(), [&loc](char c) { - return std::tolower(c, loc); - }); - - const auto& s_opsets = ngraph::get_available_opsets(); - - auto it = s_opsets.find(opset_ver); - if (it == s_opsets.end()) { - OPENVINO_THROW("Unsupported opset version requested."); - } - return it->second(); - } - - const ngraph::OpSet& m_opset = ngraph::get_opset11(); - std::unordered_map> m_variables; -}; -} // namespace - -void regclass_pyngraph_NodeFactory(py::module m) { - py::class_ node_factory(m, "NodeFactory", py::module_local()); - node_factory.doc() = "NodeFactory creates nGraph nodes"; - - node_factory.def(py::init()); - node_factory.def(py::init()); - - node_factory.def("create", [](NodeFactory& self, const std::string name) { - return self.create(name); - }); - node_factory.def("create", - [](NodeFactory& self, - const std::string name, - const ngraph::OutputVector& arguments, - const py::dict& attributes) { - return self.create(name, arguments, attributes); - }); - - node_factory.def("__repr__", [](const NodeFactory& self) { - return ""; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node_factory.hpp b/src/bindings/python/src/compatibility/pyngraph/node_factory.hpp deleted file mode 100644 index d7835165d669a0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_factory.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_NodeFactory(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node_input.cpp b/src/bindings/python/src/compatibility/pyngraph/node_input.cpp deleted file mode 100644 index c6af1e05d50225..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_input.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/node_input.hpp" - -#include - -#include "dict_attribute_visitor.hpp" -#include "pyngraph/node_input.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Input(py::module m) { - py::class_, std::shared_ptr>> input(m, - "Input", - py::dynamic_attr(), - py::module_local()); - input.doc() = "ngraph.impl.Input wraps ngraph::Input"; - - input.def("get_node", - &ngraph::Input::get_node, - R"( - Get node referenced by this input handle. - - Returns - ---------- - get_node : Node - Node object referenced by this input handle. - )"); - input.def("get_index", - &ngraph::Input::get_index, - R"( - The index of the input referred to by this input handle. - - Returns - ---------- - get_index : int - Index value as integer. - )"); - input.def("get_element_type", - &ngraph::Input::get_element_type, - R"( - The element type of the input referred to by this input handle. - - Returns - ---------- - get_element_type : Type - Type of the input. - )"); - input.def("get_shape", - &ngraph::Input::get_shape, - R"( - The shape of the input referred to by this input handle. 
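NodeFactory::create above resolves an operator by name from the chosen opset, applies the attribute visitor, and then validates the node. The lookup-then-validate core of that flow, reduced to a hypothetical registry with no attribute handling:

#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

// Hypothetical stand-ins for the opset's node types.
struct Op { virtual ~Op() = default; virtual void validate() {} };
struct Add  : Op {};
struct Relu : Op {};

class TinyFactory {
public:
    TinyFactory() {
        m_creators["Add"]  = [] { return std::make_shared<Add>(); };
        m_creators["Relu"] = [] { return std::make_shared<Relu>(); };
    }

    std::shared_ptr<Op> create(const std::string& type_name) const {
        auto it = m_creators.find(type_name);
        if (it == m_creators.end())
            throw std::runtime_error("Couldn't create operator: " + type_name);
        auto op = it->second();
        op->validate();  // plays the role of constructor_validate_and_infer_types()
        return op;
    }

private:
    std::map<std::string, std::function<std::shared_ptr<Op>()>> m_creators;
};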
- - Returns - ---------- - get_shape : Shape - Shape of the input. - )"); - input.def("get_partial_shape", - &ngraph::Input::get_partial_shape, - R"( - The partial shape of the input referred to by this input handle. - - Returns - ---------- - get_partial_shape : PartialShape - PartialShape of the input. - )"); - input.def("get_source_output", - &ngraph::Input::get_source_output, - R"( - A handle to the output that is connected to this input. - - Returns - ---------- - get_source_output : Output - Output that is connected to the input. - )"); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node_input.hpp b/src/bindings/python/src/compatibility/pyngraph/node_input.hpp deleted file mode 100644 index f4eaa0aa7acca0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_input.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Input(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node_output.cpp b/src/bindings/python/src/compatibility/pyngraph/node_output.cpp deleted file mode 100644 index 569f1bdf6e1ff0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_output.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/node_output.hpp" - -#include - -#include "dict_attribute_visitor.hpp" -#include "pyngraph/node_output.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Output(py::module m) { - py::class_, std::shared_ptr>> output(m, - "Output", - py::dynamic_attr(), - py::module_local()); - output.doc() = "ngraph.impl.Output wraps ngraph::Output"; - - output.def("get_node", - &ngraph::Output::get_node, - R"( - Get node referenced by this output handle. - - Returns - ---------- - get_node : Node - Node object referenced by this output handle. - )"); - output.def("get_index", - &ngraph::Output::get_index, - R"( - The index of the output referred to by this output handle. - - Returns - ---------- - get_index : int - Index value as integer. - )"); - output.def("get_element_type", - &ngraph::Output::get_element_type, - R"( - The element type of the output referred to by this output handle. - - Returns - ---------- - get_element_type : Type - Type of the output. - )"); - output.def("get_shape", - &ngraph::Output::get_shape, - R"( - The shape of the output referred to by this output handle. - - Returns - ---------- - get_shape : Shape - Shape of the output. - )"); - output.def("get_partial_shape", - &ngraph::Output::get_partial_shape, - R"( - The partial shape of the output referred to by this output handle. - - Returns - ---------- - get_partial_shape : PartialShape - PartialShape of the output. - )"); - output.def("get_target_inputs", - &ngraph::Output::get_target_inputs, - R"( - A set containing handles for all inputs targeted by the output - referenced by this output handle. - Returns - ---------- - get_target_inputs : Set[Input] - Set of Inputs. 
- )"); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node_output.hpp b/src/bindings/python/src/compatibility/pyngraph/node_output.hpp deleted file mode 100644 index db94e760d58a58..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_output.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Output(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/constant.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/constant.cpp deleted file mode 100644 index 5b4fd01ea162d8..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/constant.cpp +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/constant.hpp" - -#include -#include -#include -#include - -#include -#include - -#include "ngraph/shape.hpp" -#include "pyngraph/ops/constant.hpp" - -namespace py = pybind11; - -template -std::vector _get_byte_strides(const ngraph::Shape& s) { - std::vector byte_strides; - std::vector element_strides = ngraph::row_major_strides(s); - for (auto v : element_strides) { - byte_strides.push_back(static_cast(v) * sizeof(T)); - } - return byte_strides; -} - -template -py::buffer_info _get_buffer_info(const ngraph::op::Constant& c) { - ngraph::Shape shape = c.get_shape(); - return py::buffer_info(const_cast(c.get_data_ptr()), /* Pointer to buffer */ - static_cast(c.get_element_type().size()), /* Size of one scalar */ - py::format_descriptor::format(), /* Python struct-style format descriptor */ - static_cast(shape.size()), /* Number of dimensions */ - std::vector{shape.begin(), shape.end()}, /* Buffer dimensions */ - _get_byte_strides(shape) /* Strides (in bytes) for each index */ - ); -} - -template <> -py::buffer_info _get_buffer_info(const ngraph::op::Constant& c) { - ngraph::Shape shape = c.get_shape(); - return py::buffer_info(const_cast(c.get_data_ptr()), /* Pointer to buffer */ - static_cast(c.get_element_type().size()), /* Size of one scalar */ - std::string(1, 'H'), /* Python struct-style format descriptor */ - static_cast(shape.size()), /* Number of dimensions */ - std::vector{shape.begin(), shape.end()}, /* Buffer dimensions */ - _get_byte_strides(shape) /* Strides (in bytes) for each index */ - ); -} - -template -py::array _cast_vector(const ngraph::op::Constant& self) { - auto vec = self.cast_vector(); - return py::array(vec.size(), vec.data()); -} - -void regclass_pyngraph_op_Constant(py::module m) { - py::class_, ngraph::Node> constant( - m, - "Constant", - py::buffer_protocol(), - py::module_local()); - constant.doc() = "ngraph.impl.op.Constant wraps ngraph::op::Constant"; - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - - constant.def("get_value_strings", &ngraph::op::Constant::get_value_strings); - - constant.def("get_vector", [](const ngraph::op::Constant& self) { - auto element_type = self.get_element_type(); - if (element_type == ngraph::element::boolean) { - return _cast_vector(self); - } else if (element_type == ngraph::element::f16) { - return 
_cast_vector(self); - } else if (element_type == ngraph::element::f32) { - return _cast_vector(self); - } else if (element_type == ngraph::element::f64) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i8) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i16) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i32) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i64) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u16) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u32) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u64) { - return _cast_vector(self); - } else { - throw std::runtime_error("Unsupported data type!"); - } - }); - - // Provide buffer access - constant.def_buffer([](const ngraph::op::Constant& self) -> py::buffer_info { - auto element_type = self.get_element_type(); - if (element_type == ngraph::element::boolean) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::f16) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::f32) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::f64) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i8) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i16) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i32) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i64) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u16) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u32) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u64) { - return _get_buffer_info(self); - } else { - throw std::runtime_error("Unsupported data type!"); - } - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/constant.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/constant.hpp deleted file mode 100644 index e1a4324b778e26..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/constant.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_Constant(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp deleted file mode 100644 index 43f0358ea04199..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/parameter.hpp" - -#include -#include - -#include - -#include "ngraph/node.hpp" -#include "ngraph/partial_shape.hpp" // ngraph::PartialShape -#include "pyngraph/ops/parameter.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_Parameter(py::module m) { - py::class_, ngraph::Node> parameter( - m, - "Parameter", - py::module_local()); - parameter.doc() = 
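The Constant binding above implements the Python buffer protocol: def_buffer hands numpy a py::buffer_info describing the raw storage, element size, format string, shape, and byte strides, so the data can be wrapped without copying (e.g. np.array(obj, copy=False)). The same mechanism on a hypothetical row-major float Matrix:

#include <pybind11/pybind11.h>
#include <vector>

namespace py = pybind11;

struct Matrix {
    size_t rows, cols;
    std::vector<float> data;
    Matrix(size_t r, size_t c) : rows(r), cols(c), data(r * c) {}
};

PYBIND11_MODULE(example, m) {
    py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
        .def(py::init<size_t, size_t>())
        .def_buffer([](Matrix& self) -> py::buffer_info {
            return py::buffer_info(
                self.data.data(),                           // pointer to storage
                sizeof(float),                              // size of one scalar
                py::format_descriptor<float>::format(),     // struct-style format
                2,                                          // number of dimensions
                {self.rows, self.cols},                     // shape
                {sizeof(float) * self.cols, sizeof(float)}  // strides in bytes
            );
        });
}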
"ngraph.impl.op.Parameter wraps ngraph::op::Parameter"; - parameter.def("__repr__", [](const ngraph::Node& self) { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::string shape = py::cast(self.get_output_partial_shape(0)).attr("__str__")().cast(); - std::string type = self.get_element_type().c_type_string(); - return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shape + ", " + type + ")>"; - }); - - parameter.def(py::init()); - parameter.def(py::init()); - // parameter.def_property_readonly("description", &ngraph::op::Parameter::description); - - parameter.def( - "get_partial_shape", - (const ngraph::PartialShape& (ngraph::op::Parameter::*)() const) & ngraph::op::Parameter::get_partial_shape); - parameter.def("get_partial_shape", - (ngraph::PartialShape & (ngraph::op::Parameter::*)()) & ngraph::op::Parameter::get_partial_shape); - parameter.def("set_partial_shape", &ngraph::op::Parameter::set_partial_shape); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp deleted file mode 100644 index d09e1dbe238753..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_Parameter(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/result.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/result.cpp deleted file mode 100644 index 92b05896754ac0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/result.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/result.hpp" - -#include -#include - -#include - -#include "ngraph/node.hpp" -#include "pyngraph/ops/result.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_Result(py::module m) { - py::class_, ngraph::Node> result(m, - "Result", - py::module_local()); - result.doc() = "ngraph.impl.op.Result wraps ngraph::op::Result"; -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/result.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/result.hpp deleted file mode 100644 index 3a62bcffda4dcb..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/result.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_Result(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp deleted file mode 100644 index f4ea868ed46cd3..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/arithmetic_reduction.hpp" - -#include -#include - -#include "ngraph/op/op.hpp" -#include "pyngraph/ops/util/arithmetic_reduction.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_ArithmeticReduction(py::module m) { - py::class_> - arithmeticReduction(m, "ArithmeticReduction", py::module_local()); - // 
arithmeticReduction.def(py::init&, - // const ngraph::AxisSet& >()); - arithmeticReduction.def("get_reduction_axes", &ngraph::op::util::ArithmeticReduction::get_reduction_axes); - arithmeticReduction.def("set_reduction_axes", &ngraph::op::util::ArithmeticReduction::set_reduction_axes); - - arithmeticReduction.def_property("reduction_axes", - &ngraph::op::util::ArithmeticReduction::get_reduction_axes, - &ngraph::op::util::ArithmeticReduction::set_reduction_axes); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp deleted file mode 100644 index ff3aa03d2fe27c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_ArithmeticReduction(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp deleted file mode 100644 index 698afbe72124e1..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" - -#include -#include - -#include "pyngraph/ops/util/binary_elementwise_arithmetic.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseArithmetic(py::module m) { - py::class_> - binaryElementwiseArithmetic(m, "BinaryElementwiseArithmetic", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp deleted file mode 100644 index dbaf2d6adf89b4..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseArithmetic(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp deleted file mode 100644 index b86b3d52b90eb7..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" - -#include -#include - -#include "pyngraph/ops/util/binary_elementwise_comparison.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseComparison(py::module m) { - py::class_> - binaryElementwiseComparison(m, "BinaryElementwiseComparison", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp deleted file mode 100644 index 
2f4043cdff420d..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseComparison(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp deleted file mode 100644 index 8db524492ea39b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/binary_elementwise_logical.hpp" - -#include -#include - -#include "pyngraph/ops/util/binary_elementwise_logical.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseLogical(py::module m) { - py::class_> - binaryElementwiseLogical(m, "BinaryElementwiseLogical", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp deleted file mode 100644 index 4f8ba39f532376..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseLogical(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp deleted file mode 100644 index be9132386a95e7..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/index_reduction.hpp" - -#include -#include - -#include "ngraph/op/op.hpp" -#include "pyngraph/ops/util/index_reduction.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_IndexReduction(py::module m) { - py::class_> indexReduction( - m, - "IndexReduction", - py::module_local()); - - indexReduction.def("get_reduction_axis", &ngraph::op::util::IndexReduction::get_reduction_axis); - indexReduction.def("set_reduction_axis", &ngraph::op::util::IndexReduction::set_reduction_axis); - indexReduction.def("get_index_element_type", &ngraph::op::util::IndexReduction::get_index_element_type); - indexReduction.def("set_index_element_type", &ngraph::op::util::IndexReduction::set_index_element_type); - - indexReduction.def_property("reduction_axis", - &ngraph::op::util::IndexReduction::get_reduction_axis, - &ngraph::op::util::IndexReduction::set_reduction_axis); - indexReduction.def_property("index_element_type", - &ngraph::op::util::IndexReduction::get_index_element_type, - &ngraph::op::util::IndexReduction::set_index_element_type); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp deleted file mode 100644 index 756e839ac610ff..00000000000000 --- 
a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_IndexReduction(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp deleted file mode 100644 index a3da02357a4048..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/op_annotations.hpp" - -#include -#include - -#include "pyngraph/ops/util/op_annotations.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_OpAnnotations(py::module m) { - py::class_> opAnnotations( - m, - "OpAnnotations", - py::module_local()); - opAnnotations.def(py::init<>()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp deleted file mode 100644 index 699e1531dc5d1c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_OpAnnotations(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp deleted file mode 100644 index 7a5e5821138099..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/ops/util/regmodule_pyngraph_op_util.hpp" - -#include - -namespace py = pybind11; - -void regmodule_pyngraph_op_util(py::module m) { - py::module m_util = m.def_submodule("util", "module pyngraph.op.util"); - regclass_pyngraph_op_util_OpAnnotations(m_util); - regclass_pyngraph_op_util_ArithmeticReduction(m_util); - regclass_pyngraph_op_util_BinaryElementwiseArithmetic(m_util); - regclass_pyngraph_op_util_BinaryElementwiseComparison(m_util); - regclass_pyngraph_op_util_BinaryElementwiseLogical(m_util); - regclass_pyngraph_op_util_UnaryElementwiseArithmetic(m_util); - regclass_pyngraph_op_util_IndexReduction(m_util); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp deleted file mode 100644 index 57d73c3f8ecbb5..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include "pyngraph/ops/util/arithmetic_reduction.hpp" -#include "pyngraph/ops/util/binary_elementwise_arithmetic.hpp" -#include "pyngraph/ops/util/binary_elementwise_comparison.hpp" -#include "pyngraph/ops/util/binary_elementwise_logical.hpp" -#include "pyngraph/ops/util/index_reduction.hpp" -#include 
"pyngraph/ops/util/op_annotations.hpp" -#include "pyngraph/ops/util/unary_elementwise_arithmetic.hpp" - -namespace py = pybind11; - -void regmodule_pyngraph_op_util(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp deleted file mode 100644 index 98b524a2dc94ac..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" - -#include -#include - -#include "pyngraph/ops/util/unary_elementwise_arithmetic.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_UnaryElementwiseArithmetic(py::module m) { - py::class_> - unaryElementwiseArithmetic(m, "UnaryElementwiseArithmetic", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp deleted file mode 100644 index 9744721a0c1b88..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_UnaryElementwiseArithmetic(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/partial_shape.cpp b/src/bindings/python/src/compatibility/pyngraph/partial_shape.cpp deleted file mode 100644 index 69c333d729bb30..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/partial_shape.cpp +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/partial_shape.hpp" // ngraph::PartialShape - -#include -#include - -#include -#include -#include - -#include "ngraph/shape.hpp" // ngraph::Shape -#include "openvino/core/dimension.hpp" // ov::Dimension -#include "pyngraph/partial_shape.hpp" - -namespace py = pybind11; - -static const char* CAPSULE_NAME = "ngraph_partial_shape"; - -void regclass_pyngraph_PartialShape(py::module m) { - py::class_> shape(m, - "PartialShape", - py::module_local()); - shape.doc() = "ngraph.impl.PartialShape wraps ngraph::PartialShape"; - - shape.def(py::init([](const std::vector& dimensions) { - return ngraph::PartialShape(std::vector(dimensions.begin(), dimensions.end())); - })); - shape.def(py::init&>()); - shape.def(py::init&>()); - shape.def(py::init&>()); - shape.def(py::init&>()); - shape.def(py::init()); - shape.def(py::init()); - - shape.def_static("dynamic", &ngraph::PartialShape::dynamic, py::arg("r") = ov::Dimension()); - - shape.def_property_readonly("is_dynamic", - &ngraph::PartialShape::is_dynamic, - R"( - False if this shape is static, else True. - A shape is considered static if it has static rank, - and all dimensions of the shape are static. - )"); - shape.def_property_readonly("is_static", - &ngraph::PartialShape::is_static, - R"( - True if this shape is static, else False. - A shape is considered static if it has static rank, - and all dimensions of the shape are static. - )"); - shape.def_property_readonly("rank", - &ngraph::PartialShape::rank, - R"( - The rank of the shape. 
- )"); - shape.def_property_readonly("all_non_negative", - &ngraph::PartialShape::all_non_negative, - R"( - True if all static dimensions of the tensor are - non-negative, else False. - )"); - - shape.def("compatible", - &ngraph::PartialShape::compatible, - py::arg("s"), - R"( - Check whether this shape is compatible with the argument, i.e., - whether it is possible to merge them. - - Parameters - ---------- - s : PartialShape - The shape to be checked for compatibility with this shape. - - - Returns - ---------- - compatible : bool - True if this shape is compatible with s, else False. - )"); - shape.def("refines", - &ngraph::PartialShape::refines, - py::arg("s"), - R"( - Check whether this shape is a refinement of the argument. - - Parameters - ---------- - s : PartialShape - The shape which is being compared against this shape. - - Returns - ---------- - refines : bool - True if this shape refines s, else False. - )"); - shape.def("relaxes", - &ngraph::PartialShape::relaxes, - py::arg("s"), - R"( - Check whether this shape is a relaxation of the argument. - - Parameters - ---------- - s : PartialShape - The shape which is being compared against this shape. - - Returns - ---------- - relaxes : bool - True if this shape relaxes s, else False. - )"); - shape.def("same_scheme", - &ngraph::PartialShape::same_scheme, - py::arg("s"), - R"( - Check whether this shape represents the same scheme as the argument. - - Parameters - ---------- - s : PartialShape - The shape which is being compared against this shape. - - Returns - ---------- - same_scheme : bool - True if shape represents the same scheme as s, else False. - )"); - shape.def("get_max_shape", - &ngraph::PartialShape::get_max_shape, - R"( - Returns - ---------- - get_max_shape : Shape - Get the max bounding shape. - )"); - shape.def("get_min_shape", - &ngraph::PartialShape::get_min_shape, - R"( - Returns - ---------- - get_min_shape : Shape - Get the min bounding shape. - )"); - shape.def("get_shape", - &ngraph::PartialShape::get_shape, - R"( - Returns - ---------- - get_shape : Shape - Get the unique shape. - )"); - shape.def("to_shape", - &ngraph::PartialShape::to_shape, - R"( - Returns - ---------- - to_shapess : Shape - Get the unique shape. - )"); - shape.def( - "get_dimension", - [](const ngraph::PartialShape& self, size_t index) -> ov::Dimension { - return self[index]; - }, - py::arg("index"), - R"( - Get the dimension at specified index of a partial shape. - - Parameters - ---------- - index : int - The index of dimension - - Returns - ---------- - get_dimension : Dimension - Get the particular dimension of a partial shape. 
- )"); - - shape.def( - "__eq__", - [](const ngraph::PartialShape& a, const ngraph::PartialShape& b) { - return a == b; - }, - py::is_operator()); - shape.def( - "__eq__", - [](const ngraph::PartialShape& a, const ngraph::Shape& b) { - return a == b; - }, - py::is_operator()); - - shape.def("__str__", [](const ngraph::PartialShape& self) -> std::string { - std::stringstream ss; - ss << self; - return ss.str(); - }); - - shape.def("__repr__", [](const ngraph::PartialShape& self) -> std::string { - return "() + ">"; - }); - - shape.def_static("from_capsule", [](py::object* capsule) { - // get the underlying PyObject* which is a PyCapsule pointer - auto* pybind_capsule_ptr = capsule->ptr(); - // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME - auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME); - - auto* ngraph_pShape = static_cast*>(capsule_ptr); - if (ngraph_pShape && *ngraph_pShape) { - return *ngraph_pShape; - } else { - throw std::runtime_error("The provided capsule does not contain an ngraph::PartialShape"); - } - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/partial_shape.hpp b/src/bindings/python/src/compatibility/pyngraph/partial_shape.hpp deleted file mode 100644 index 9553b8e5aa8e2f..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/partial_shape.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_PartialShape(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp b/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp deleted file mode 100644 index bd05454cddea1c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "ngraph/pass/constant_folding.hpp" -#include "ngraph/pass/manager.hpp" -#include "ngraph/pass/pass.hpp" -#include "ngraph/pass/validate.hpp" -#include "pyngraph/passes/manager.hpp" - -namespace py = pybind11; - -namespace { -class ManagerWrapper : public ngraph::pass::Manager { -public: - ManagerWrapper() {} - ~ManagerWrapper() {} - void register_pass(std::string pass_name) { - if (pass_name == "ConstantFolding") - push_pass(); - - if (m_per_pass_validation) { - push_pass(); - } - return; - } -}; -} // namespace - -void regclass_pyngraph_passes_Manager(py::module m) { - py::class_ manager(m, "Manager", py::module_local()); - manager.doc() = "ngraph.impl.passes.Manager wraps ngraph::pass::Manager using ManagerWrapper"; - - manager.def(py::init<>()); - - manager.def("set_per_pass_validation", &ManagerWrapper::set_per_pass_validation); - manager.def("run_passes", &ManagerWrapper::run_passes); - manager.def("register_pass", (void (ManagerWrapper::*)(std::string)) & ManagerWrapper::register_pass); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/manager.hpp b/src/bindings/python/src/compatibility/pyngraph/passes/manager.hpp deleted file mode 100644 index 2134cb949e651b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/manager.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void 
regclass_pyngraph_passes_Manager(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp b/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp deleted file mode 100644 index 7ad10bed80f6a0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/passes/regmodule_pyngraph_passes.hpp" - -#include - -namespace py = pybind11; - -void regmodule_pyngraph_passes(py::module m) { - py::module m_passes = m.def_submodule("passes", "Package ngraph.impl.passes wraps ngraph::passes"); - regclass_pyngraph_passes_Manager(m_passes); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp b/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp deleted file mode 100644 index 194fb9182f5c37..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include "pyngraph/passes/manager.hpp" - -namespace py = pybind11; - -void regmodule_pyngraph_passes(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/pyngraph.cpp b/src/bindings/python/src/compatibility/pyngraph/pyngraph.cpp deleted file mode 100644 index eefa6dc378e199..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/pyngraph.cpp +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "pyngraph/axis_set.hpp" -#include "pyngraph/axis_vector.hpp" -#include "pyngraph/coordinate.hpp" -#include "pyngraph/coordinate_diff.hpp" -#include "pyngraph/dimension.hpp" -#include "pyngraph/discrete_type_info.hpp" -#include "pyngraph/function.hpp" -#include "pyngraph/node.hpp" -#include "pyngraph/node_factory.hpp" -#include "pyngraph/node_input.hpp" -#include "pyngraph/node_output.hpp" -#include "pyngraph/ops/constant.hpp" -#include "pyngraph/ops/parameter.hpp" -#include "pyngraph/ops/result.hpp" -#include "pyngraph/ops/util/regmodule_pyngraph_op_util.hpp" -#include "pyngraph/partial_shape.hpp" -#include "pyngraph/passes/regmodule_pyngraph_passes.hpp" -#include "pyngraph/rt_map.hpp" -#include "pyngraph/shape.hpp" -#include "pyngraph/strides.hpp" -#include "pyngraph/types/regmodule_pyngraph_types.hpp" -#include "pyngraph/util.hpp" -#include "pyngraph/variant.hpp" - -namespace py = pybind11; - -PYBIND11_MODULE(_pyngraph, m) { - m.doc() = "Package ngraph.impl that wraps nGraph's namespace ngraph"; - regclass_pyngraph_PyRTMap(m); - regmodule_pyngraph_types(m); - regclass_pyngraph_Dimension(m); // Dimension must be registered before PartialShape - regclass_pyngraph_Shape(m); - regclass_pyngraph_PartialShape(m); - regclass_pyngraph_Node(m); - regclass_pyngraph_Input(m); - regclass_pyngraph_Output(m); - regclass_pyngraph_NodeFactory(m); - regclass_pyngraph_Strides(m); - regclass_pyngraph_CoordinateDiff(m); - regclass_pyngraph_DiscreteTypeInfo(m); - regclass_pyngraph_AxisSet(m); - regclass_pyngraph_AxisVector(m); - regclass_pyngraph_Coordinate(m); - py::module m_op = m.def_submodule("op", "Package ngraph.impl.op that wraps ngraph::op"); - regclass_pyngraph_op_Constant(m_op); - regclass_pyngraph_op_Parameter(m_op); - 
regclass_pyngraph_op_Result(m_op); - regmodule_pyngraph_op_util(m_op); - regclass_pyngraph_Function(m); - regmodule_pyngraph_passes(m); - regmodule_pyngraph_util(m); - regclass_pyngraph_Variant(m); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/rt_map.cpp b/src/bindings/python/src/compatibility/pyngraph/rt_map.cpp deleted file mode 100644 index 779cd66971fb81..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/rt_map.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/rt_map.hpp" - -#include -#include -#include - -#include "dict_attribute_visitor.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/divide.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/subtract.hpp" -#include "pyngraph/node.hpp" -#include "pyngraph/variant.hpp" - -namespace py = pybind11; - -using PyRTMap = ov::RTMap; - -PYBIND11_MAKE_OPAQUE(PyRTMap); - -void regclass_pyngraph_PyRTMap(py::module m) { - auto py_map = py::bind_map(m, "PyRTMap", py::module_local()); - py_map.doc() = "ngraph.impl.PyRTMap makes bindings for std::map>, which can later be used as ngraph::Node::RTMap"; - - py_map.def("__setitem__", [](PyRTMap& m, const std::string& k, const std::string v) { - m[k] = v; - }); - py_map.def("__setitem__", [](PyRTMap& m, const std::string& k, const int64_t v) { - m[k] = v; - }); - py_map.def("__getitem__", [](PyRTMap& m, const std::string& k) { - return m.at(k).as>(); - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/rt_map.hpp b/src/bindings/python/src/compatibility/pyngraph/rt_map.hpp deleted file mode 100644 index 1b40dfc8cee640..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/rt_map.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_PyRTMap(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/shape.cpp b/src/bindings/python/src/compatibility/pyngraph/shape.cpp deleted file mode 100644 index f43dda508a2e2b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/shape.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/shape.hpp" // ngraph::Shape - -#include -#include - -#include -#include -#include - -#include "pyngraph/shape.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Shape(py::module m) { - py::class_> shape(m, "Shape", py::module_local()); - shape.doc() = "ngraph.impl.Shape wraps ngraph::Shape"; - shape.def(py::init&>(), py::arg("axis_lengths")); - shape.def(py::init&>(), py::arg("axis_lengths")); - shape.def(py::init(), py::arg("axis_lengths")); - shape.def("__len__", [](const ngraph::Shape& v) { - return v.size(); - }); - shape.def("__getitem__", [](const ngraph::Shape& v, int key) { - return v[key]; - }); - - shape.def( - "__iter__", - [](ngraph::Shape& v) { - return py::make_iterator(v.begin(), v.end()); - }, - py::keep_alive<0, 1>()); /* Keep vector alive while iterator is used */ - - shape.def("__str__", [](const ngraph::Shape& self) -> std::string { - std::stringstream ss; - ss << self; - return ss.str(); - }); - - shape.def("__repr__", [](const ngraph::Shape& self) -> std::string { - return "() + ">"; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/shape.hpp 
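rt_map.cpp above marks the runtime-info map as opaque and binds it with py::bind_map, so Python mutates the C++ map in place instead of working on a converted dict copy. The pattern on a hypothetical string-to-string map:

#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>
#include <map>
#include <string>

namespace py = pybind11;

using StrMap = std::map<std::string, std::string>;  // hypothetical map type

// Opt this type out of the automatic copy-converting std::map caster.
PYBIND11_MAKE_OPAQUE(StrMap);

PYBIND11_MODULE(example, m) {
    // bind_map generates __getitem__/__setitem__/__len__/__contains__/__iter__,
    // all operating on the underlying C++ map by reference.
    py::bind_map<StrMap>(m, "StrMap", py::module_local());
}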
b/src/bindings/python/src/compatibility/pyngraph/shape.hpp deleted file mode 100644 index 1f0cd8b369997c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/shape.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Shape(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/strides.cpp b/src/bindings/python/src/compatibility/pyngraph/strides.cpp deleted file mode 100644 index b740336e2288f5..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/strides.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/strides.hpp" // ngraph::Strides - -#include -#include - -#include -#include -#include - -#include "pyngraph/strides.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Strides(py::module m) { - py::class_> strides(m, "Strides", py::module_local()); - strides.doc() = "ngraph.impl.Strides wraps ngraph::Strides"; - strides.def(py::init&>(), py::arg("axis_strides")); - strides.def(py::init&>(), py::arg("axis_strides")); - strides.def(py::init(), py::arg("axis_strides")); - - strides.def("__str__", [](const ngraph::Strides& self) -> std::string { - std::stringstream stringstream; - std::copy(self.begin(), self.end(), std::ostream_iterator(stringstream, ", ")); - std::string string = stringstream.str(); - return string.substr(0, string.size() - 2); - }); - - strides.def("__repr__", [](const ngraph::Strides& self) -> std::string { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::string shape_str = py::cast(self).attr("__str__")().cast(); - return "<" + class_name + ": (" + shape_str + ")>"; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/strides.hpp b/src/bindings/python/src/compatibility/pyngraph/strides.hpp deleted file mode 100644 index 619f20b77562e8..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/strides.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Strides(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/types/element_type.cpp b/src/bindings/python/src/compatibility/pyngraph/types/element_type.cpp deleted file mode 100644 index 070a89acd7876a..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/types/element_type.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/type/element_type.hpp" - -#include -#include - -#include "ngraph/op/parameter.hpp" -#include "pyngraph/types/element_type.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Type(py::module m) { - py::class_> type(m, "Type", py::module_local()); - type.doc() = "ngraph.impl.Type wraps ngraph::element::Type"; - type.attr("boolean") = ngraph::element::boolean; - type.attr("f16") = ngraph::element::f16; - type.attr("f32") = ngraph::element::f32; - type.attr("f64") = ngraph::element::f64; - type.attr("i8") = ngraph::element::i8; - type.attr("i16") = ngraph::element::i16; - type.attr("i32") = ngraph::element::i32; - type.attr("i64") = ngraph::element::i64; - type.attr("u1") = ngraph::element::u1; - type.attr("u8") = ngraph::element::u8; - type.attr("u16") = 
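The Shape binding above exposes __len__, __getitem__ and an __iter__ built with py::make_iterator, using py::keep_alive<0, 1>() so the container outlives any iterator handed to Python. A minimal sketch on a hypothetical Dims container:

#include <pybind11/pybind11.h>
#include <vector>

namespace py = pybind11;

struct Dims {
    std::vector<size_t> values{1, 3, 224, 224};  // hypothetical fixed contents
};

PYBIND11_MODULE(example, m) {
    py::class_<Dims>(m, "Dims")
        .def(py::init<>())
        .def("__len__", [](const Dims& d) { return d.values.size(); })
        .def("__getitem__", [](const Dims& d, size_t i) { return d.values.at(i); })
        .def(
            "__iter__",
            [](Dims& d) { return py::make_iterator(d.values.begin(), d.values.end()); },
            py::keep_alive<0, 1>());  // keep Dims alive while the iterator is in use
}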
-    type.attr("u32") = ngraph::element::u32;
-    type.attr("u64") = ngraph::element::u64;
-    type.attr("bf16") = ngraph::element::bf16;
-
-    type.def("__repr__", [](const ngraph::element::Type& self) {
-        std::string bitwidth = std::to_string(self.bitwidth());
-        if (self.is_signed()) {
-            return "";
-        }
-        return "";
-    });
-
-    type.def(
-        "__eq__",
-        [](const ngraph::element::Type& a, const ngraph::element::Type& b) {
-            return a == b;
-        },
-        py::is_operator());
-
-    type.def_property_readonly("bitwidth", &ngraph::element::Type::bitwidth);
-    type.def_property_readonly("is_real", &ngraph::element::Type::is_real);
-    type.def("get_type_name", &ngraph::element::Type::get_type_name);
-    type.def("to_string", &ngraph::element::Type::to_string);
-}
diff --git a/src/bindings/python/src/compatibility/pyngraph/types/element_type.hpp b/src/bindings/python/src/compatibility/pyngraph/types/element_type.hpp
deleted file mode 100644
index 4a345dd6357ee4..00000000000000
--- a/src/bindings/python/src/compatibility/pyngraph/types/element_type.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-namespace py = pybind11;
-
-void regclass_pyngraph_Type(py::module m);
-void regclass_pyngraph_Bool(py::module m);
-void regclass_pyngraph_Float32(py::module m);
-void regclass_pyngraph_Float64(py::module m);
-void regclass_pyngraph_Int8(py::module m);
-// void regclass_pyngraph_Int16(py::module m);
-void regclass_pyngraph_Int32(py::module m);
-void regclass_pyngraph_Int64(py::module m);
-void regclass_pyngraph_UInt8(py::module m);
-// void regclass_pyngraph_UInt16(py::module m);
-void regclass_pyngraph_UInt32(py::module m);
-void regclass_pyngraph_UInt64(py::module m);
-void regclass_pyngraph_BFloat16(py::module m);
diff --git a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp b/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp
deleted file mode 100644
index 188107323fef55..00000000000000
--- a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "pyngraph/types/regmodule_pyngraph_types.hpp"
-
-#include
-
-namespace py = pybind11;
-
-void regmodule_pyngraph_types(py::module m) {
-    regclass_pyngraph_Type(m);
-}
diff --git a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp b/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp
deleted file mode 100644
index 36c0b6aae59798..00000000000000
--- a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-#include "pyngraph/types/element_type.hpp"
-
-namespace py = pybind11;
-
-void regmodule_pyngraph_types(py::module m);
diff --git a/src/bindings/python/src/compatibility/pyngraph/util.cpp b/src/bindings/python/src/compatibility/pyngraph/util.cpp
deleted file mode 100644
index 042b0eea8ec6a5..00000000000000
--- a/src/bindings/python/src/compatibility/pyngraph/util.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "pyngraph/util.hpp"
-
-#include
-
-#include
-
-#include "ngraph/op/result.hpp"
-#include "ngraph/validation_util.hpp"
"ngraph/validation_util.hpp" -#include "ngraph/version.hpp" -#include "openvino/runtime/core.hpp" - -namespace py = pybind11; - -inline void* numpy_to_c(py::array a) { - py::buffer_info info = a.request(); - return info.ptr; -} - -void regmodule_pyngraph_util(py::module m) { - py::module mod = m.def_submodule("util", "ngraph.impl.util"); - mod.def("numpy_to_c", &numpy_to_c); - mod.def("get_constant_from_source", - &ngraph::get_constant_from_source, - py::arg("output"), - R"( - Runs an estimation of source tensor. - Parameters - ---------- - output : Output - output node - Returns - ---------- - get_constant_from_source : Constant or None - If it succeeded to calculate both bounds and - they are the same returns Constant operation - from the resulting bound, otherwise Null. - )"); - - mod.def("get_ngraph_version_string", []() -> std::string { - NGRAPH_SUPPRESS_DEPRECATED_START - return get_ngraph_version_string(); - NGRAPH_SUPPRESS_DEPRECATED_END - }); - - mod.def("get_ie_output_name", [](const ngraph::Output& output) { - return ov::op::util::get_ie_output_name(output); - }); - - mod.def("shutdown", - &ov::shutdown, - R"( - Shut down the OpenVINO by deleting all static-duration objects allocated by the library and releasing - dependent resources - - This function should be used by advanced user to control unload the resources. - - You might want to use this function if you are developing a dynamically-loaded library which should clean up all - resources after itself when the library is unloaded. - )"); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/util.hpp b/src/bindings/python/src/compatibility/pyngraph/util.hpp deleted file mode 100644 index 1b1b8978fdfe67..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/util.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regmodule_pyngraph_util(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/variant.cpp b/src/bindings/python/src/compatibility/pyngraph/variant.cpp deleted file mode 100644 index 5682b46123931e..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/variant.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "pyngraph/variant.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Variant(py::module m) { - py::class_ variant_base(m, "Variant", py::module_local()); - variant_base.doc() = "ngraph.impl.Variant wraps ngraph::Variant"; - - variant_base.def( - "__eq__", - [](const ov::Any& a, const ov::Any& b) { - return a == b; - }, - py::is_operator()); - variant_base.def( - "__eq__", - [](const ov::Any& a, const std::string& b) { - return a.as() == b; - }, - py::is_operator()); - variant_base.def( - "__eq__", - [](const ov::Any& a, const int64_t& b) { - return a.as() == b; - }, - py::is_operator()); - - variant_base.def("__repr__", [](const ov::Any self) { - std::stringstream ret; - self.print(ret); - return ret.str(); - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/variant.hpp b/src/bindings/python/src/compatibility/pyngraph/variant.hpp deleted file mode 100644 index d9a1307db9edb3..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/variant.hpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - 
-#include
-#include
-
-#include "openvino/core/any.hpp"
-
-namespace py = pybind11;
-
-void regclass_pyngraph_Variant(py::module m);