rename the package to onnxruntime_extensions (#92)

* rename the package to onnxruntime_extensions

* fix the cmake file
Wenbing Li 2021-05-12 12:02:57 -07:00 committed by GitHub
Parent c3e694ddfa
Commit 3806e29421
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
30 changed files with 44 additions and 45 deletions

View file

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.16.0)
project(ortcustomops VERSION 0.1.0 LANGUAGES C CXX)
project(onnxruntime_extensions LANGUAGES C CXX)
# set(CMAKE_VERBOSE_MAKEFILE ON)
if(NOT CMAKE_BUILD_TYPE)
@@ -8,7 +8,6 @@ if(NOT CMAKE_BUILD_TYPE)
endif()
project(onnxruntime_extensions)
set(CPACK_PACKAGE_NAME "onnxruntime_extensions")
set(CPACK_PACKAGE_VERSION_MAJOR "0")
set(CPACK_PACKAGE_VERSION_MINOR "2")
@@ -194,7 +193,7 @@ if(OCOS_ENABLE_PYTHON)
find_package(Python3 COMPONENTS Interpreter Development)
if (WIN32)
list(APPEND shared_TARGET_SRC "${PROJECT_SOURCE_DIR}/onnxruntime_customops/ortcustomops.def")
list(APPEND shared_TARGET_SRC "${PROJECT_SOURCE_DIR}/onnxruntime_extensions/ortcustomops.def")
endif()
Python3_add_library(ortcustomops SHARED ${TARGET_SRC_PYOPS} ${shared_TARGET_SRC})
@@ -296,7 +295,7 @@ if (OCOS_ENABLE_CTEST)
endif()
set(TEST_DATA_SRC ${TEST_SRC_DIR}/data)
set(TEST_DATA_DES ${ortcustomops_BINARY_DIR}/data)
set(TEST_DATA_DES ${onnxruntime_extensions_BINARY_DIR}/data)
# Copy test data from source to destination.
add_custom_command(

View file

@@ -5,13 +5,13 @@ ONNXRuntime Extensions is a comprehensive package to extend the capability of th
1. The CustomOp C++ library for [ONNX Runtime](http://onnxruntime.ai), based on the ONNXRuntime CustomOp API.
2. Supports the PyOp feature to implement custom ops with Python functions.
3. Builds an all-in-one ONNX model from the pre/post-processing code; see [docs/pre_post_processing.md](docs/pre_post_processing.md) for details.
4. Supports per-operator debugging in Python; see ```hook_model_op``` in the onnxruntime_customops Python package.
4. Supports per-operator debugging in Python; see ```hook_model_op``` in the onnxruntime_extensions Python package.
# Quick Start
The following code shows how to run an ONNX model with ONNXRuntime custom ops straightforwardly.
```python
import numpy
from onnxruntime_customops import PyOrtFunction, VectorToString
from onnxruntime_extensions import PyOrtFunction, VectorToString
# <ProjectDir>/tutorials/data/gpt-2/gpt2_tok.onnx
encode = PyOrtFunction.from_model('gpt2_tok.onnx')
# https://github.com/onnx/models/blob/master/text/machine_comprehension/gpt-2/model/gpt2-lm-head-10.onnx
@@ -46,7 +46,7 @@ The CustomOp library was written with C++, so that it supports run the model in
Of course, with Python the task becomes much easier, since PyOrtFunction directly translates the ONNX model into a Python function. But if the ONNXRuntime custom Python API is to be used, the inference process is:
```python
import onnxruntime as _ort
from onnxruntime_customops import get_library_path as _lib_path
from onnxruntime_extensions import get_library_path as _lib_path
so = _ort.SessionOptions()
so.register_custom_ops_library(_lib_path())
@@ -60,7 +60,7 @@ so.register_custom_ops_library(_lib_path())
Contributions of custom op C++ implementations directly to this repository are welcome, since they widely benefit other users. Besides C++, if you want to quickly verify an ONNX model with some custom operators in Python, PyOp will help with that:
```python
import numpy
from onnxruntime_customops import PyOp, onnx_op
from onnxruntime_extensions import PyOp, onnx_op
# Implement the CustomOp by decorating a function with onnx_op
@onnx_op(op_type="Inverse", inputs=[PyOp.dt_float])
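```

The hunk is cut off before the decorated function body. As a rough sketch of how such a PyOp might be completed (the ```numpy.linalg.inv``` body is an assumption based on the op name, not necessarily the repository's implementation):

```python
import numpy
from onnxruntime_extensions import PyOp, onnx_op

# Implement the CustomOp by decorating a function with onnx_op;
# the matrix-inverse body is assumed from the op name "Inverse".
@onnx_op(op_type="Inverse", inputs=[PyOp.dt_float])
def inverse(x):
    return numpy.linalg.inv(x)
```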

View file

@@ -2,16 +2,16 @@
Most pre- and post-processing of DL models is written in Python. When users run the converted ONNX model with Python snippets, it is efficient and productive to convert those code snippets into the ONNX model itself, since the ONNX graph is actually a computation graph and, theoretically, can represent most program code.
In the onnxruntime_customops package, there is a utility to help with that. The tool traces the data flow in the processing code, converts every operation in the trace log into an ONNX graph, and merges all these graphs into one single ONNX model. It supports the Python numeric operators and PyTorch's operation APIs (only a subset of the tensor API).
In the onnxruntime_extensions package, there is a utility to help with that. The tool traces the data flow in the processing code, converts every operation in the trace log into an ONNX graph, and merges all these graphs into one single ONNX model. It supports the Python numeric operators and PyTorch's operation APIs (only a subset of the tensor API).
### Usage
In onnxruntime_customops.utils there is an API, ```trace_for_onnx```; when fed the input variables of the Python code, it starts a tracing session that logs every operation originating from those variables. If the processing code makes PyTorch API calls, replace the import statement ```import torch``` with ```from onnxruntime_customops import mytorch as torch``` so that those PyTorch APIs can be traced as well.
In onnxruntime_extensions.utils there is an API, ```trace_for_onnx```; when fed the input variables of the Python code, it starts a tracing session that logs every operation originating from those variables. If the processing code makes PyTorch API calls, replace the import statement ```import torch``` with ```from onnxruntime_extensions import mytorch as torch``` so that those PyTorch APIs can be traced as well.
Overall, it looks like:
```python
from onnxruntime_customops.utils import trace_for_onnx
from onnxruntime_customops import mytorch as torch # overload torch API if it is needed
from onnxruntime_extensions.utils import trace_for_onnx
from onnxruntime_extensions import mytorch as torch # overload torch API if it is needed
# the raw input, like text, image, or ...
input_text = ...
@@ -27,7 +27,7 @@ with trace_for_onnx(input_text, names=['string_input']) as tc_sess:
Then the all-in-one model can run inference on the raw text directly:
```python
from onnxruntime_customops.eager_op import EagerOp
from onnxruntime_extensions.eager_op import EagerOp
# the input raw text
input_text = ...
full_model = EagerOp.from_model('<all_in_one.onnx>')
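```

The snippet above is cut off by the hunk; a minimal sketch of how the merged model might then be invoked (the call style is inferred from the EagerOp usage shown here, and the input value is illustrative):

```python
from onnxruntime_extensions.eager_op import EagerOp

# the input raw text (illustrative value)
input_text = ["an example input sentence"]
full_model = EagerOp.from_model('<all_in_one.onnx>')
# invoking the wrapper runs pre-processing, the core model, and
# post-processing in a single ONNXRuntime session (assumed call style)
output = full_model(input_text)
print(output)
```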

View file

View file

View file

@@ -16,7 +16,7 @@ def get_library_path():
The custom operator library binary path
:return: A string of this library path.
"""
mod = sys.modules['onnxruntime_customops._ortcustomops']
mod = sys.modules['onnxruntime_extensions._ortcustomops']
return mod.__file__
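For reference, the returned path is what gets registered with an ONNXRuntime session, as the README shows; a short usage sketch (the model filename is a placeholder):

```python
import onnxruntime as _ort
from onnxruntime_extensions import get_library_path as _lib_path

# Register the custom-op shared library with the session options so that
# sessions created from them can resolve the extension operators.
so = _ort.SessionOptions()
so.register_custom_ops_library(_lib_path())
sess = _ort.InferenceSession('model_with_custom_ops.onnx', so)  # placeholder model
```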

View file

View file

@@ -1,7 +1,7 @@
"""
Override the torch import to dump all torch operators used in the processing code.
!!!This package depends on the onnxruntime_customops root package, but not vice versa!!!
since this package fully relies on PyTorch, while onnxruntime_customops doesn't.
!!!This package depends on the onnxruntime_extensions root package, but not vice versa!!!
since this package fully relies on PyTorch, while onnxruntime_extensions doesn't.
"""
try:
import torch
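As an illustration of the pattern this docstring describes (not the package's actual internals), a tracing shim can wrap torch functions and log each call before delegating to the real operator:

```python
import functools
import torch

def _traced(fn):
    # Log each invocation before delegating to the real torch op; a real
    # tracer would record inputs and outputs for ONNX graph building.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        print(f"traced torch op: {fn.__name__}")
        return fn(*args, **kwargs)
    return wrapper

# illustrative: re-export wrapped versions under this module's namespace
matmul = _traced(torch.matmul)
softmax = _traced(torch.softmax)
```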

View file

View file

@@ -56,7 +56,7 @@ class BuildCMakeExt(_build_ext):
Perform build_cmake before doing the 'normal' stuff
"""
for extension in self.extensions:
if extension.name == 'onnxruntime_customops._ortcustomops':
if extension.name == 'onnxruntime_extensions._ortcustomops':
self.build_cmake(extension)
def build_cmake(self, extension):
@@ -118,7 +118,7 @@ def read_requirements():
# read version from the package file.
def read_version():
version_str = '1.0.0'
with (open(os.path.join(TOP_DIR, 'onnxruntime_customops/__init__.py'), "r")) as f:
with (open(os.path.join(TOP_DIR, 'onnxruntime_extensions/__init__.py'), "r")) as f:
line = [_ for _ in [_.strip("\r\n ")
for _ in f.readlines()] if _.startswith("__version__")]
if len(line) > 0:
@@ -132,18 +132,18 @@ if sys.platform == "win32":
ext_modules = [
setuptools.extension.Extension(
name=str('onnxruntime_customops._ortcustomops'),
name=str('onnxruntime_extensions._ortcustomops'),
sources=[])
]
packages = find_packages()
package_dir = {k: os.path.join('.', k.replace(".", "/")) for k in packages}
package_data = {
"onnxruntime_customops": ["*.dll", "*.so", "*.pyd"],
"onnxruntime_extensions": ["*.dll", "*.so", "*.pyd"],
}
setup(
name='onnxruntime_customops',
name='onnxruntime_extensions',
version=read_version(),
packages=packages,
package_dir=package_dir,
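The Extension with an empty sources list is the usual trick for CMake-built wheels: setuptools still schedules a build_ext step for the module, and the overridden command dispatches it to CMake instead of compiling anything itself. A condensed sketch of the pattern as it appears in this setup.py (the CMake invocation itself is elided):

```python
import setuptools
from setuptools.command.build_ext import build_ext as _build_ext

class BuildCMakeExt(_build_ext):
    def run(self):
        # Route the CMake-built extension to a custom build step instead
        # of letting setuptools try to compile the (empty) source list.
        for extension in self.extensions:
            if extension.name == 'onnxruntime_extensions._ortcustomops':
                self.build_cmake(extension)

    def build_cmake(self, extension):
        ...  # configure and build with CMake, then copy the binary in place

setuptools.setup(
    name='onnxruntime_extensions',
    ext_modules=[setuptools.extension.Extension(
        name='onnxruntime_extensions._ortcustomops', sources=[])],
    cmdclass={'build_ext': BuildCMakeExt},
)
```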

View file

@@ -5,7 +5,7 @@ import onnxruntime as _ort
from pathlib import Path
from onnx import helper, onnx_pb as onnx_proto
from transformers import GPT2Tokenizer
from onnxruntime_customops import (
from onnxruntime_extensions import (
onnx_op,
enable_custom_op,
PyCustomOpDef,

View file

@@ -3,7 +3,7 @@ import unittest
import numpy as np
from onnx import helper, onnx_pb as onnx_proto
import onnxruntime as _ort
from onnxruntime_customops import (
from onnxruntime_extensions import (
onnx_op, PyCustomOpDef,
get_library_path as _get_library_path)

View file

@@ -3,8 +3,8 @@ import onnx
import unittest
import torchvision
import numpy as np
from onnxruntime_customops.utils import trace_for_onnx, op_from_model
from onnxruntime_customops import eager_op, hook_model_op, PyOp, mytorch as torch
from onnxruntime_extensions.utils import trace_for_onnx, op_from_model
from onnxruntime_extensions import eager_op, hook_model_op, PyOp, mytorch as torch
class TestTorchE2E(unittest.TestCase):

View file

@@ -4,7 +4,7 @@ import numpy as np
from numpy.testing import assert_almost_equal
from onnx import helper, onnx_pb as onnx_proto
import onnxruntime as _ort
from onnxruntime_customops import (
from onnxruntime_extensions import (
onnx_op, PyCustomOpDef,
get_library_path as _get_library_path)

View file

@@ -6,7 +6,7 @@ import numpy as np
from numpy.testing import assert_almost_equal
from onnx import helper, onnx_pb as onnx_proto
import onnxruntime as _ort
from onnxruntime_customops import (
from onnxruntime_extensions import (
onnx_op, PyCustomOpDef,
get_library_path as _get_library_path)
import tensorflow as tf

View file

@@ -4,7 +4,7 @@ import unittest
import numpy as np
from onnx import helper, onnx_pb as onnx_proto
import onnxruntime as _ort
from onnxruntime_customops import (
from onnxruntime_extensions import (
onnx_op,
enable_custom_op,
PyCustomOpDef,

View file

@@ -4,7 +4,7 @@ import unittest
import numpy as np
from onnx import helper, onnx_pb as onnx_proto
import onnxruntime as _ort
from onnxruntime_customops import (
from onnxruntime_extensions import (
onnx_op,
enable_custom_op,
PyCustomOpDef,

View file

@@ -9,7 +9,7 @@ import numpy as np
from numpy.testing import assert_almost_equal
from onnx import helper, onnx_pb as onnx_proto
import onnxruntime as _ort
from onnxruntime_customops import (
from onnxruntime_extensions import (
onnx_op, PyCustomOpDef,
get_library_path as _get_library_path,
hash_64)

View file

@@ -1,6 +1,6 @@
import unittest
import numpy as np
from onnxruntime_customops.eager_op import EagerOp, StringToVector
from onnxruntime_extensions.eager_op import EagerOp, StringToVector
def _run_string_to_vector(input, output, map, unk):

View file

@@ -9,13 +9,13 @@ import onnxruntime as _ort
from onnx import load
from torch.onnx import register_custom_op_symbolic
from onnxruntime_customops import (
from onnxruntime_extensions import (
PyOp,
onnx_op,
hook_model_op,
get_library_path as _get_library_path)
from onnxruntime_customops.eager_op import EagerOp
from onnxruntime_extensions.eager_op import EagerOp
def my_inverse(g, self):

View file

@@ -1,6 +1,6 @@
import unittest
import numpy as np
from onnxruntime_customops.eager_op import EagerOp, VectorToString
from onnxruntime_extensions.eager_op import EagerOp, VectorToString
def _run_vector_to_string(input, output, map, unk):

View file

@@ -1,8 +1,8 @@
import os
import numpy
from transformers import AutoConfig
from onnxruntime_customops import mytorch as torch, eager_op
from onnxruntime_customops.utils import trace_for_onnx, op_from_model, build_customop_model
from onnxruntime_extensions import mytorch as torch, eager_op
from onnxruntime_extensions.utils import trace_for_onnx, op_from_model, build_customop_model
device = 'cpu'

View file

@@ -6,7 +6,7 @@
"source": [
"# Convert And Inference Pytorch model with CustomOps\n",
"\n",
"With onnxruntime_customops package, the Pytorch model with the operation cannot be converted into the standard ONNX operators still be converted and the converted ONNX model still can be run with ONNXRuntime, plus onnxruntime_customops package. This tutorial show it works"
"With onnxruntime_extensions package, the Pytorch model with the operation cannot be converted into the standard ONNX operators still be converted and the converted ONNX model still can be run with ONNXRuntime, plus onnxruntime_extensions package. This tutorial show it works"
]
},
{
@@ -91,7 +91,7 @@
"metadata": {},
"source": [
"## Inference\n",
"This converted model cannot directly run the onnxruntime due to the custom operator. but it can run with onnxruntime_customops easily.\n",
"This converted model cannot directly run the onnxruntime due to the custom operator. but it can run with onnxruntime_extensions easily.\n",
"\n",
"Firstly, let define a PyOp function to inteprete the custom op node in the ONNNX model."
]
@@ -103,7 +103,7 @@
"outputs": [],
"source": [
"import numpy\n",
"from onnxruntime_customops import onnx_op, PyOp\n",
"from onnxruntime_extensions import onnx_op, PyOp\n",
"@onnx_op(op_type=\"Inverse\")\n",
"def inverse(x):\n",
" # the user custom op implementation here:\n",
@@ -133,7 +133,7 @@
}
],
"source": [
"from onnxruntime_customops import PyOrtFunction\n",
"from onnxruntime_extensions import PyOrtFunction\n",
"onnx_fn = PyOrtFunction.from_model(onnx_model)\n",
"y = onnx_fn(x0.numpy())\n",
"print(y)"
@@ -180,7 +180,7 @@
}
],
"source": [
"from onnxruntime_customops import enable_custom_op\n",
"from onnxruntime_extensions import enable_custom_op\n",
"# disable the PyOp function and run with the C++ function\n",
"enable_custom_op(False)\n",
"y = onnx_fn(x0.numpy())\n",

View file

@@ -35,7 +35,7 @@
"- Case 3: Defining new custom ops in C++\n",
" - Likely better perf than Python but requires building the customops repo from source\n",
"\n",
"For cases 1 and 2, you can use the off-the-shelf pip package `onnxruntime_customops`. For case 3, you will need to clone and build the customops repo. Follow the instructions [here](https://github.com/microsoft/ort-customops#getting-started).\n",
"For cases 1 and 2, you can use the off-the-shelf pip package `onnxruntime_extensions`. For case 3, you will need to clone and build the customops repo. Follow the instructions [here](https://github.com/microsoft/ort-customops#getting-started).\n",
"\n",
"You will also need to install the onnxruntime, tensorflow, and tf2onnx packages. **NOTE: tf2onnx version (FIXME) is required for this tutorial.**"
],
@@ -211,7 +211,7 @@
],
"source": [
"import onnxruntime as ort\n",
"from onnxruntime_customops import get_library_path\n",
"from onnxruntime_extensions import get_library_path\n",
"\n",
"so = ort.SessionOptions()\n",
"so.register_custom_ops_library(get_library_path())\n",
@@ -452,7 +452,7 @@
"outputs": [],
"source": [
"import numpy as np\n",
"from onnxruntime_customops import onnx_op, PyCustomOpDef\n",
"from onnxruntime_extensions import onnx_op, PyCustomOpDef\n",
"\n",
"@onnx_op(op_type=\"UnsortedSegmentJoin\",\n",
" inputs=[PyCustomOpDef.dt_string, PyCustomOpDef.dt_int32, PyCustomOpDef.dt_int32],\n",
@@ -491,7 +491,7 @@
],
"source": [
"import onnxruntime as ort\n",
"from onnxruntime_customops import get_library_path\n",
"from onnxruntime_extensions import get_library_path\n",
"\n",
"so = ort.SessionOptions()\n",
"so.register_custom_ops_library(get_library_path())\n",