Add the missing tasks and fix the issues in CUDA python unit test pipelines (#615)

* add an experimental CUDA python unit test pipeline

* typo

* in ci.yml?

* winpycuda

* move it in optional

* enable cuda pytest in linuxbuild

* build in docker

* add the cuda pytest for windows

* cuda flag fixing

* minor fixing

* typo

---------

Co-authored-by: Yi Zhang <zhanyi@microsoft.com>
This commit is contained in:
Wenbing Li 2023-12-08 10:19:54 -08:00 коммит произвёл GitHub
Родитель 90067494d3
Коммит dce0d9c72c
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4AEE18F83AFDEB23
6 изменённых файлов: 124 добавлений и 42 удалений

Просмотреть файл

@@ -521,6 +521,34 @@ stages:
ctest -C RelWithDebInfo --output-on-failure
displayName: Run C++ native tests
- task: UsePythonVersion@0
inputs:
versionSpec: '3.x'
disableDownloadFromRegistry: true
addToPath: false
architecture: 'x64'
displayName: Use ADO python task
- script: |
set CUDA_PATH=$(Agent.TempDirectory)\v11.8
python -m pip install --upgrade setuptools pip
python -m pip install numpy coloredlogs flatbuffers packaging protobuf sympy
python -m pip install onnxruntime-gpu==$(ORT_VERSION)
python -m pip install -v --config-settings "ortx-user-option=use-cuda" .
displayName: Build and install onnxruntime-extensions CUDA package.
- script: |
python -m pip install -r requirements-dev.txt
python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
displayName: Install dependencies for Python unit tests
- script: |
cd test
python -m pytest . --verbose
cd cuda
python -m pytest . --verbose
displayName: Run python test for CPU and CUDA kernels
- stage: LinuxCUDABuilds
dependsOn: []
jobs:
@@ -609,26 +637,30 @@ stages:
workingDirectory: $(Build.SourcesDirectory)
displayName: Run C++ native tests
- script: |
python -m pip install --upgrade pip
python -m pip install --upgrade setuptools
python -m pip install onnxruntime-gpu==$(ORT_VERSION)
python -m pip install -r requirements.txt
displayName: Install requirements.txt
- script: |
python -m pip install .
displayName: Build the library and tests
- script: python -m pip install $(TORCH_VERSION)
displayName: Install pytorch
- script: |
python -m pip install -r requirements-dev.txt
displayName: Install requirements-dev.txt
- script: cd test && python -m pytest . --verbose
displayName: Run python test
- task: CmdLine@2
inputs:
script: |
docker run --gpus all --rm \
--volume $(Build.SourcesDirectory):/onnxruntime-extensions \
--volume $(Build.SourcesDirectory)/onnxruntime-linux-x64-$(ORT_VERSION):/onnxruntime \
-e CUDA_PATH=/usr/local/cuda-11.8 \
onnxruntime-extensionscuda11build \
/bin/bash -c "
set -ex; \
pushd /onnxruntime-extensions; \
python3 -m pip install --upgrade pip; \
python3 -m pip install --upgrade setuptools; \
python3 -m pip install onnxruntime-gpu==$(ORT_VERSION); \
python3 -m pip install -r requirements.txt; \
python3 -m pip install -v --config-settings "ortx-user-option=use-cuda" . ; \
python3 -m pip install $(TORCH_VERSION) ; \
python3 -m pip install -r requirements-dev.txt; \
cd test && python -m pytest . --verbose; \
cd cuda && python -m pytest . --verbose; \
popd; \
"
workingDirectory: $(Build.SourcesDirectory)
displayName: Build the library and Python unit tests
- stage: WebAssemblyBuilds
dependsOn: []

Просмотреть файл

@@ -123,7 +123,7 @@ stages:
dependsOn: []
jobs:
- job: OrtNightly
- job: OrtNightlyCPU
pool:
name: 'onnxruntime-extensions-Windows-CPU'
@@ -152,3 +152,46 @@ stages:
cd test
python -m pytest . --verbose
displayName: Run python test
- job: WindowsPyCUDA
pool:
name: 'onnxruntime-extensions-Win2022-GPU-A10'
steps:
- template: templates/set_winenv.yml
parameters:
EnvSetupScript: 'set_env_cuda.bat'
DownloadCUDA: true
- script: |
nvidia-smi
nvcc --version
where nvcc
displayName: check cuda version
- task: UsePythonVersion@0
inputs:
versionSpec: '3.x'
disableDownloadFromRegistry: true
addToPath: false
architecture: 'x64'
displayName: Use ADO python task
- script: |
set CUDA_PATH=$(Agent.TempDirectory)\v11.8
python -m pip install --upgrade setuptools pip
python -m pip install numpy coloredlogs flatbuffers packaging protobuf sympy
python -m pip install -U --index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ ort-nightly-gpu
python -m pip install -v --config-settings "ortx-user-option=use-cuda" .
displayName: Build and install onnxruntime-extensions CUDA package.
- script: |
python -m pip install -r requirements-dev.txt
python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
displayName: Install dependencies for Python unit tests
- script: |
cd test
python -m pytest . --verbose
cd cuda
python -m pytest . --verbose
displayName: Run python test

Просмотреть файл

@@ -7,7 +7,7 @@
import os
import sys
from setuptools import build_meta as _orig
from setuptools.build_meta import * # noqa: F403
from setuptools.build_meta import * # noqa: F401, F403
# add the current directory to the path, so we can import setup_cmds.py
sys.path.append(os.path.dirname(__file__))

Просмотреть файл

@@ -20,12 +20,18 @@ ORTX_USER_OPTION = 'ortx-user-option'
def _load_cuda_version():
pattern = r"\bV\d+\.\d+\.\d+\b"
output = subprocess.check_output(["nvcc", "--version"]).decode("utf-8")
match = re.search(pattern, output)
if match:
vers = match.group()[1:].split('.')
return f"{vers[0]}.{vers[1]}" # only keep the major and minor version.
nvcc_path = 'nvcc'
cuda_path = os.environ.get('CUDA_PATH')
if cuda_path is not None:
nvcc_path = os.path.join(cuda_path, 'bin', 'nvcc')
try:
output = subprocess.check_output([nvcc_path, "--version"], stderr=subprocess.STDOUT).decode("utf-8")
pattern = r"\bV(\d+\.\d+\.\d+)\b"
match = re.search(pattern, output)
if match:
return match.group(1)
except subprocess.CalledProcessError:
pass
return None
@@ -193,15 +199,16 @@ class CmdBuildCMakeExt(_build_ext):
cuda_flag = "OFF" if self.use_cuda == 0 else "ON"
cmake_args += ['-DOCOS_USE_CUDA=' + cuda_flag]
print("=> CUDA build flag: " + cuda_flag)
cuda_ver = _load_cuda_version()
if cuda_ver is None:
raise RuntimeError(
"Cannot find nvcc in your env:path, use-cuda doesn't work")
f_ver = ext_fullpath.parent / "_version.py"
with f_ver.open('a') as _f:
_f.writelines(["\n",
f"cuda = {cuda_ver}",
"\n"])
if cuda_flag == "ON":
cuda_ver = _load_cuda_version()
if cuda_ver is None:
raise RuntimeError("Cannot find nvcc in your env:path, use-cuda doesn't work")
if sys.platform == "win32":
cuda_path = os.environ.get("CUDA_PATH")
cmake_args += [f'-T cuda={cuda_path}']
f_ver = ext_fullpath.parent / "_version.py"
with f_ver.open('a') as _f:
_f.writelines(["\n", f"cuda = \"{cuda_ver}\"", "\n"])
# CMake lets you override the generator - we need to check this.
# Can be set with Conda-Build, for example.

Просмотреть файл

@@ -22,9 +22,9 @@ The package contains all custom operators and some Python scripts to manipulate
- use-cuda: enable CUDA kernel build in Python package.
- no-azure: disable AzureOp kernel build in Python package.
- no-opencv: disable operators based on OpenCV in build.
- cc_debug: Generate debug info for extensions binaries and disable C/C++ compiler optimization.
- cc-debug: Generate debug info for extensions binaries and disable C/C++ compiler optimization.
For example:`pip install --config-settings "ortx-user-option=use-cuda,cc_debug" `, This command builds CUDA
For example:`pip install --config-settings "ortx-user-option=use-cuda,cc-debug" `, This command builds CUDA
kernels into the package and installs it, accompanied by the generation of debug information.
Test:

Просмотреть файл

@@ -19,11 +19,11 @@ class TestCudaOps(unittest.TestCase):
]
input0 = helper.make_tensor_value_info(
'x', onnx_proto.TensorProto.FLOAT, [])
'x', onnx_proto.TensorProto.FLOAT, [None, None])
output1 = helper.make_tensor_value_info(
'neg', onnx_proto.TensorProto.FLOAT, [])
'neg', onnx_proto.TensorProto.FLOAT, [None, None])
output2 = helper.make_tensor_value_info(
'pos', onnx_proto.TensorProto.FLOAT, [])
'pos', onnx_proto.TensorProto.FLOAT, [None, None])
graph = helper.make_graph(nodes, 'test0', [input0], [output1, output2])
model = make_onnx_model(graph)