Fix warning - LegacyKeyValueFormat: "ENV key=value" should be used instead of legacy "ENV key value" format (#22800)
### Description

This PR fixes the warning `LegacyKeyValueFormat: "ENV key=value" should be used instead of legacy "ENV key value" format` in all Dockerfiles.

### Motivation and Context

<!--
- Why is this change required? What problem does it solve?
- If it fixes an open issue, please link to the issue here.
-->
Parent: 1f3b675453
Commit: 885a7acd45
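For context, the change is purely syntactic: BuildKit's build checks flag whitespace-separated `ENV` as `LegacyKeyValueFormat` and recommend the `key=value` form, while the resulting environment variable is the same. A minimal sketch of the pattern applied throughout this PR (the base image below is a placeholder, not one of the repo's Dockerfiles):

```dockerfile
# Minimal illustration only; not a Dockerfile from this repo.
FROM ubuntu:22.04

# Legacy form that triggers the LegacyKeyValueFormat warning:
#   ENV DEBIAN_FRONTEND noninteractive

# Recommended key=value form used throughout this PR:
ENV DEBIAN_FRONTEND=noninteractive

# The same syntax also allows several variables in one instruction:
ENV LANG=en_US.UTF-8 \
    LC_ALL=en_US.UTF-8
```
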
```diff
@@ -10,7 +10,7 @@ FROM rocm/pytorch:rocm6.2.3_ubuntu22.04_py3.10_pytorch_release_2.3.0
 ARG ONNXRUNTIME_REPO=https://github.com/Microsoft/onnxruntime
 ARG ONNXRUNTIME_BRANCH=main
 
-ENV PATH /code/cmake-3.27.3-linux-x86_64/bin:${PATH}
+ENV PATH=/code/cmake-3.27.3-linux-x86_64/bin:${PATH}
 
 RUN apt-get update &&\
     apt-get install -y migraphx
```

```diff
@@ -11,7 +11,7 @@ FROM openvino/ubuntu22_runtime:${OPENVINO_VERSION} AS builder
 
 ENV WORKDIR_PATH=/home/openvino
 WORKDIR $WORKDIR_PATH
-ENV DEBIAN_FRONTEND noninteractive
+ENV DEBIAN_FRONTEND=noninteractive
 
 ARG DEVICE=CPU
 ARG ONNXRUNTIME_REPO=https://github.com/microsoft/onnxruntime.git
```

```diff
@@ -41,7 +41,7 @@ RUN tar cvf GPL_sources.tar.gz /sources
 # Deploy stage
 FROM openvino/ubuntu22_runtime:${OPENVINO_VERSION}
 
-ENV DEBIAN_FRONTEND noninteractive
+ENV DEBIAN_FRONTEND=noninteractive
 USER root
 COPY --from=builder /home/openvino/onnxruntime/build/Linux/Release/dist/*.whl ./
 COPY --from=builder /GPL_sources.tar.gz ./
```

```diff
@@ -50,7 +50,7 @@ ARG BUILD_UID=1001
 ARG BUILD_USER=onnxruntimedev
 RUN adduser --uid $BUILD_UID $BUILD_USER
 RUN usermod -a -G video,users ${BUILD_USER}
-ENV WORKDIR_PATH /home/${BUILD_USER}
+ENV WORKDIR_PATH=/home/${BUILD_USER}
 WORKDIR ${WORKDIR_PATH}
 
 USER ${BUILD_USER}
```

```diff
@@ -12,7 +12,7 @@ ARG ONNXRUNTIME_BRANCH=main
 
 WORKDIR /code
 
-ENV PATH /code/cmake-3.27.3-linux-x86_64/bin:${PATH}
+ENV PATH=/code/cmake-3.27.3-linux-x86_64/bin:${PATH}
 
 # Prepare onnxruntime repository & build onnxruntime
 RUN git clone --single-branch --branch ${ONNXRUNTIME_BRANCH} --recursive ${ONNXRUNTIME_REPO} onnxruntime &&\
```

```diff
@@ -17,7 +17,7 @@ RUN apt-get update &&\
 RUN unattended-upgrade
 
 WORKDIR /code
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/code/cmake-3.27.3-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/code/cmake-3.27.3-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 
 # Prepare onnxruntime repository & build onnxruntime with TensorRT
 RUN git clone --single-branch --branch ${ONNXRUNTIME_BRANCH} --recursive ${ONNXRUNTIME_REPO} onnxruntime &&\
```

```diff
@@ -22,8 +22,8 @@ RUN apt-get update && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
-ENV PATH /code/cmake-3.27.3-linux-x86_64/bin:$PATH
-ENV LD_LIBRARY_PATH /opt/xilinx/xrt/lib:$LD_LIBRARY_PATH
+ENV PATH=/code/cmake-3.27.3-linux-x86_64/bin:$PATH
+ENV LD_LIBRARY_PATH=/opt/xilinx/xrt/lib:$LD_LIBRARY_PATH
 
 WORKDIR /code
 RUN . $VAI_ROOT/conda/etc/profile.d/conda.sh &&\
```

```diff
@@ -46,7 +46,7 @@ RUN cd MLNX_OFED_LINUX-${MOFED_VERSION}-${MOFED_OS}-x86_64 && \
     rm -r MLNX_OFED_LINUX-${MOFED_VERSION}-${MOFED_OS}-x86_64
 
 ENV PATH=${OLD_PATH}
-ENV unset OLD_PATH
+ENV unset=OLD_PATH
 
 # python env
 RUN pip3 install --upgrade setuptools
```

```diff
@@ -42,5 +42,5 @@ ARG BUILD_USER=onnxruntimedev
 RUN adduser --uid $BUILD_UID $BUILD_USER
 WORKDIR /home/$BUILD_USER
 USER $BUILD_USER
-ENV PATH /usr/local/dotnet:$PATH
-ENV CUDA_MODULE_LOADING "LAZY"
+ENV PATH=/usr/local/dotnet:$PATH
+ENV CUDA_MODULE_LOADING="LAZY"
```

```diff
@@ -201,5 +201,5 @@ ARG BUILD_USER=onnxruntimedev
 RUN adduser --uid $BUILD_UID $BUILD_USER
 WORKDIR /home/$BUILD_USER
 USER $BUILD_USER
-ENV PATH /usr/local/dotnet:$PATH
+ENV PATH=/usr/local/dotnet:$PATH
 ENV ORTMODULE_ONNX_OPSET_VERSION=$OPSET_VERSION
```

```diff
@@ -9,7 +9,7 @@ ARG BASEIMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubi8
 ARG TRT_VERSION=8.6.1.6-1.cuda11.8
 FROM $BASEIMAGE AS base
 ARG TRT_VERSION
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
 
 RUN dnf install -y bash wget &&\
     dnf clean dbcache
```

```diff
@@ -9,7 +9,7 @@ ARG BASEIMAGE=nvidia/cuda:12.5.1-cudnn-devel-ubi8
 ARG TRT_VERSION=10.6.0.26-1.cuda12.6
 FROM $BASEIMAGE AS base
 ARG TRT_VERSION
-ENV PATH /opt/python/cp310-cp310/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
+ENV PATH=/opt/python/cp310-cp310/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
 
 RUN dnf install -y bash wget &&\
     dnf clean dbcache
```

```diff
@@ -9,7 +9,7 @@ ARG BASEIMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubi8
 ARG TRT_VERSION=10.6.0.26-1.cuda11.8
 FROM $BASEIMAGE AS base
 ARG TRT_VERSION
-ENV PATH /opt/python/cp38-cp38/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
+ENV PATH=/opt/python/cp38-cp38/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
 
 RUN dnf install -y bash wget &&\
     dnf clean dbcache
```

```diff
@@ -10,7 +10,7 @@ ARG TRT_VERSION=10.6.0.26-1+cuda11.8
 ARG LD_LIBRARY_PATH_ARG=/usr/local/lib64:/usr/local/cuda/lib64
 FROM $BASEIMAGE AS base
 ARG TRT_VERSION
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH_ARG}:${LD_LIBRARY_PATH}
```

```diff
@@ -10,7 +10,7 @@ ARG TRT_VERSION=10.6.0.26-1+cuda11.8
 ARG LD_LIBRARY_PATH_ARG=/usr/local/lib64:/usr/local/cuda/lib64
 FROM $BASEIMAGE AS base
 ARG TRT_VERSION
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH_ARG}:${LD_LIBRARY_PATH}
```

```diff
@@ -10,7 +10,7 @@ FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
```

```diff
@@ -82,7 +82,7 @@ RUN if [ -z "$ONNXRUNTIME_COMMIT_ID" ] ; then echo "Building branch ${ONNXRUNTIM
     git reset --hard ${ONNXRUNTIME_COMMIT_ID} && git submodule update --recursive ; fi
 
 # Build ORT
-ENV CUDA_MODULE_LOADING "LAZY"
+ENV CUDA_MODULE_LOADING="LAZY"
 ARG PARSER_CONFIG=""
 RUN /bin/sh build.sh ${PARSER_CONFIG} --parallel --build_shared_lib --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt --tensorrt_home /usr/lib/x86_64-linux-gnu/ --config Release --build_wheel --skip_tests --skip_submodule_sync --cmake_extra_defines '"CMAKE_CUDA_ARCHITECTURES='${CMAKE_CUDA_ARCHITECTURES}'"'
 
```

```diff
@@ -10,7 +10,7 @@ FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
```

```diff
@@ -98,7 +98,7 @@ RUN if [ -z "$ONNXRUNTIME_COMMIT_ID" ] ; then echo "Building branch ${ONNXRUNTIM
     git reset --hard ${ONNXRUNTIME_COMMIT_ID} && git submodule update --recursive ; fi
 
 # Build ORT
-ENV CUDA_MODULE_LOADING "LAZY"
+ENV CUDA_MODULE_LOADING="LAZY"
 ARG PARSER_CONFIG=""
 RUN /bin/sh build.sh ${PARSER_CONFIG} --parallel --build_shared_lib --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt --tensorrt_home /usr/lib/x86_64-linux-gnu/ --config Release --build_wheel --skip_tests --skip_submodule_sync --cmake_extra_defines '"CMAKE_CUDA_ARCHITECTURES='${CMAKE_CUDA_ARCHITECTURES}'"'
 
```

```diff
@@ -10,7 +10,7 @@ FROM nvidia/cuda:12.3.1-devel-ubuntu20.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
```

```diff
@@ -85,7 +85,7 @@ RUN if [ -z "$ONNXRUNTIME_COMMIT_ID" ] ; then echo "Building branch ${ONNXRUNTIM
     git reset --hard ${ONNXRUNTIME_COMMIT_ID} && git submodule update --recursive ; fi
 
 # Build ORT
-ENV CUDA_MODULE_LOADING "LAZY"
+ENV CUDA_MODULE_LOADING="LAZY"
 ARG PARSER_CONFIG=""
 RUN /bin/sh build.sh ${PARSER_CONFIG} --parallel --build_shared_lib --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt --tensorrt_home /usr/lib/x86_64-linux-gnu/ --config Release --build_wheel --skip_tests --skip_submodule_sync --cmake_extra_defines '"CMAKE_CUDA_ARCHITECTURES='${CMAKE_CUDA_ARCHITECTURES}'"'
 
```

```diff
@@ -10,7 +10,7 @@ FROM nvidia/cuda:12.5.1-cudnn-devel-ubuntu20.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
```

```diff
@@ -98,7 +98,7 @@ RUN if [ -z "$ONNXRUNTIME_COMMIT_ID" ] ; then echo "Building branch ${ONNXRUNTIM
     git reset --hard ${ONNXRUNTIME_COMMIT_ID} && git submodule update --recursive ; fi
 
 # Build ORT
-ENV CUDA_MODULE_LOADING "LAZY"
+ENV CUDA_MODULE_LOADING="LAZY"
 ARG PARSER_CONFIG=""
 RUN /bin/sh build.sh ${PARSER_CONFIG} --parallel --build_shared_lib --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt --tensorrt_home /usr/lib/x86_64-linux-gnu/ --config Release --build_wheel --skip_tests --skip_submodule_sync --cmake_extra_defines '"CMAKE_CUDA_ARCHITECTURES='${CMAKE_CUDA_ARCHITECTURES}'"'
 
```

```diff
@@ -12,10 +12,10 @@ RUN /tmp/scripts/install_python_deps.sh -p $PYTHON_VERSION -d EdgeDevice
 RUN apt update && apt install -y libnuma1 ocl-icd-libopencl1 && \
     rm -rf /var/lib/apt/lists/* /tmp/scripts
 
-ENV INTEL_OPENVINO_DIR /opt/intel/openvino_${OPENVINO_VERSION}
-ENV LD_LIBRARY_PATH $INTEL_OPENVINO_DIR/runtime/lib/intel64:$INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib:/usr/local/openblas/lib:$LD_LIBRARY_PATH
-ENV OpenVINO_DIR $INTEL_OPENVINO_DIR/runtime/cmake
-ENV IE_PLUGINS_PATH $INTEL_OPENVINO_DIR/runtime/lib/intel64
+ENV INTEL_OPENVINO_DIR=/opt/intel/openvino_${OPENVINO_VERSION}
+ENV LD_LIBRARY_PATH=$INTEL_OPENVINO_DIR/runtime/lib/intel64:$INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib:/usr/local/openblas/lib:$LD_LIBRARY_PATH
+ENV OpenVINO_DIR=$INTEL_OPENVINO_DIR/runtime/cmake
+ENV IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/runtime/lib/intel64
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN cd /opt && mkdir -p intel && cd intel && \
```

```diff
@@ -10,7 +10,7 @@ FROM nvidia/cuda:12.5.1-cudnn-devel-ubuntu20.04 AS base
 # The local directory into which to build and install CMAKE
 ARG ONNXRUNTIME_LOCAL_CODE_DIR=/code
 
-ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${ONNXRUNTIME_LOCAL_CODE_DIR}/cmake-3.30.1-linux-x86_64/bin:/opt/miniconda/bin:${PATH}
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update &&\
```

```diff
@@ -92,7 +92,7 @@ RUN if [ -z "$ONNXRUNTIME_COMMIT_ID" ] ; then echo "Building branch ${ONNXRUNTIM
     git reset --hard ${ONNXRUNTIME_COMMIT_ID} && git submodule update --recursive ; fi
 
 # Build ORT
-ENV CUDA_MODULE_LOADING "LAZY"
+ENV CUDA_MODULE_LOADING="LAZY"
 ARG PARSER_CONFIG=""
 RUN /bin/sh build.sh ${PARSER_CONFIG} --parallel --build_shared_lib --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt --tensorrt_home /usr/lib/x86_64-linux-gnu/ --config Release --build_wheel --skip_tests --skip_submodule_sync --cmake_extra_defines '"CMAKE_CUDA_ARCHITECTURES='${CMAKE_CUDA_ARCHITECTURES}'"'
 
```

```diff
@@ -31,11 +31,11 @@ else \
     echo "TRT_VERSION is none skipping Tensor RT Installation" ; \
 fi
 
-ENV PATH /usr/lib/jvm/msopenjdk-11/bin:$PATH
+ENV PATH=/usr/lib/jvm/msopenjdk-11/bin:$PATH
 ENV LANG=en_US.UTF-8
 ENV LC_ALL=en_US.UTF-8
 ENV JAVA_HOME=/usr/lib/jvm/msopenjdk-11
-ENV CUDAHOSTCXX /opt/rh/gcc-toolset-11/root/usr/bin/g++
+ENV CUDAHOSTCXX=/opt/rh/gcc-toolset-11/root/usr/bin/g++
 ADD scripts /tmp/scripts
 RUN cd /tmp/scripts && /tmp/scripts/install_deps.sh && rm -rf /tmp/scripts
 
```

```diff
@@ -35,11 +35,11 @@ fi
 ENV LANG=en_US.UTF-8
 ENV LC_ALL=en_US.UTF-8
 
-ENV CUDAHOSTCXX /opt/rh/gcc-toolset-12/root/usr/bin/g++
+ENV CUDAHOSTCXX=/opt/rh/gcc-toolset-12/root/usr/bin/g++
 ADD scripts /tmp/scripts
 RUN sed -i 's/enabled\s*=\s*1/enabled = 1\nexclude=dotnet* aspnet* netstandard*/g' /etc/yum.repos.d/ubi.repo && \
     rpm -Uvh https://packages.microsoft.com/config/centos/8/packages-microsoft-prod.rpm && dnf install -y msopenjdk-11 && cd /tmp/scripts && /tmp/scripts/install_deps.sh && rm -rf /tmp/scripts
-ENV PATH /usr/lib/jvm/msopenjdk-11/bin:$PATH
+ENV PATH=/usr/lib/jvm/msopenjdk-11/bin:$PATH
 ENV JAVA_HOME=/usr/lib/jvm/msopenjdk-11
 ARG BUILD_UID=1001
 ARG BUILD_USER=onnxruntimedev
```

```diff
@@ -32,8 +32,8 @@ else \
     echo "TRT_VERSION is x${TRT_VERSION} skipping Tensor RT Installation" ; \
 fi
 
-ENV PATH /usr/local/cuda/bin:$PATH
-ENV CUDA_MODULE_LOADING "LAZY"
+ENV PATH=/usr/local/cuda/bin:$PATH
+ENV CUDA_MODULE_LOADING="LAZY"
 
 ADD scripts /tmp/scripts
 RUN cd /tmp/scripts && /tmp/scripts/install_centos.sh && rm -rf /tmp/scripts
```

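As a follow-up note (an assumption about the toolchain, not something stated in this PR): Docker/BuildKit versions with build checks (roughly Dockerfile frontend 1.8 and later) allow a Dockerfile to opt into treating check violations such as `LegacyKeyValueFormat` as errors, which would keep the legacy syntax from reappearing. A minimal sketch:

```dockerfile
# syntax=docker/dockerfile:1
# check=error=true
# The check directive above is assumed to be supported by the BuildKit in use;
# it turns build-check warnings (e.g. LegacyKeyValueFormat) into build failures.
FROM ubuntu:22.04

ENV DEBIAN_FRONTEND=noninteractive
```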