DeepSpeed has a lot of ops now, and pre-building all of them takes too long.
I noticed that DeepSpeed disabled `ninja` 4 years ago
(https://github.com/microsoft/DeepSpeed/pull/298), and I think we should
consider enabling it again.
The issue mentioned in https://github.com/microsoft/DeepSpeed/pull/298
can be solved by resolving `include_dirs` to absolute paths.
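
For reference, the whole fix boils down to the pattern below (a minimal sketch with hypothetical paths and op names, not the exact DeepSpeed code): resolve every include path to an absolute path before passing it to the extension builder, so the build no longer depends on the working directory that `ninja` compiles from.

```python
import os
from torch.utils.cpp_extension import CppExtension

# Hypothetical relative include paths, as a builder's include_paths() might return them.
relative_includes = ["csrc/includes", "csrc/example_op"]

# Resolve them against the current directory so ninja's own working directory doesn't matter.
include_dirs = [os.path.abspath(p) for p in relative_includes]

ext = CppExtension(
    name="example_op",                # hypothetical extension name
    sources=["csrc/example_op.cpp"],  # hypothetical source file
    include_dirs=include_dirs,
)
```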

---------

Co-authored-by: Logan Adams <114770087+loadams@users.noreply.github.com>
Co-authored-by: Logan Adams <loadams@microsoft.com>
Co-authored-by: Olatunji Ruwase <olruwase@microsoft.com>
Co-authored-by: Michael Wyatt <michaelwyatt@microsoft.com>
Jinzhen Lin 2024-02-21 10:20:11 +08:00 committed by GitHub
Parent 005afe124f
Commit b00533e479
17 changed files with 34 additions and 21 deletions


@@ -453,9 +453,10 @@ class OpBuilder(ABC):
def builder(self):
from torch.utils.cpp_extension import CppExtension
include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
return CppExtension(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
include_dirs=include_dirs,
extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())},
extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
@@ -638,7 +639,7 @@ class CUDAOpBuilder(OpBuilder):
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
else:
from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder
include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \
{'cxx': self.strip_empty_entries(self.cxx_args()), \
'nvcc': self.strip_empty_entries(self.nvcc_args())}
@@ -651,7 +652,7 @@ class CUDAOpBuilder(OpBuilder):
cuda_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
include_dirs=include_dirs,
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args,
extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
@@ -702,11 +703,18 @@ class CUDAOpBuilder(OpBuilder):
'-DROCM_VERSION_MINOR=%s' % ROCM_MINOR
]
else:
try:
nvcc_threads = int(os.getenv("DS_NVCC_THREADS", ""))
if nvcc_threads <= 0:
raise ValueError("")
except ValueError:
nvcc_threads = min(os.cpu_count(), 8)
cuda_major, _ = installed_cuda_version()
args += [
'-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math',
'-std=c++17' if cuda_major > 10 else '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__'
'-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', f'--threads={nvcc_threads}'
]
if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1':
args.append('--ptxas-options=-v')


@@ -3,6 +3,8 @@
# DeepSpeed Team
import os
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
@@ -16,12 +18,12 @@ class CPUOpBuilder(OpBuilder):
def builder(self):
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}
cpp_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
include_dirs=include_dirs,
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args)


@@ -4,6 +4,8 @@
# DeepSpeed Team
import os
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
@@ -17,12 +19,12 @@ class CPUOpBuilder(OpBuilder):
def builder(self):
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}
cpp_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
include_dirs=include_dirs,
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args)


@@ -60,13 +60,13 @@ class InferenceCoreBuilder(CUDAOpBuilder):
sources = [
"inference/v2/kernels/core_ops/core_ops.cpp",
"inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp",
"inference/v2/kernels/core_ops/bias_activations/bias_activation.cu",
"inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu",
"inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp",
"inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cu",
"inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu",
"inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp",
"inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cu",
"inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu",
"inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cpp",
"inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cu",
"inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels_cuda.cu",
]
prefix = self.get_prefix()


@@ -63,18 +63,18 @@ class RaggedOpsBuilder(CUDAOpBuilder):
"inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp",
"inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp",
"inference/v2/kernels/ragged_ops/embed/embed.cpp",
"inference/v2/kernels/ragged_ops/embed/embed.cu",
"inference/v2/kernels/ragged_ops/embed/embed_cuda.cu",
"inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp",
"inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cu",
"inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu",
"inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp",
"inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cu",
"inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu",
"inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp",
"inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cu",
"inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter_cuda.cu",
"inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp",
"inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cu",
"inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu",
"inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.cpp",
"inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp",
"inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cu",
"inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu",
]
prefix = self.get_prefix()


@@ -23,11 +23,11 @@ class SYCLOpBuilder(OpBuilder):
from intel_extension_for_pytorch.xpu.cpp_extension import DPCPPExtension
except ImportError:
from intel_extension_for_pytorch.xpu.utils import DPCPPExtension
include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
print("dpcpp sources = {}".format(self.sources()))
dpcpp_ext = DPCPPExtension(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
include_dirs=include_dirs,
extra_compile_args={
'cxx': self.strip_empty_entries(self.cxx_args()),
},


@@ -119,7 +119,8 @@ cmdclass = {}
# For any pre-installed ops force disable ninja.
if torch_available:
from accelerator import get_accelerator
cmdclass['build_ext'] = get_accelerator().build_extension().with_options(use_ninja=False)
use_ninja = not is_env_set("DS_DISABLE_NINJA")
cmdclass['build_ext'] = get_accelerator().build_extension().with_options(use_ninja=use_ninja)
if torch_available:
TORCH_MAJOR = torch.__version__.split('.')[0]
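
Taken together, these changes make `ninja` the default for pre-built ops and add two environment knobs: `DS_DISABLE_NINJA` to fall back to the previous non-ninja build, and `DS_NVCC_THREADS` to control nvcc's `--threads` value, which otherwise defaults to `min(os.cpu_count(), 8)`. A minimal sketch of that thread-count selection, restating the diff above with its indentation restored:

```python
import os

# Use DS_NVCC_THREADS if it is set to a positive integer, otherwise fall back
# to one nvcc thread per CPU core, capped at 8.
try:
    nvcc_threads = int(os.getenv("DS_NVCC_THREADS", ""))
    if nvcc_threads <= 0:
        raise ValueError("")
except ValueError:
    nvcc_threads = min(os.cpu_count(), 8)

nvcc_args = [f"--threads={nvcc_threads}"]  # appended to the nvcc compile arguments
```

Setting `DS_DISABLE_NINJA` (to a value `is_env_set` treats as true) restores the old `use_ninja=False` behavior for users who run into ninja-specific problems.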