Mirror of https://github.com/microsoft/msccl.git
Update deb packaging scripts
This commit is contained in:
Parent: f1e92fe2a3
Commit: c9da89254b

Makefile
@@ -58,7 +58,7 @@ LIBSRCFILES := libwrap.cu core.cu all_gather.cu all_reduce.cu broadcast.cu reduc
 LIBNAME := libnccl.so
 VER_MAJOR := 1
 VER_MINOR := 0
-VER_PATCH := 0
+VER_PATCH := 2
 TESTS := all_gather_test all_reduce_test broadcast_test reduce_test reduce_scatter_test
 MPITESTS := mpi_test
 
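Note: the VER_MAJOR/VER_MINOR/VER_PATCH variables presumably drive the versioned file name and SONAME of the library this Makefile builds; the libnccl1 install list further down ships lib/libnccl.so.1 and lib/libnccl.so.1.0.2, which matches 1.0.2. A minimal sketch, assuming the library has already been built into lib/, of checking that the SONAME and symlink chain line up (paths and expected output are illustrative):

# hypothetical post-build check; the dynamic loader only resolves the SONAME link libnccl.so.1
readelf -d lib/libnccl.so.1.0.2 | grep SONAME    # expect: Library soname: [libnccl.so.1]
ls -l lib/libnccl.so lib/libnccl.so.1            # both should point at libnccl.so.1.0.2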
@@ -0,0 +1,7 @@
+/*.debhelper.log
+/*.debhelper
+/*.substvars
+/tmp/
+/files
+/libnccl1/
+/libnccl-dev/
@@ -1,3 +1,9 @@
+nccl (1.0.2-2) trusty; urgency=medium
+
+  * Packages are now libnccl1 and libnccl-dev
+
+ -- Luke Yeager <lyeager@nvidia.com>  Fri, 18 Dec 2015 13:45:16 -0800
+
 nccl (1.0.2) trusty; urgency=medium
 
   * Merged latest upstream changes.
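Note: only the Debian revision is bumped (1.0.2-2); the upstream version stays 1.0.2 because nothing outside the packaging changed. As a hedged aside, an entry like this is normally generated with dch from devscripts rather than written by hand, roughly:

# illustrative only; run from the top of the source tree
dch -v 1.0.2-2 --distribution trusty "Packages are now libnccl1 and libnccl-dev"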
@@ -1,26 +1,31 @@
 Source: nccl
-Section: universe/science
+Section: universe/libs
 Maintainer: Boris Fomitchev <bfomitchev@nvidia.com>
 Priority: optional
-Build-depends: debhelper(>=9), nvidia-352-dev, cuda-toolkit-7-0,
+Build-depends: debhelper(>=9), cuda-toolkit-7-0,
  cuda-curand-dev-7-0, cuda-cublas-dev-7-0, cuda-cudart-dev-7-0, cuda-ld-conf-7-0
 Standards-Version: 3.9.5
 
-Package: nccl
-Section: universe/tools
+Package: libnccl1
+Section: universe/libs
 Architecture: amd64
-Depends: ${misc:Depends}, ${shlibs:Depends}, nvidia-352,
+Depends: ${misc:Depends}, ${shlibs:Depends},
  cuda-curand-7-0, cuda-cublas-7-0, cuda-cudart-7-0, cuda-ld-conf-7-0
-Description: Optimized primitives for collective multi-GPU communication
- NCCL (pronounced "Nickel") is a stand-alone library of standard collective communication routines,
- such as all-gather, reduce, broadcast, etc., that have been optimized to achieve high bandwidth over PCIe.
- NCCL supports up to eight GPUs and can be used in either single- or multi-process (e.g., MPI) applications.
+Description: NVIDIA Communication Collectives Library (NCCL) Runtime
+ NCCL (pronounced "Nickel") is a stand-alone library of standard collective
+ communication routines,such as all-gather, reduce, broadcast, etc., that have
+ been optimized to achieve high bandwidth over PCIe. NCCL supports up to eight
+ GPUs and can be used in either single- or multi-process (e.g., MPI)
+ applications.
 
-Package: nccl-dev
-Section: universe/tools
+Package: libnccl-dev
+Section: universe/libdevel
 Architecture: amd64
-Depends: ${misc:Depends}, ${shlibs:Depends}, nccl (= ${binary:Version})
-Description: Dev package for NCCL library
- NCCL (pronounced "Nickel") is a stand-alone library of standard collective communication routines,
- such as all-gather, reduce, broadcast, etc., that have been optimized to achieve high bandwidth over PCIe.
- NCCL supports up to eight GPUs and can be used in either single- or multi-process (e.g., MPI) applications.
+Depends: ${misc:Depends}, ${shlibs:Depends}, libnccl1 (= ${binary:Version})
+Description: NVIDIA Communication Collectives Library (NCCL) Development Files
+ NCCL (pronounced "Nickel") is a stand-alone library of standard collective
+ communication routines,such as all-gather, reduce, broadcast, etc., that have
+ been optimized to achieve high bandwidth over PCIe. NCCL supports up to eight
+ GPUs and can be used in either single- or multi-process (e.g., MPI)
+ applications.
+
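Note: this control file splits the old nccl package into a runtime package (libnccl1, named after the library SONAME) and a development package (libnccl-dev) pinned to the exact runtime version. The hard nvidia-352 dependency is dropped; shared-library dependencies are left to ${shlibs:Depends}, which dpkg-shlibdeps fills in at build time from the libraries the binary is actually linked against. A small sketch, with assumed .deb file names, of inspecting the result after a build:

# file names are illustrative; adjust to whatever dpkg-buildpackage produced
dpkg-deb -f ../libnccl1_1.0.2-2_amd64.deb Depends      # shows the substituted ${shlibs:Depends}
dpkg-deb -c ../libnccl-dev_1.0.2-2_amd64.deb           # should list usr/include/nccl.h and usr/lib/libnccl.so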
@@ -1,26 +0,0 @@
-
-Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
- * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the distribution.
- * Neither the name of NVIDIA CORPORATION nor the names of its
-   contributors may be used to endorse or promote products derived
-   from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1 @@
+../LICENSE.txt
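Note: a one-line added file whose content is a relative path is how git renders a symlink, so the packaging copyright file now appears to be a link to the top-level LICENSE.txt instead of the duplicated license text deleted above. Roughly the equivalent of doing this by hand (file name assumed to be debian/copyright):

# sketch of the same change done manually
rm debian/copyright
ln -s ../LICENSE.txt debian/copyright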
@@ -1,15 +0,0 @@
-[DEFAULT]
-debian-branch = master
-upstream-branch = master
-verbose = True
-
-ignore-new = True
-
-[git-buildpackage]
-verbose = True
-no-purge = True
-
-[git-import-orig]
-upstream-tag = v%(version)s
-upstream-tree = BRANCH
-dch = False
@@ -0,0 +1,2 @@
+include/nccl.h usr/include
+lib/libnccl.so usr/lib
@@ -0,0 +1,2 @@
+lib/libnccl.so.1 usr/lib
+lib/libnccl.so.1.0.2 usr/lib
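Note: the two new .install lists drive dh_install; each line is a path as staged under debian/tmp followed by the destination directory inside the package. The header and the unversioned libnccl.so link go to the development package, while the SONAME link and the real shared object go to the runtime package (see the rules file below, which stages the upstream install under debian/tmp). Expected runtime-package payload, as an illustration:

/usr/lib/libnccl.so.1        # SONAME symlink the dynamic loader uses
/usr/lib/libnccl.so.1.0.2    # the actual shared object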
@@ -1,4 +0,0 @@
-usr/lib
-usr/lib/libnccl.so
-usr/include
-usr/include/nccl.h
@@ -1,2 +0,0 @@
-usr/lib
-usr/lib/libnccl.so.1
@@ -1 +0,0 @@
-file-in-usr-local
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-set -e
-
-ldconfig
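Note: dropping this hand-written postinst should be safe because the script only ran ldconfig, and debhelper (dh_makeshlibs) generates the ldconfig handling itself for packages that ship shared libraries. A hedged way to confirm after a build, with an assumed .deb name:

# extract the control area and look at what debhelper generated
dpkg-deb -e ../libnccl1_1.0.2-2_amd64.deb ctrl
cat ctrl/postinst ctrl/triggers 2>/dev/null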
@@ -1,14 +1,8 @@
 #!/usr/bin/make -f
 
-# Enabling a single GPU for tests only
-# test.sh fails for non-idential multi-GPU
-# fix pending : https://github.com/torch/cutorch/issues/239
-export CUDA_VISIBLE_DEVICES=0
-
 %:
-	PREFIX="$(CURDIR)/usr" dh $@ --build-system=make --parallel
+	dh $@ --parallel
 
+override_dh_auto_install:
+	PREFIX=debian/tmp dh_auto_install
+
-override_dh_auto_test:
-	echo "Skipping test: fix me later ..."
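Note: the simplified rules file leans entirely on the dh sequencer: the catch-all %: target runs the standard build steps, and the single override stages the upstream install under debian/tmp (via the Makefile's PREFIX variable) so the two .install lists above can split the files between libnccl1 and libnccl-dev; the CUDA_VISIBLE_DEVICES workaround and the in-tree PREFIX invocation are dropped. A sketch of driving a build with these files, assuming the build dependencies from debian/control are installed:

# illustrative; run from the top of the source tree
dpkg-buildpackage -us -uc -b     # unsigned, binary-only build
ls ../libnccl1_*.deb ../libnccl-dev_*.deb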