# LightGBM/CMakeLists.txt

option(USE_MPI "Enable MPI-based distributed learning" OFF)
option(USE_OPENMP "Enable OpenMP" ON)
option(USE_GPU "Enable GPU-accelerated training" OFF)
option(USE_SWIG "Enable SWIG to generate Java API" OFF)
option(USE_TIMETAG "Set to ON to output time costs" OFF)
option(USE_CUDA "Enable CUDA-accelerated training" OFF)
option(USE_DEBUG "Set to ON for Debug mode" OFF)
option(USE_SANITIZER "Use sanitizer flags" OFF)
option(USE_HOMEBREW_FALLBACK "(macOS-only) also look in 'brew --prefix' for libraries (e.g. OpenMP)" ON)
set(
    ENABLED_SANITIZERS
    "address" "leak" "undefined"
    CACHE
    STRING
    "Semicolon separated list of sanitizer names, e.g., 'address;leak'. \
Supported sanitizers are address, leak, undefined and thread."
)
option(BUILD_CLI "Build the 'lightgbm' command-line interface in addition to lib_lightgbm" ON)
option(BUILD_CPP_TEST "Build C++ tests with Google Test" OFF)
option(BUILD_STATIC_LIB "Build static library" OFF)
option(INSTALL_HEADERS "Install headers to CMAKE_INSTALL_PREFIX (e.g. '/usr/local/include')" ON)
option(__BUILD_FOR_PYTHON "Set to ON if building lib_lightgbm for use with the Python-package" OFF)
option(__BUILD_FOR_R "Set to ON if building lib_lightgbm for use with the R-package" OFF)
option(__INTEGRATE_OPENCL "Set to ON if building LightGBM with the OpenCL ICD Loader and its dependencies included" OFF)
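
# Example configure commands (illustrative only; the option names come from the option() calls above):
#   cmake -B build -S . -DUSE_OPENMP=ON -DUSE_DEBUG=OFF
#   cmake -B build -S . -DBUILD_STATIC_LIB=ON -DBUILD_CLI=OFF
#   cmake --build build -j4
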
cmake_minimum_required(VERSION 3.28)
# If using Visual Studio generators, always target v10.x of the Windows SDK.
# Doing this avoids lookups that could fall back to very old versions, e.g. by finding
# outdated registry entries.
# ref: https://cmake.org/cmake/help/latest/variable/CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION.html
if(CMAKE_GENERATOR MATCHES "Visual Studio")
    set(CMAKE_SYSTEM_VERSION 10.0 CACHE INTERNAL "target Windows SDK version" FORCE)
endif()
project(lightgbm LANGUAGES C CXX)
if(BUILD_CPP_TEST)
    set(CMAKE_CXX_STANDARD 14)
else()
    set(CMAKE_CXX_STANDARD 11)
endif()
set(CMAKE_CXX_STANDARD_REQUIRED ON)
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules")
#-- Sanitizer
if(USE_SANITIZER)
    if(MSVC)
        message(FATAL_ERROR "Sanitizers are not supported with MSVC.")
    endif()
    include(cmake/Sanitizer.cmake)
    enable_sanitizers("${ENABLED_SANITIZERS}")
endif()
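
# Example (illustrative): a sanitizer-instrumented build for testing; requires a GCC or Clang
# toolchain, since the check above rejects MSVC.
#   cmake -B build -S . -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;undefined"
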
if(__INTEGRATE_OPENCL)
    set(__INTEGRATE_OPENCL ON CACHE BOOL "" FORCE)
    set(USE_GPU OFF CACHE BOOL "" FORCE)
    message(STATUS "Building library with integrated OpenCL components")
endif()
if(__BUILD_FOR_PYTHON OR __BUILD_FOR_R OR USE_SWIG)
    # the SWIG wrapper and the Python and R packages don't require the CLI
    set(BUILD_CLI OFF)
    # installing the SWIG wrapper or the R and Python packages shouldn't place LightGBM's headers
    # outside of where the package is installed
    set(INSTALL_HEADERS OFF)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
    if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.8.2")
        message(FATAL_ERROR "Insufficient gcc version")
    endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
    if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "3.8")
        message(FATAL_ERROR "Insufficient Clang version")
    endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
    if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.1.0")
        message(FATAL_ERROR "Insufficient AppleClang version")
    endif()
elseif(MSVC)
    if(MSVC_VERSION LESS 1900)
        message(
            FATAL_ERROR
            "The compiler ${CMAKE_CXX_COMPILER} doesn't support required C++11 features. Please use a newer MSVC."
        )
    endif()
endif()
if(USE_SWIG)
    find_package(SWIG REQUIRED)
    find_package(Java REQUIRED)
    find_package(JNI REQUIRED)
    include(UseJava)
    include(UseSWIG)
    set(SWIG_CXX_EXTENSION "cxx")
    set(SWIG_EXTRA_LIBRARIES "")
    set(SWIG_JAVA_EXTRA_FILE_EXTENSIONS ".java" "JNI.java")
    set(SWIG_MODULE_JAVA_LANGUAGE "JAVA")
    set(SWIG_MODULE_JAVA_SWIG_LANGUAGE_FLAG "java")
    set(CMAKE_SWIG_OUTDIR "${CMAKE_CURRENT_BINARY_DIR}/java")
    include_directories(Java_INCLUDE_DIRS)
    include_directories(JNI_INCLUDE_DIRS)
    include_directories($ENV{JAVA_HOME}/include)
    if(WIN32)
        set(LGBM_SWIG_DESTINATION_DIR "${CMAKE_CURRENT_BINARY_DIR}/com/microsoft/ml/lightgbm/windows/x86_64")
        include_directories($ENV{JAVA_HOME}/include/win32)
    elseif(APPLE)
        set(LGBM_SWIG_DESTINATION_DIR "${CMAKE_CURRENT_BINARY_DIR}/com/microsoft/ml/lightgbm/osx/x86_64")
        include_directories($ENV{JAVA_HOME}/include/darwin)
    else()
        set(LGBM_SWIG_DESTINATION_DIR "${CMAKE_CURRENT_BINARY_DIR}/com/microsoft/ml/lightgbm/linux/x86_64")
        include_directories($ENV{JAVA_HOME}/include/linux)
    endif()
    file(MAKE_DIRECTORY "${LGBM_SWIG_DESTINATION_DIR}")
endif()
set(EIGEN_DIR "${PROJECT_SOURCE_DIR}/external_libs/eigen")
include_directories(${EIGEN_DIR})
# See https://gitlab.com/libeigen/eigen/-/blob/master/COPYING.README
add_definitions(-DEIGEN_MPL2_ONLY)
add_definitions(-DEIGEN_DONT_PARALLELIZE)
set(FAST_DOUBLE_PARSER_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/external_libs/fast_double_parser/include")
include_directories(${FAST_DOUBLE_PARSER_INCLUDE_DIR})
set(FMT_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/external_libs/fmt/include")
include_directories(${FMT_INCLUDE_DIR})
if(__BUILD_FOR_R)
    find_package(LibR REQUIRED)
    message(STATUS "LIBR_EXECUTABLE: ${LIBR_EXECUTABLE}")
    message(STATUS "LIBR_INCLUDE_DIRS: ${LIBR_INCLUDE_DIRS}")
    message(STATUS "LIBR_LIBS_DIR: ${LIBR_LIBS_DIR}")
message(STATUS "LIBR_CORE_LIBRARY: ${LIBR_CORE_LIBRARY}")
include_directories(${LIBR_INCLUDE_DIRS})
add_definitions(-DLGB_R_BUILD)
endif()
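
# Note: __BUILD_FOR_R is an internal switch that is normally set by the R package's own build
# scripts rather than passed by hand; it relies on a FindLibR CMake module to locate the R installation.
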
if(USE_TIMETAG)
    add_definitions(-DTIMETAG)
endif()

if(USE_DEBUG)
    add_definitions(-DDEBUG)
endif()
if(USE_MPI)
    find_package(MPI REQUIRED)
    add_definitions(-DUSE_MPI)
else()
    add_definitions(-DUSE_SOCKET)
endif()
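
# Example (illustrative): distributed learning over MPI needs an MPI implementation (e.g. Open MPI
# or MPICH) discoverable by find_package(MPI); without USE_MPI, socket-based networking is compiled in.
#   cmake -B build -S . -DUSE_MPI=ON
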
if(USE_CUDA)
    set(CMAKE_CUDA_HOST_COMPILER "${CMAKE_CXX_COMPILER}")
    enable_language(CUDA)
    set(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
endif()
if(USE_OPENMP)
    if(APPLE)
        find_package(OpenMP)
        if(NOT OpenMP_FOUND)
            if(USE_HOMEBREW_FALLBACK)
                # libomp 15.0+ from brew is keg-only, so have to search in other locations.
                # See https://github.com/Homebrew/homebrew-core/issues/112107#issuecomment-1278042927.
                execute_process(COMMAND brew --prefix libomp
                    OUTPUT_VARIABLE HOMEBREW_LIBOMP_PREFIX
                    OUTPUT_STRIP_TRAILING_WHITESPACE)
                set(OpenMP_C_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_LIBOMP_PREFIX}/include")
                set(OpenMP_CXX_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_LIBOMP_PREFIX}/include")
                set(OpenMP_C_LIB_NAMES omp)
                set(OpenMP_CXX_LIB_NAMES omp)
                set(OpenMP_omp_LIBRARY ${HOMEBREW_LIBOMP_PREFIX}/lib/libomp.dylib)
            endif()
            find_package(OpenMP REQUIRED)
        endif()
    else()
        find_package(OpenMP REQUIRED)
    endif()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
endif()
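
# Example (illustrative, macOS): if find_package(OpenMP) fails with Apple Clang, the fallback above
# searches the keg-only Homebrew libomp, so installing it is usually sufficient:
#   brew install libomp
#   cmake -B build -S .
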
if(USE_GPU)
    set(BOOST_COMPUTE_HEADER_DIR ${PROJECT_SOURCE_DIR}/external_libs/compute/include)
    include_directories(${BOOST_COMPUTE_HEADER_DIR})
    find_package(OpenCL REQUIRED)
    include_directories(${OpenCL_INCLUDE_DIRS})
    message(STATUS "OpenCL include directory: " ${OpenCL_INCLUDE_DIRS})
    if(WIN32)
        set(Boost_USE_STATIC_LIBS ON)
    endif()
    find_package(Boost 1.56.0 COMPONENTS filesystem system REQUIRED)
    if(WIN32)
        # disable autolinking in boost
        add_definitions(-DBOOST_ALL_NO_LIB)
    endif()
    include_directories(${Boost_INCLUDE_DIRS})
    add_definitions(-DUSE_GPU)
endif()
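
# Example (illustrative): the OpenCL-based GPU build needs an OpenCL runtime and Boost >= 1.56
# (filesystem and system components); BOOST_ROOT can be used as a hint for find_package(Boost).
#   cmake -B build -S . -DUSE_GPU=ON -DBOOST_ROOT=/path/to/boost   # placeholder path
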
if(__INTEGRATE_OPENCL)
    if(APPLE)
        message(FATAL_ERROR "Integrated OpenCL build is not available on macOS")
    else()
        include(cmake/IntegratedOpenCL.cmake)
        add_definitions(-DUSE_GPU)
    endif()
endif()
if(BUILD_CPP_TEST AND MSVC)
    # Use /MT flag to statically link the C runtime
    set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
endif()
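
# Example (illustrative): building the C++ unit tests; with MSVC the static runtime selected above
# is used. LightGBM's installation docs name the test target testlightgbm.
#   cmake -B build -S . -DBUILD_CPP_TEST=ON
#   cmake --build build --target testlightgbm
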
if(USE_CUDA)
    find_package(CUDAToolkit 11.0 REQUIRED)
    include_directories(${CUDAToolkit_INCLUDE_DIRS})
    set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler=${OpenMP_CXX_FLAGS} -Xcompiler=-fPIC -Xcompiler=-Wall")

    # reference for mapping of CUDA toolkit component versions to supported architectures ("compute capabilities"):
    # https://en.wikipedia.org/wiki/CUDA#GPUs_supported
    set(CUDA_ARCHS "60" "61" "62" "70" "75")
    if(CUDA_VERSION VERSION_GREATER_EQUAL "11.0")
        list(APPEND CUDA_ARCHS "80")
    endif()
    if(CUDA_VERSION VERSION_GREATER_EQUAL "11.1")
        list(APPEND CUDA_ARCHS "86")
    endif()
    if(CUDA_VERSION VERSION_GREATER_EQUAL "11.5")
        list(APPEND CUDA_ARCHS "87")
    endif()
    if(CUDA_VERSION VERSION_GREATER_EQUAL "11.8")
        list(APPEND CUDA_ARCHS "89")
        list(APPEND CUDA_ARCHS "90")
    endif()
    list(POP_BACK CUDA_ARCHS CUDA_LAST_SUPPORTED_ARCH)
    list(APPEND CUDA_ARCHS "${CUDA_LAST_SUPPORTED_ARCH}+PTX")
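
    # For illustration: the POP_BACK/APPEND pair above re-adds the newest architecture from the list
    # with a "+PTX" suffix, so PTX for that architecture is embedded as a forward-compatibility
    # fallback on newer GPUs. A CUDA build is then configured along the lines of (illustrative):
    #   cmake -B build -S . -DUSE_CUDA=ON
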
    if(USE_DEBUG)
        set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -g")
    else()
        set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -O3 -lineinfo")
    endif()
    message(STATUS "CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")
    add_definitions(-DUSE_CUDA)
if(NOT DEFINED CMAKE_CUDA_STANDARD)
set(CMAKE_CUDA_STANDARD 11)
set(CMAKE_CUDA_STANDARD_REQUIRED ON)
endif()
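# Preprocessor definition sets for the CUDA histogram kernels built below.
# ALLFEATS_DEFINES and FULLDATA_DEFINES each extend the previous set.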
set(
BASE_DEFINES
-DPOWER_FEATURE_WORKGROUPS=12
-DUSE_CONSTANT_BUF=0
)
set(
ALLFEATS_DEFINES
${BASE_DEFINES}
-DENABLE_ALL_FEATURES
)
set(
FULLDATA_DEFINES
${ALLFEATS_DEFINES}
-DIGNORE_INDICES
)
message(STATUS "ALLFEATS_DEFINES: ${ALLFEATS_DEFINES}")
message(STATUS "FULLDATA_DEFINES: ${FULLDATA_DEFINES}")
function(add_histogram hsize hname hadd hconst hdir)
add_library(histo${hsize}${hname} OBJECT src/treelearner/kernels/histogram${hsize}.cu)
set_target_properties(
histo${hsize}${hname}
PROPERTIES
CUDA_SEPARABLE_COMPILATION ON
CUDA_ARCHITECTURES ${CUDA_ARCHS}
)
if(hadd)
list(APPEND histograms histo${hsize}${hname})
set(histograms ${histograms} PARENT_SCOPE)
endif()
target_compile_definitions(
histo${hsize}${hname}
PRIVATE
-DCONST_HESSIAN=${hconst}
${hdir}
)
endfunction()
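# Build each kernel variant: sparse (base defines), all-features, and full-data,
# each with and without a constant hessian.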
foreach(hsize _16_64_256)
add_histogram("${hsize}" "_sp_const" "True" "1" "${BASE_DEFINES}")
add_histogram("${hsize}" "_sp" "True" "0" "${BASE_DEFINES}")
add_histogram("${hsize}" "-allfeats_sp_const" "False" "1" "${ALLFEATS_DEFINES}")
add_histogram("${hsize}" "-allfeats_sp" "False" "0" "${ALLFEATS_DEFINES}")
add_histogram("${hsize}" "-fulldata_sp_const" "True" "1" "${FULLDATA_DEFINES}")
add_histogram("${hsize}" "-fulldata_sp" "True" "0" "${FULLDATA_DEFINES}")
endforeach()
endif()
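# Check whether _mm_prefetch() is available and, if so, define MM_PREFETCH for the sources.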
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("
#include <xmmintrin.h>
int main() {
int a = 0;
_mm_prefetch(&a, _MM_HINT_NTA);
return 0;
}
" MM_PREFETCH)
if(${MM_PREFETCH})
message(STATUS "Using _mm_prefetch")
add_definitions(-DMM_PREFETCH)
endif()
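# Likewise check for _mm_malloc()/_mm_free() and define MM_MALLOC when available.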
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("
#include <mm_malloc.h>
int main() {
char *a = (char*)_mm_malloc(8, 16);
_mm_free(a);
return 0;
}
" MM_MALLOC)
if(${MM_MALLOC})
message(STATUS "Using _mm_malloc")
add_definitions(-DMM_MALLOC)
endif()
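# Warning and optimization flags for GCC/Clang-style toolchains (UNIX, MinGW, Cygwin).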
if(UNIX OR MINGW OR CYGWIN)
set(
CMAKE_CXX_FLAGS
"${CMAKE_CXX_FLAGS} -pthread -Wextra -Wall -Wno-ignored-attributes -Wno-unknown-pragmas -Wno-return-type"
)
if(MINGW)
# ignore this warning: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95353
set(
CMAKE_CXX_FLAGS
"${CMAKE_CXX_FLAGS} -Wno-stringop-overflow"
)
endif()
if(USE_DEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
endif()
if(USE_SWIG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing")
endif()
if(NOT USE_OPENMP)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unknown-pragmas -Wno-unused-private-field")
endif()
if(__BUILD_FOR_R AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-cast-function-type")
endif()
endif()
if(WIN32 AND MINGW)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++")
endif()
# Check if inet_pton is already available, to avoid conflicts with the implementation in LightGBM.
# As of 2022, MinGW started including a definition of inet_pton.
if(WIN32)
include(CheckSymbolExists)
list(APPEND CMAKE_REQUIRED_LIBRARIES "ws2_32")
check_symbol_exists(inet_pton "ws2tcpip.h" WIN_INET_PTON_FOUND)
if(WIN_INET_PTON_FOUND)
add_definitions(-DWIN_HAS_INET_PTON)
endif()
list(REMOVE_ITEM CMAKE_REQUIRED_LIBRARIES "ws2_32")
endif()
if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /MP")
if(__BUILD_FOR_R)
# MSVC does not like this commit:
# https://github.com/wch/r-source/commit/fb52ac1a610571fcb8ac92d886b9fefcffaa7d48
#
# and raises "error C3646: 'private_data_c': unknown override specifier"
add_definitions(-DR_LEGACY_RCOMPLEX)
endif()
if(USE_DEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Od")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /O2 /Ob2 /Oi /Ot /Oy")
endif()
else()
if(NOT BUILD_STATIC_LIB)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
endif()
if(NOT USE_DEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -funroll-loops")
endif()
endif()
set(LightGBM_HEADER_DIR ${PROJECT_SOURCE_DIR}/include)
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR})
set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR})
include_directories(${LightGBM_HEADER_DIR})
if(USE_MPI)
include_directories(${MPI_CXX_INCLUDE_PATH})
endif()
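# Core C++ sources, compiled once into the lightgbm_objs object library below.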
set(
LGBM_SOURCES
src/boosting/boosting.cpp
src/boosting/gbdt_model_text.cpp
src/boosting/gbdt_prediction.cpp
src/boosting/gbdt.cpp
src/boosting/prediction_early_stop.cpp
src/boosting/sample_strategy.cpp
src/io/bin.cpp
src/io/config_auto.cpp
src/io/config.cpp
src/io/dataset_loader.cpp
src/io/dataset.cpp
src/io/file_io.cpp
src/io/json11.cpp
src/io/metadata.cpp
src/io/parser.cpp
src/io/train_share_states.cpp
src/io/tree.cpp
src/metric/dcg_calculator.cpp
src/metric/metric.cpp
src/network/linker_topo.cpp
src/network/linkers_mpi.cpp
src/network/linkers_socket.cpp
src/network/network.cpp
src/objective/objective_function.cpp
src/treelearner/data_parallel_tree_learner.cpp
src/treelearner/feature_histogram.cpp
src/treelearner/feature_parallel_tree_learner.cpp
src/treelearner/gpu_tree_learner.cpp
src/treelearner/gradient_discretizer.cpp
src/treelearner/linear_tree_learner.cpp
src/treelearner/serial_tree_learner.cpp
src/treelearner/tree_learner.cpp
src/treelearner/voting_parallel_tree_learner.cpp
src/utils/openmp_wrapper.cpp
)
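# Additional sources appended to LGBM_SOURCES only when USE_CUDA is enabled.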
set(
LGBM_CUDA_SOURCES
src/boosting/cuda/cuda_score_updater.cpp
src/boosting/cuda/cuda_score_updater.cu
src/metric/cuda/cuda_binary_metric.cpp
src/metric/cuda/cuda_pointwise_metric.cpp
src/metric/cuda/cuda_regression_metric.cpp
src/metric/cuda/cuda_pointwise_metric.cu
src/objective/cuda/cuda_binary_objective.cpp
src/objective/cuda/cuda_multiclass_objective.cpp
src/objective/cuda/cuda_rank_objective.cpp
src/objective/cuda/cuda_regression_objective.cpp
src/objective/cuda/cuda_binary_objective.cu
src/objective/cuda/cuda_multiclass_objective.cu
src/objective/cuda/cuda_rank_objective.cu
src/objective/cuda/cuda_regression_objective.cu
src/treelearner/cuda/cuda_best_split_finder.cpp
src/treelearner/cuda/cuda_data_partition.cpp
src/treelearner/cuda/cuda_histogram_constructor.cpp
src/treelearner/cuda/cuda_leaf_splits.cpp
src/treelearner/cuda/cuda_single_gpu_tree_learner.cpp
src/treelearner/cuda/cuda_best_split_finder.cu
src/treelearner/cuda/cuda_data_partition.cu
src/treelearner/cuda/cuda_gradient_discretizer.cu
src/treelearner/cuda/cuda_histogram_constructor.cu
src/treelearner/cuda/cuda_leaf_splits.cu
src/treelearner/cuda/cuda_single_gpu_tree_learner.cu
src/io/cuda/cuda_column_data.cu
src/io/cuda/cuda_tree.cu
src/io/cuda/cuda_column_data.cpp
src/io/cuda/cuda_metadata.cpp
src/io/cuda/cuda_row_data.cpp
src/io/cuda/cuda_tree.cpp
src/cuda/cuda_utils.cpp
src/cuda/cuda_algorithms.cu
)
if(USE_CUDA)
list(APPEND LGBM_SOURCES ${LGBM_CUDA_SOURCES})
endif()
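# Compile all sources once into an object library so they can be reused without duplicate compilation.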
add_library(lightgbm_objs OBJECT ${LGBM_SOURCES})
if(BUILD_CLI)
add_executable(lightgbm src/main.cpp src/application/application.cpp)
target_link_libraries(lightgbm PRIVATE lightgbm_objs)
endif()
set(API_SOURCES "src/c_api.cpp")
# Only build the R part of the library if building for
# use with the R-package
if(__BUILD_FOR_R)
list(APPEND API_SOURCES "src/lightgbm_R.cpp")
endif()
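# The C API (plus the R wrapper when building for the R-package) is compiled as its own object library.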
add_library(lightgbm_capi_objs OBJECT ${API_SOURCES})
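# lib_lightgbm itself is built as either a static or a shared library.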
if(BUILD_STATIC_LIB)
add_library(_lightgbm STATIC)
else()
add_library(_lightgbm SHARED)
endif()
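# _lightgbm is declared without sources; its object files are contributed by the
# lightgbm_objs and lightgbm_capi_objs object libraries linked in below.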
# R expects libraries of the form <project>.{dll,dylib,so}, not lib_<project>.{dll,dylib,so}
if(__BUILD_FOR_R)
set_target_properties(
_lightgbm
PROPERTIES
PREFIX ""
OUTPUT_NAME "lightgbm"
)
endif()
# LightGBM headers include openmp, cuda, R etc. headers,
# thus PUBLIC is required for building _lightgbm_swig target.
target_link_libraries(_lightgbm PUBLIC lightgbm_capi_objs lightgbm_objs)
if(MSVC AND NOT __BUILD_FOR_R)
set_target_properties(_lightgbm PROPERTIES OUTPUT_NAME "lib_lightgbm")
endif()
if(USE_SWIG)
set_property(SOURCE swig/lightgbmlib.i PROPERTY CPLUSPLUS ON)
list(APPEND swig_options -package com.microsoft.ml.lightgbm)
set_property(SOURCE swig/lightgbmlib.i PROPERTY SWIG_FLAGS "${swig_options}")
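    # swig_add_library() comes from CMake's UseSWIG module: it runs SWIG on the
    # lightgbmlib.i interface to generate Java wrapper sources and compiles the
    # generated C++ glue code into the _lightgbm_swig JNI library.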
swig_add_library(_lightgbm_swig LANGUAGE java SOURCES swig/lightgbmlib.i)
swig_link_libraries(_lightgbm_swig _lightgbm)
set_target_properties(
_lightgbm_swig
PROPERTIES
# needed to ensure Linux build does not have lib prefix specified twice, e.g. liblib_lightgbm_swig
PREFIX ""
# needed in some versions of CMake for VS and MinGW builds to ensure output dll has lib prefix
OUTPUT_NAME "lib_lightgbm_swig"
)
if(WIN32)
set(LGBM_SWIG_LIB_DESTINATION_PATH "${LGBM_SWIG_DESTINATION_DIR}/lib_lightgbm_swig.dll")
if(MINGW OR CYGWIN)
set(LGBM_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/lib_lightgbm.dll")
set(LGBM_SWIG_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/lib_lightgbm_swig.dll")
else()
set(LGBM_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/Release/lib_lightgbm.dll")
set(LGBM_SWIG_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/Release/lib_lightgbm_swig.dll")
endif()
elseif(APPLE)
set(LGBM_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/lib_lightgbm.dylib")
set(LGBM_SWIG_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/lib_lightgbm_swig.jnilib")
set(LGBM_SWIG_LIB_DESTINATION_PATH "${LGBM_SWIG_DESTINATION_DIR}/lib_lightgbm_swig.dylib")
else()
set(LGBM_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/lib_lightgbm.so")
set(LGBM_SWIG_LIB_SOURCE_PATH "${PROJECT_SOURCE_DIR}/lib_lightgbm_swig.so")
set(LGBM_SWIG_LIB_DESTINATION_PATH "${LGBM_SWIG_DESTINATION_DIR}/lib_lightgbm_swig.so")
endif()
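    # After the SWIG library is built: compile the generated Java classes, copy the
    # lib_lightgbm and SWIG native libraries next to them, and package everything
    # into lightgbmlib.jar.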
add_custom_command(
TARGET _lightgbm_swig
POST_BUILD
COMMAND "${Java_JAVAC_EXECUTABLE}" -d . java/*.java
COMMAND
"${CMAKE_COMMAND}"
-E
copy_if_different
"${LGBM_LIB_SOURCE_PATH}"
"${LGBM_SWIG_DESTINATION_DIR}"
COMMAND
"${CMAKE_COMMAND}"
-E
copy_if_different
"${LGBM_SWIG_LIB_SOURCE_PATH}"
"${LGBM_SWIG_LIB_DESTINATION_PATH}"
COMMAND "${Java_JAR_EXECUTABLE}" -cf lightgbmlib.jar com
)
endif()
if(USE_MPI)
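    # MPI_CXX_LIBRARIES is the standard variable filled in by CMake's FindMPI module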
target_link_libraries(lightgbm_objs PUBLIC ${MPI_CXX_LIBRARIES})
endif()
if(USE_OPENMP)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
target_link_libraries(lightgbm_objs PUBLIC OpenMP::OpenMP_CXX)
        # c_api headers also include OpenMP headers, so compiling
        # lightgbm_capi_objs needs the OpenMP include directories.
        # Specifying OpenMP in target_link_libraries propagates those include
        # directory requirements to the compile step.
# This uses CMake's Transitive Usage Requirements. Refer to CMake doc:
# https://cmake.org/cmake/help/v3.16/manual/cmake-buildsystem.7.html#transitive-usage-requirements
target_link_libraries(lightgbm_capi_objs PUBLIC OpenMP::OpenMP_CXX)
endif()
endif()
if(USE_GPU)
target_link_libraries(lightgbm_objs PUBLIC ${OpenCL_LIBRARY} ${Boost_LIBRARIES})
endif()
if(__INTEGRATE_OPENCL)
# targets OpenCL and Boost are added in IntegratedOpenCL.cmake
add_dependencies(lightgbm_objs OpenCL Boost)
# variables INTEGRATED_OPENCL_* are set in IntegratedOpenCL.cmake
target_include_directories(lightgbm_objs PRIVATE ${INTEGRATED_OPENCL_INCLUDES})
target_compile_definitions(lightgbm_objs PRIVATE ${INTEGRATED_OPENCL_DEFINITIONS})
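    # CMAKE_DL_LIBS provides dlopen()/dlclose(), which the integrated OpenCL ICD loader
    # uses to load vendor OpenCL drivers at runtime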
target_link_libraries(lightgbm_objs PUBLIC ${INTEGRATED_OPENCL_LIBRARIES} ${CMAKE_DL_LIBS})
endif()
if(USE_CUDA)
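    # CUDA_SEPARABLE_COMPILATION compiles CUDA sources as relocatable device code;
    # CUDA_RESOLVE_DEVICE_SYMBOLS makes the final targets run the device-link step
    # so that device symbols are resolved in the libraries/executables produced here.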
set_target_properties(
lightgbm_objs
PROPERTIES
CUDA_ARCHITECTURES ${CUDA_ARCHS}
CUDA_SEPARABLE_COMPILATION ON
)
set_target_properties(
_lightgbm
PROPERTIES
CUDA_ARCHITECTURES ${CUDA_ARCHS}
CUDA_RESOLVE_DEVICE_SYMBOLS ON
)
if(BUILD_CLI)
set_target_properties(
lightgbm
PROPERTIES
CUDA_ARCHITECTURES ${CUDA_ARCHS}
CUDA_RESOLVE_DEVICE_SYMBOLS ON
)
endif()
    # 'histograms' is a list of object libraries. Linking an object library to other
    # object libraries only propagates usage requirements; the linked objects themselves
    # won't be used. Thus we have to call target_link_libraries on the final targets here.
if(BUILD_CLI)
target_link_libraries(lightgbm PRIVATE ${histograms})
endif()
target_link_libraries(_lightgbm PRIVATE ${histograms})
endif()
if(WIN32)
if(MINGW OR CYGWIN)
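        # ws2_32 (Winsock2) and iphlpapi (IP Helper API) are required by the socket-based
        # networking code and must be linked explicitly in MinGW/Cygwin builds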
target_link_libraries(lightgbm_objs PUBLIC ws2_32 iphlpapi)
endif()
endif()
if(__BUILD_FOR_R)
    # utils/log.h and the C API use R headers, thus both object libraries need to link
    # with the R library.
if(MSVC)
set(R_LIB ${LIBR_MSVC_CORE_LIBRARY})
else()
set(R_LIB ${LIBR_CORE_LIBRARY})
endif()
target_link_libraries(lightgbm_objs PUBLIC ${R_LIB})
target_link_libraries(lightgbm_capi_objs PUBLIC ${R_LIB})
endif()
#-- Google C++ tests
if(BUILD_CPP_TEST)
find_package(GTest CONFIG)
if(NOT GTEST_FOUND)
message(STATUS "Did not find Google Test in the system root. Fetching Google Test now...")
include(FetchContent)
FetchContent_Declare(
googletest
GIT_REPOSITORY https://github.com/google/googletest.git
GIT_TAG v1.14.0
)
FetchContent_MakeAvailable(googletest)
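        # the googletest sources fetched above expose a plain 'gtest' target; alias it to
        # GTest::GTest so the link line below works whether Google Test came from the
        # system or from FetchContent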
add_library(GTest::GTest ALIAS gtest)
endif()
set(LightGBM_TEST_HEADER_DIR ${PROJECT_SOURCE_DIR}/tests/cpp_tests)
include_directories(${LightGBM_TEST_HEADER_DIR})
set(
CPP_TEST_SOURCES
tests/cpp_tests/test_array_args.cpp
tests/cpp_tests/test_arrow.cpp
tests/cpp_tests/test_byte_buffer.cpp
tests/cpp_tests/test_chunked_array.cpp
tests/cpp_tests/test_common.cpp
tests/cpp_tests/test_main.cpp
tests/cpp_tests/test_serialize.cpp
tests/cpp_tests/test_single_row.cpp
tests/cpp_tests/test_stream.cpp
tests/cpp_tests/testutils.cpp
)
if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /permissive-")
endif()
add_executable(testlightgbm ${CPP_TEST_SOURCES})
target_link_libraries(testlightgbm PRIVATE lightgbm_objs lightgbm_capi_objs GTest::GTest)
endif()
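# A minimal sketch (illustrative only, not executed by this script) of configuring and
# building the 'testlightgbm' target defined above from the repository root; where the
# resulting binary lands depends on the generator and any runtime-output-directory settings:
#
#   cmake -B build -S . -DBUILD_CPP_TEST=ON
#   cmake --build build --target testlightgbm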
if(BUILD_CLI)
install(
TARGETS lightgbm
RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin
)
endif()
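# Descriptive note: when building for the Python package, a relative install prefix is set
# below so that the install() rules later in this file place lib_lightgbm under a 'lightgbm/'
# directory (relative to wherever the install step is run) rather than a system-wide prefix.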
if(__BUILD_FOR_PYTHON)
set(CMAKE_INSTALL_PREFIX "lightgbm")
endif()
# The macOS linker puts an absolute path to linked libraries in lib_lightgbm.dylib.
# This block overrides that information for LightGBM's OpenMP dependency, to allow
# finding that library in more places.
#
# This reduces the risk of runtime issues resulting from multiple {libgomp,libiomp,libomp}.dylib being loaded.
#
if(APPLE AND USE_OPENMP AND NOT BUILD_STATIC_LIB)
# store path to {libgomp,libiomp,libomp}.dylib found at build time in a variable
get_target_property(
OpenMP_LIBRARY_LOCATION
OpenMP::OpenMP_CXX
INTERFACE_LINK_LIBRARIES
)
# get just the filename of that path
# (to deal with the possibility that it might be 'libomp.dylib' or 'libgomp.dylib' or 'libiomp.dylib')
get_filename_component(
OpenMP_LIBRARY_NAME
${OpenMP_LIBRARY_LOCATION}
NAME
)
# get directory of that path
get_filename_component(
OpenMP_LIBRARY_DIR
${OpenMP_LIBRARY_LOCATION}
DIRECTORY
)
# get exact name of the library in a variable
get_target_property(
__LIB_LIGHTGBM_OUTPUT_NAME
_lightgbm
OUTPUT_NAME
)
if(NOT __LIB_LIGHTGBM_OUTPUT_NAME)
set(__LIB_LIGHTGBM_OUTPUT_NAME "lib_lightgbm")
endif()
if(CMAKE_SHARED_LIBRARY_SUFFIX_CXX)
set(
__LIB_LIGHTGBM_FILENAME "${__LIB_LIGHTGBM_OUTPUT_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX_CXX}"
CACHE INTERNAL "lightgbm shared library filename"
)
else()
set(
__LIB_LIGHTGBM_FILENAME "${__LIB_LIGHTGBM_OUTPUT_NAME}.dylib"
CACHE INTERNAL "lightgbm shared library filename"
)
endif()
# Override the absolute path to OpenMP with a relative one using @rpath.
#
# This also ensures that if a {libgomp,libiomp,libomp}.dylib has already been loaded, it'll just use that.
add_custom_command(
TARGET _lightgbm
POST_BUILD
COMMAND
install_name_tool
-change
${OpenMP_LIBRARY_LOCATION}
"@rpath/${OpenMP_LIBRARY_NAME}"
"${__LIB_LIGHTGBM_FILENAME}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Replacing hard-coded OpenMP install_name with '@rpath/${OpenMP_LIBRARY_NAME}'..."
)
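# Illustrative check only (assuming a Homebrew-provided libomp was found at build time;
# 'otool' ships with the Xcode command line tools and is not invoked by this script):
#
#   otool -L lib_lightgbm.dylib
#     before: /opt/homebrew/opt/libomp/lib/libomp.dylib
#     after:  @rpath/libomp.dylib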
# add RPATH entries to ensure the loader searches the following locations, in this order:
#
# - (R-only) ${LIBR_LIBS_DIR} (wherever R for macOS stores vendored third-party libraries)
# - ${OpenMP_LIBRARY_DIR} (wherever find_package(OpenMP) found OpenMP at build time)
# - /opt/homebrew/opt/libomp/lib (where 'brew install' / 'brew link' puts libomp.dylib)
# - /opt/local/lib/libomp (where 'port install' puts libomp.dylib)
#
# with some compilers, the OpenMP runtime ships with the compiler itself (e.g. libgomp with gcc)
list(APPEND __omp_install_rpaths "${OpenMP_LIBRARY_DIR}")
# with clang, libomp doesn't ship with the compiler and might be supplied separately
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
list(
APPEND __omp_install_rpaths
"/opt/homebrew/opt/libomp/lib"
"/opt/local/lib/libomp"
)
# It appears that CRAN's macOS binaries compiled with -fopenmp have install names
# of the form:
#
# /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/lib/libomp.dylib
#
# That corresponds to the libomp.dylib that ships with the R framework for macOS, available
# from https://cran.r-project.org/bin/macosx/.
#
# That absolute-path install name leads to that library being loaded unconditionally.
#
# That can result in e.g. 'library(data.table)' loading R's libomp.dylib and 'library(lightgbm)' loading
# Homebrew's. Having 2 loaded in the same process can lead to segfaults and unpredictable behavior.
#
# This can't be easily avoided by forcing R-package builds in LightGBM to use R's libomp.dylib
# at build time... LightGBM's CMake uses find_package(OpenMP), and R for macOS only provides the
# library, not CMake config files for it.
#
# The best we can do, to allow CMake-based builds of the R-package here to continue to work
# alongside CRAN-prepared binaries of other packages with OpenMP dependencies, is to
# ensure that R's library directory is the first place the loader searches for
# libomp.dylib when clang is used.
#
# ref: https://github.com/microsoft/LightGBM/issues/6628
#
if(__BUILD_FOR_R)
list(PREPEND __omp_install_rpaths "${LIBR_LIBS_DIR}")
endif()
endif()
set_target_properties(
_lightgbm
PROPERTIES
BUILD_WITH_INSTALL_RPATH TRUE
INSTALL_RPATH "${__omp_install_rpaths}"
INSTALL_RPATH_USE_LINK_PATH FALSE
)
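# Purely illustrative: for an Apple clang build of the R-package, assuming LIBR_LIBS_DIR
# points at R 4.3's arm64 'Resources/lib' directory and find_package(OpenMP) found
# Homebrew's libomp, the resulting INSTALL_RPATH would look roughly like:
#
#   /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/lib
#   /opt/homebrew/opt/libomp/lib     (OpenMP_LIBRARY_DIR; also appended again as the hard-coded Homebrew fallback)
#   /opt/local/lib/libomp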
endif()
install(
TARGETS _lightgbm
RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin
LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib
ARCHIVE DESTINATION ${CMAKE_INSTALL_PREFIX}/lib
)
if(INSTALL_HEADERS)
install(
DIRECTORY ${LightGBM_HEADER_DIR}/LightGBM
DESTINATION ${CMAKE_INSTALL_PREFIX}/include
)
install(
FILES ${FAST_DOUBLE_PARSER_INCLUDE_DIR}/fast_double_parser.h
DESTINATION ${CMAKE_INSTALL_PREFIX}/include/LightGBM/utils
)
install(
DIRECTORY ${FMT_INCLUDE_DIR}/
DESTINATION ${CMAKE_INSTALL_PREFIX}/include/LightGBM/utils
FILES_MATCHING PATTERN "*.h"
)
endif()
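# Rough sketch (illustrative only) of the layout the rules above produce under
# "${CMAKE_INSTALL_PREFIX}/include": LightGBM's own headers under 'LightGBM/', with the
# matched fast_double_parser and fmt headers copied into 'LightGBM/utils/' so that the
# installed headers do not depend on the vendored submodule locations.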