remove redundant whitespaces
This commit is contained in:
Ilya Chernov 2022-09-11 10:08:58 +03:00 committed by GitHub
Parent 3d4e08e1d8
Commit 952458a9e0
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 8 additions and 8 deletions

@@ -508,7 +508,7 @@ namespace LightGBM {
const double max_sparse_rate =
static_cast<double>(cnt_in_bin[most_freq_bin_]) / total_sample_cnt;
// When most_freq_bin_ != default_bin_, there are some additional data loading costs.
- // so use most_freq_bin_ = default_bin_ when there is not so sparse
+ // so use most_freq_bin_ = default_bin_ when there is not so sparse
if (most_freq_bin_ != default_bin_ && max_sparse_rate < kSparseThreshold) {
most_freq_bin_ = default_bin_;
}
@@ -705,7 +705,7 @@ namespace LightGBM {
return new MultiValSparseBin<uint32_t, uint32_t>(
num_data, num_bin, estimate_element_per_row);
}
- } else {
+ } else {
if (num_bin <= 256) {
return new MultiValSparseBin<size_t, uint8_t>(
num_data, num_bin, estimate_element_per_row);

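As an aside on the hunk above: the constructor chosen depends on num_bin, so that bin values are stored in the narrowest type that can hold them. A minimal sketch of that idea follows; only the num_bin <= 256 -> uint8_t case is visible in this diff, the other cutoffs are illustrative assumptions and this is not LightGBM's API.

#include <cstdio>

// Sketch: pick the narrowest unsigned value type that can store a bin id.
// Only the num_bin <= 256 -> uint8_t case appears in the diff above; the
// remaining cutoffs are assumptions for illustration.
const char* BinValueType(int num_bin) {
  if (num_bin <= 256) {
    return "uint8_t";   // bin ids 0..255 fit in one byte
  } else if (num_bin <= 65536) {
    return "uint16_t";  // assumed two-byte case
  }
  return "uint32_t";    // fallback for very large bin counts
}

int main() {
  std::printf("%d bins -> %s\n", 200, BinValueType(200));    // uint8_t
  std::printf("%d bins -> %s\n", 1000, BinValueType(1000));  // uint16_t
  return 0;
}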

@@ -248,7 +248,7 @@ class TcpSocket {
}
inline bool Connect(const char *url, int port) {
- sockaddr_in server_addr = GetAddress(url, port);
+ sockaddr_in server_addr = GetAddress(url, port);
if (connect(sockfd_, reinterpret_cast<const sockaddr*>(&server_addr), sizeof(sockaddr_in)) == 0) {
return true;
}

@@ -129,7 +129,7 @@ class BinaryLogloss: public ObjectiveFunction {
// calculate gradients and hessians
const double response = -label * sigmoid_ / (1.0f + std::exp(label * sigmoid_ * score[i]));
const double abs_response = fabs(response);
- gradients[i] = static_cast<score_t>(response * label_weight * weights_[i]);
+ gradients[i] = static_cast<score_t>(response * label_weight * weights_[i]);
hessians[i] = static_cast<score_t>(abs_response * (sigmoid_ - abs_response) * label_weight * weights_[i]);
}
}

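For reference, the gradient and hessian in the BinaryLogloss hunk above follow from the sigmoid-scaled binary log-loss. A short derivation, assuming labels y in {-1, +1}, raw score s, and sigma = sigmoid_ (this restates the math behind the code shown; it is not part of the commit):

L(s) = \log\left(1 + e^{-y\sigma s}\right), \qquad
\frac{\partial L}{\partial s} = \frac{-y\sigma}{1 + e^{y\sigma s}}, \qquad
\frac{\partial^2 L}{\partial s^2} = \frac{\sigma^2 e^{y\sigma s}}{\left(1 + e^{y\sigma s}\right)^2}
= \left|\frac{\partial L}{\partial s}\right| \left(\sigma - \left|\frac{\partial L}{\partial s}\right|\right).

The first derivative is response, the right-hand identity is why the hessian is computed as abs_response * (sigmoid_ - abs_response), and both are then scaled by label_weight * weights_[i].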

@@ -346,7 +346,7 @@ class CUDADataPartition {
data_size_t* cuda_data_indices_;
/*! \brief start position of each leaf in cuda_data_indices_ */
data_size_t* cuda_leaf_data_start_;
- /*! \brief end position of each leaf in cuda_data_indices_ */
+ /*! \brief end position of each leaf in cuda_data_indices_ */
data_size_t* cuda_leaf_data_end_;
/*! \brief number of data in each leaf */
data_size_t* cuda_leaf_num_data_;

@@ -266,7 +266,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
ptr_pinned_gradients_ = queue_.enqueue_map_buffer(pinned_gradients_, boost::compute::command_queue::map_write_invalidate_region,
0, allocated_num_data_ * sizeof(score_t));
pinned_hessians_ = boost::compute::buffer(); // deallocate
- pinned_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
+ pinned_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr,
ordered_hessians_.data());
ptr_pinned_hessians_ = queue_.enqueue_map_buffer(pinned_hessians_, boost::compute::command_queue::map_write_invalidate_region,
@@ -277,7 +277,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
device_gradients_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_only, nullptr);
device_hessians_ = boost::compute::buffer(); // deallocate
- device_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
+ device_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_only, nullptr);
// allocate feature mask, for disabling some feature-groups' histogram calculation
feature_masks_.resize(num_dense_feature4_ * dword_features_);

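For context on the pinned-memory pattern in the two hunks above (allocate a boost::compute::buffer over host storage with use_host_ptr, then map it for writing), here is a minimal self-contained sketch. The device setup and the names n and host_hessians are assumptions for illustration, not code from this repository.

#include <boost/compute/core.hpp>
#include <cstddef>
#include <vector>

namespace compute = boost::compute;

int main() {
  compute::device device = compute::system::default_device();
  compute::context ctx(device);
  compute::command_queue queue(ctx, device);

  const std::size_t n = 1024;            // hypothetical stand-in for allocated_num_data_
  std::vector<float> host_hessians(n);   // host-side backing storage

  // Create a buffer over the host vector; use_host_ptr lets the runtime pin that memory.
  compute::buffer pinned(ctx, n * sizeof(float),
                         compute::memory_object::read_write |
                         compute::memory_object::use_host_ptr,
                         host_hessians.data());

  // Map it for writing so the host fills the pinned region directly.
  void* ptr = queue.enqueue_map_buffer(
      pinned, compute::command_queue::map_write_invalidate_region,
      0, n * sizeof(float));

  // ... write hessians through ptr, then unmap before the device reads the buffer ...
  queue.enqueue_unmap_buffer(pinned, ptr);
  return 0;
}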

@@ -875,7 +875,7 @@ class AdvancedLeafConstraints : public IntermediateLeafConstraints {
// for example when adding a constraints cstr2 on thresholds [1:2),
// on an existing constraints cstr1 on thresholds [0, +inf),
// the thresholds and constraints must become
- // [0, 1, 2] and [cstr1, cstr2, cstr1]
+ // [0, 1, 2] and [cstr1, cstr2, cstr1]
// so since we loop through thresholds only once,
// the previous constraint that still applies needs to be recorded
double previous_constraint = use_max_operator