From 7bf81f8c6d7cd82db74dea4eb6272e08fbeb39b1 Mon Sep 17 00:00:00 2001
From: Nikita Titov
Date: Sun, 28 Mar 2021 15:52:42 +0300
Subject: [PATCH] [ci] apply cpplint to cpp tests (#4092)

* Update chunked_array.hpp

* Update ChunkedArray_API_extensions.i

* Update StringArray.i

* apply cpplint to cpp tests

* Update test_chunked_array to please cpplint (#4121)

* Update test_chunked_array to please cpplint

* Simplify var name

* Add comment

Co-authored-by: Alberto Ferreira
---
 .ci/test.sh                              |  2 +-
 include/LightGBM/utils/chunked_array.hpp |  2 +-
 swig/ChunkedArray_API_extensions.i       |  4 ++
 swig/StringArray.i                       |  2 +
 tests/cpp_test/test_chunked_array.cpp    | 61 ++++++++++++------------
 5 files changed, 38 insertions(+), 33 deletions(-)

diff --git a/.ci/test.sh b/.ci/test.sh
index 66745a0b6..35c7da841 100755
--- a/.ci/test.sh
+++ b/.ci/test.sh
@@ -64,7 +64,7 @@ if [[ $TASK == "lint" ]]; then
     echo "Linting R code"
     Rscript ${BUILD_DIRECTORY}/.ci/lint_r_code.R ${BUILD_DIRECTORY} || exit -1
     echo "Linting C++ code"
-    cpplint --filter=-build/c++11,-build/include_subdir,-build/header_guard,-whitespace/line_length --recursive ./src ./include ./R-package ./swig || exit -1
+    cpplint --filter=-build/c++11,-build/include_subdir,-build/header_guard,-whitespace/line_length --recursive ./src ./include ./R-package ./swig ./tests || exit -1
     exit 0
 fi
 
diff --git a/include/LightGBM/utils/chunked_array.hpp b/include/LightGBM/utils/chunked_array.hpp
index 6160dafa0..59c9684e9 100644
--- a/include/LightGBM/utils/chunked_array.hpp
+++ b/include/LightGBM/utils/chunked_array.hpp
@@ -239,7 +239,7 @@ class ChunkedArray {
     _chunks.push_back(new (std::nothrow) T[_chunk_size]);
 
     // Check memory allocation success:
-    if (!_chunks[_chunks.size()-1]) {
+    if (!_chunks[_chunks.size() - 1]) {
       release();
       Log::Fatal("Memory exhausted! Cannot allocate new ChunkedArray chunk.");
     }
diff --git a/swig/ChunkedArray_API_extensions.i b/swig/ChunkedArray_API_extensions.i
index 4161169ac..32cc346a1 100644
--- a/swig/ChunkedArray_API_extensions.i
+++ b/swig/ChunkedArray_API_extensions.i
@@ -1,3 +1,7 @@
+/*!
+ * Copyright (c) 2021 Microsoft Corporation. All rights reserved.
+ * Licensed under the MIT License. See LICENSE file in the project root for license information.
+ */
 /**
  * Wrap chunked_array.hpp class for SWIG usage.
  *
diff --git a/swig/StringArray.i b/swig/StringArray.i
index 12f1a5d2b..826a90ba8 100644
--- a/swig/StringArray.i
+++ b/swig/StringArray.i
@@ -1,6 +1,8 @@
 /*!
  * Copyright (c) 2020 Microsoft Corporation. All rights reserved.
  * Licensed under the MIT License. See LICENSE file in the project root for license information.
+ *
+ * Author: Alberto Ferreira
  */
 /**
  * This wraps the StringArray.hpp class for SWIG usage,
diff --git a/tests/cpp_test/test_chunked_array.cpp b/tests/cpp_test/test_chunked_array.cpp
index e7d155566..9bfd85729 100644
--- a/tests/cpp_test/test_chunked_array.cpp
+++ b/tests/cpp_test/test_chunked_array.cpp
@@ -36,14 +36,12 @@ testing::AssertionResult are_vectors_equal(const std::vector<T> &a, const std::v
 
 class ChunkedArrayTest : public testing::Test {
  protected:
-  void SetUp() override {
-  }
 
-  void add_items_to_array(const std::vector<int> &vec, ChunkedArray<int> &ca) {
-    for (auto v: vec) {
-      ca.add(v);
+  void add_items_to_array(const std::vector<int> &vec, ChunkedArray<int> *ca) {
+    for (auto v : vec) {
+      ca->add(v);
     }
   }
 
@@ -52,7 +50,7 @@ class ChunkedArrayTest : public testing::Test {
     it would yield the same contents as vec
   */
   testing::AssertionResult coalesced_output_equals_vec(const ChunkedArray<int> &ca, const std::vector<int> &vec,
-                                                       const bool all_addresses=false) {
+                                                       const bool all_addresses = false) {
     std::vector<int> out(vec.size());
     ca.coalesce_to(out.data(), all_addresses);
     return are_vectors_equal(out, vec);
@@ -63,7 +61,7 @@ class ChunkedArrayTest : public testing::Test {
   const size_t CHUNK_SIZE = 3;
   const size_t OUT_OF_BOUNDS_OFFSET = 4;
 
-  ChunkedArray<int> ca_ = ChunkedArray<int>(CHUNK_SIZE);  //
+  ChunkedArray<int> ca_ = ChunkedArray<int>(CHUNK_SIZE);  //(i / CHUNK_SIZE) + 1;
     ASSERT_EQ(ca_.get_chunks_count(), expected_chunks) << "with " << i << " add() call(s) "
                                                        << "and CHUNK_SIZE==" << CHUNK_SIZE << ".";
   }
@@ -127,7 +125,7 @@ TEST_F(ChunkedArrayTest, getAddCount) {
 */
 TEST_F(ChunkedArrayTest, coalesceTo) {
   std::vector<int> out(REF_VEC.size());
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
 
   ca_.coalesce_to(out.data());
 
@@ -139,13 +137,13 @@ TEST_F(ChunkedArrayTest, coalesceTo) {
 */
 TEST_F(ChunkedArrayTest, clear) {
   const std::vector<int> ref_vec2 = {1, 2, 5, -1};
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
   // Start with some content:
   ASSERT_TRUE(coalesced_output_equals_vec(ca_, REF_VEC));
 
   // Clear & re-use:
   ca_.clear();
-  add_items_to_array(ref_vec2, ca_);
+  add_items_to_array(ref_vec2, &ca_);
   // Output should match new content:
   ASSERT_TRUE(coalesced_output_equals_vec(ca_, ref_vec2));
 
@@ -155,8 +153,8 @@ TEST_F(ChunkedArrayTest, clear) {
   Ensure ChunkedArray is safe against double-frees.
 */
 TEST_F(ChunkedArrayTest, doubleFreeSafe) {
-  ca_.release(); // Cannot be used any longer from now on.
-  ca_.release(); // Ensure we don't segfault.
+  ca_.release();  // Cannot be used any longer from now on.
+  ca_.release();  // Ensure we don't segfault.
 
   SUCCEED();
 }
@@ -165,12 +163,12 @@ TEST_F(ChunkedArrayTest, doubleFreeSafe) {
   Ensure size computations in the getters are correct.
 */
 TEST_F(ChunkedArrayTest, totalArraySizeMatchesLastChunkAddCount) {
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
 
   const size_t first_chunks_add_count = (ca_.get_chunks_count() - 1) * ca_.get_chunk_size();
   const size_t last_chunk_add_count = ca_.get_last_chunk_add_count();
 
-  EXPECT_EQ(first_chunks_add_count, int(REF_VEC.size()/CHUNK_SIZE) * CHUNK_SIZE);
+  EXPECT_EQ(first_chunks_add_count, static_cast<int>(REF_VEC.size() / CHUNK_SIZE) * CHUNK_SIZE);
   EXPECT_EQ(last_chunk_add_count, REF_VEC.size() % CHUNK_SIZE);
   EXPECT_EQ(first_chunks_add_count + last_chunk_add_count, ca_.get_add_count());
 }
@@ -185,10 +183,10 @@ TEST_F(ChunkedArrayTest, totalArraySizeMatchesLastChunkAddCount) {
   This would occur if there was an improper data layout with the chunks.
 */
 TEST_F(ChunkedArrayTest, dataLayoutTestThroughGetitem) {
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
 
   for (size_t i = 0, chunk = 0, in_chunk_idx = 0; i < REF_VEC.size(); ++i) {
-    int value = ca_.getitem(chunk, in_chunk_idx, -1); // -1 works as sentinel value (bad layout found)
+    int value = ca_.getitem(chunk, in_chunk_idx, -1);  // -1 works as sentinel value (bad layout found)
     EXPECT_EQ(value, REF_VEC[i]) << " for address (chunk,in_chunk_idx) = ("
                                  << chunk << "," << in_chunk_idx << ")";
 
@@ -209,7 +207,7 @@ TEST_F(ChunkedArrayTest, dataLayoutTestThroughGetitem) {
   We also gradually add more chunks to the ChunkedArray and re-run more trials
   to ensure the valid/invalid addresses are updated.
 
-  With each valid update we add to a "memory" vector the history of all the insertions.
+  With each valid update we add to a "memory" vector the latest inserted values.
   This is used at the end to ensure all values were stored properly, including after value overrides.
 */
 TEST_F(ChunkedArrayTest, testDataLayoutWithAdvancedInsertionAPI) {
@@ -218,8 +216,9 @@ TEST_F(ChunkedArrayTest, testDataLayoutWithAdvancedInsertionAPI) {
   const size_t MAX_IN_CHUNK_SEARCH_IDX = 2 * CHUNK_SIZE;
   // Number of trials for each new ChunkedArray configuration. Pass 100 times over the search space:
   const size_t N_TRIALS = MAX_CHUNKS_SEARCH * MAX_IN_CHUNK_SEARCH_IDX * 100;
-  std::vector<int> overriden_trials_values(MAX_CHUNKS_SEARCH * CHUNK_SIZE);
-  std::vector<bool> overriden_trials_mask(MAX_CHUNKS_SEARCH * CHUNK_SIZE, false);
+  const int INVALID = -1;  // A negative value signaling the requested value lives in an invalid address.
+  const int UNITIALIZED = -99;  // A negative value to signal this was never updated.
+  std::vector<int> ref_values(MAX_CHUNKS_SEARCH * CHUNK_SIZE, UNITIALIZED);  // Memorize latest inserted values.
 
   // Each outer loop iteration changes the test by adding +1 chunk. We start with 1 chunk only:
   for (size_t chunks = 1; chunks < MAX_CHUNKS_SEARCH; ++chunks) {
@@ -239,24 +238,24 @@ TEST_F(ChunkedArrayTest, testDataLayoutWithAdvancedInsertionAPI) {
       // If at valid address, check that the stored value is correct & remember it for the future:
       if (valid_address) {
         // Check the just-stored value with getitem():
-        EXPECT_EQ(ca_.getitem(trial_chunk, trial_in_chunk_idx, -1), trial_value); // -1 is the sentinel value.
+        EXPECT_EQ(ca_.getitem(trial_chunk, trial_in_chunk_idx, INVALID), trial_value);
 
         // Also store the just-stored value for future tracking:
-        overriden_trials_values[trial_chunk*CHUNK_SIZE + trial_in_chunk_idx] = trial_value;
-        overriden_trials_mask[trial_chunk*CHUNK_SIZE + trial_in_chunk_idx] = true;
+        ref_values[trial_chunk * CHUNK_SIZE + trial_in_chunk_idx] = trial_value;
       }
     }
 
     ca_.new_chunk();  // Just finished a round of trials. Now add a new chunk. Valid addresses will be expanded.
   }
 
   // Final check: ensure even with overrides, all valid insertions store the latest value at that address:
-  std::vector<int> coalesced_out(MAX_CHUNKS_SEARCH * CHUNK_SIZE, -1);
-  ca_.coalesce_to(coalesced_out.data(), true); // Export all valid addresses.
-  for (size_t i = 0; i < overriden_trials_mask.size(); ++i) {
-    if (overriden_trials_mask[i]) {
-      EXPECT_EQ(ca_.getitem(i/CHUNK_SIZE, i % CHUNK_SIZE, -1), overriden_trials_values[i]);
-      EXPECT_EQ(coalesced_out[i], overriden_trials_values[i]);
+  std::vector<int> coalesced_out(MAX_CHUNKS_SEARCH * CHUNK_SIZE, UNITIALIZED);
+  ca_.coalesce_to(coalesced_out.data(), true);  // Export all valid addresses.
+  for (size_t i = 0; i < ref_values.size(); ++i) {
+    if (ref_values[i] != UNITIALIZED) {
+      // Test in 2 ways that the values are correctly laid out in memory:
+      EXPECT_EQ(ca_.getitem(i / CHUNK_SIZE, i % CHUNK_SIZE, INVALID), ref_values[i]);
+      EXPECT_EQ(coalesced_out[i], ref_values[i]);
     }
   }
 }