[ci] apply cpplint to cpp tests (#4092)

* Update chunked_array.hpp

* Update ChunkedArray_API_extensions.i

* Update StringArray.i

* apply cpplint to cpp tests

* Update test_chunked_array to please cpplint (#4121)

* Update test_chunked_array to please cpplint

* Simplify var name

* Add comment

Co-authored-by: Alberto Ferreira <AlbertoEAF@users.noreply.github.com>
Nikita Titov authored 2021-03-28 15:52:42 +03:00 · committed by GitHub
Parent: d32ee23a74
Commit: 7bf81f8c6d
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 38 additions and 33 deletions


@@ -64,7 +64,7 @@ if [[ $TASK == "lint" ]]; then
     echo "Linting R code"
     Rscript ${BUILD_DIRECTORY}/.ci/lint_r_code.R ${BUILD_DIRECTORY} || exit -1
     echo "Linting C++ code"
-    cpplint --filter=-build/c++11,-build/include_subdir,-build/header_guard,-whitespace/line_length --recursive ./src ./include ./R-package ./swig || exit -1
+    cpplint --filter=-build/c++11,-build/include_subdir,-build/header_guard,-whitespace/line_length --recursive ./src ./include ./R-package ./swig ./tests || exit -1
     exit 0
 fi


@@ -239,7 +239,7 @@ class ChunkedArray {
     _chunks.push_back(new (std::nothrow) T[_chunk_size]);
     // Check memory allocation success:
-    if (!_chunks[_chunks.size()-1]) {
+    if (!_chunks[_chunks.size() - 1]) {
       release();
       Log::Fatal("Memory exhausted! Cannot allocate new ChunkedArray chunk.");
     }


@@ -1,3 +1,7 @@
+/*!
+ * Copyright (c) 2021 Microsoft Corporation. All rights reserved.
+ * Licensed under the MIT License. See LICENSE file in the project root for license information.
+ */
 /**
  * Wrap chunked_array.hpp class for SWIG usage.
  *


@@ -1,6 +1,8 @@
 /*!
  * Copyright (c) 2020 Microsoft Corporation. All rights reserved.
  * Licensed under the MIT License. See LICENSE file in the project root for license information.
+ *
+ * Author: Alberto Ferreira
  */
 /**
  * This wraps the StringArray.hpp class for SWIG usage,


@@ -36,14 +36,12 @@ testing::AssertionResult are_vectors_equal(const std::vector<T> &a, const std::v
 class ChunkedArrayTest : public testing::Test {
  protected:
   void SetUp() override {
   }
-  void add_items_to_array(const std::vector<int> &vec, ChunkedArray<int> &ca) {
-    for (auto v: vec) {
-      ca.add(v);
+  void add_items_to_array(const std::vector<int> &vec, ChunkedArray<int> *ca) {
+    for (auto v : vec) {
+      ca->add(v);
     }
   }
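
The signature change above (ChunkedArray<int> &ca becoming ChunkedArray<int> *ca) follows the Google C++ Style Guide convention that cpplint enforces: parameters a function mutates are passed by pointer so the mutation is visible at the call site. A minimal sketch of the idea, with illustrative names:

    #include <vector>

    // Read-only input stays a const reference; the mutated output is a pointer.
    void append_to(const std::vector<int> &src, std::vector<int> *dst) {
      for (int v : src) {
        dst->push_back(v);  // '->' here and '&' at the call site make the write explicit
      }
    }

    // Usage: append_to(values, &accumulator);
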
@@ -52,7 +50,7 @@ class ChunkedArrayTest : public testing::Test {
     it would yield the same contents as vec
   */
   testing::AssertionResult coalesced_output_equals_vec(const ChunkedArray<int> &ca, const std::vector<int> &vec,
-                                                       const bool all_addresses=false) {
+                                                       const bool all_addresses = false) {
     std::vector<int> out(vec.size());
     ca.coalesce_to(out.data(), all_addresses);
     return are_vectors_equal(out, vec);
@@ -63,7 +61,7 @@ class ChunkedArrayTest : public testing::Test {
   const size_t CHUNK_SIZE = 3;
   const size_t OUT_OF_BOUNDS_OFFSET = 4;
-  ChunkedArray<int> ca_ = ChunkedArray<int>(CHUNK_SIZE); //<! Re-used for many tests.
+  ChunkedArray<int> ca_ = ChunkedArray<int>(CHUNK_SIZE);  //<! Re-used for many tests.
 };
@@ -97,11 +95,11 @@ TEST_F(ChunkedArrayTest, getChunkSizeIsConstant) {
   independently of the number of chunks used.
 */
 TEST_F(ChunkedArrayTest, getChunksCount) {
-  ASSERT_EQ(ca_.get_chunks_count(), 1); // ChunkedArray always starts with 1 chunk.
+  ASSERT_EQ(ca_.get_chunks_count(), 1);  // ChunkedArray always starts with 1 chunk.
   for (size_t i = 0; i < 3 * CHUNK_SIZE; ++i) {
     ca_.add(0);
-    int expected_chunks = int(i/CHUNK_SIZE) + 1;
+    int expected_chunks = static_cast<int>(i / CHUNK_SIZE) + 1;
     ASSERT_EQ(ca_.get_chunks_count(), expected_chunks) << "with " << i << " add() call(s) "
                                                        << "and CHUNK_SIZE==" << CHUNK_SIZE << ".";
   }
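
The cast change above reflects cpplint's readability/casting check: C-style and function-style casts such as int(i/CHUNK_SIZE) are flagged in favour of static_cast. A small illustrative sketch (names are not from the test):

    #include <cstddef>

    int expected_chunk_count(std::size_t added, std::size_t chunk_size) {
      // static_cast makes the narrowing conversion explicit and easy to grep for.
      return static_cast<int>(added / chunk_size) + 1;
    }
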
@@ -127,7 +125,7 @@ TEST_F(ChunkedArrayTest, getAddCount) {
 */
 TEST_F(ChunkedArrayTest, coalesceTo) {
   std::vector<int> out(REF_VEC.size());
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
   ca_.coalesce_to(out.data());
@@ -139,13 +137,13 @@ TEST_F(ChunkedArrayTest, coalesceTo) {
 */
 TEST_F(ChunkedArrayTest, clear) {
   const std::vector<int> ref_vec2 = {1, 2, 5, -1};
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
   // Start with some content:
   ASSERT_TRUE(coalesced_output_equals_vec(ca_, REF_VEC));
   // Clear & re-use:
   ca_.clear();
-  add_items_to_array(ref_vec2, ca_);
+  add_items_to_array(ref_vec2, &ca_);
   // Output should match new content:
   ASSERT_TRUE(coalesced_output_equals_vec(ca_, ref_vec2));
@@ -155,8 +153,8 @@ TEST_F(ChunkedArrayTest, clear) {
   Ensure ChunkedArray is safe against double-frees.
 */
 TEST_F(ChunkedArrayTest, doubleFreeSafe) {
-  ca_.release(); // Cannot be used any longer from now on.
-  ca_.release(); // Ensure we don't segfault.
+  ca_.release();  // Cannot be used any longer from now on.
+  ca_.release();  // Ensure we don't segfault.
   SUCCEED();
 }
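
The test above only exercises the double-release behaviour. As a hedge, here is a generic sketch of how a release() can be made safe to call twice; this is illustrative code, not ChunkedArray's actual implementation:

    struct Buffer {
      int *data = nullptr;

      void release() {
        delete[] data;   // delete[] on nullptr is a no-op
        data = nullptr;  // clearing the pointer prevents a second delete of the same block
      }
    };

    // buffer.release(); buffer.release();  // the second call is harmless
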
@@ -165,12 +163,12 @@ TEST_F(ChunkedArrayTest, doubleFreeSafe) {
   Ensure size computations in the getters are correct.
 */
 TEST_F(ChunkedArrayTest, totalArraySizeMatchesLastChunkAddCount) {
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
   const size_t first_chunks_add_count = (ca_.get_chunks_count() - 1) * ca_.get_chunk_size();
   const size_t last_chunk_add_count = ca_.get_last_chunk_add_count();
-  EXPECT_EQ(first_chunks_add_count, int(REF_VEC.size()/CHUNK_SIZE) * CHUNK_SIZE);
+  EXPECT_EQ(first_chunks_add_count, static_cast<int>(REF_VEC.size() / CHUNK_SIZE) * CHUNK_SIZE);
   EXPECT_EQ(last_chunk_add_count, REF_VEC.size() % CHUNK_SIZE);
   EXPECT_EQ(first_chunks_add_count + last_chunk_add_count, ca_.get_add_count());
 }
@@ -185,10 +183,10 @@ TEST_F(ChunkedArrayTest, totalArraySizeMatchesLastChunkAddCount) {
   This would occur if there was an improper data layout with the chunks.
 */
 TEST_F(ChunkedArrayTest, dataLayoutTestThroughGetitem) {
-  add_items_to_array(REF_VEC, ca_);
+  add_items_to_array(REF_VEC, &ca_);
   for (size_t i = 0, chunk = 0, in_chunk_idx = 0; i < REF_VEC.size(); ++i) {
-    int value = ca_.getitem(chunk, in_chunk_idx, -1); // -1 works as sentinel value (bad layout found)
+    int value = ca_.getitem(chunk, in_chunk_idx, -1);  // -1 works as sentinel value (bad layout found)
     EXPECT_EQ(value, REF_VEC[i]) << " for address (chunk,in_chunk_idx) = (" << chunk << "," << in_chunk_idx << ")";
@@ -209,7 +207,7 @@ TEST_F(ChunkedArrayTest, dataLayoutTestThroughGetitem) {
   We also gradually add more chunks to the ChunkedArray and re-run more trials
   to ensure the valid/invalid addresses are updated.
-  With each valid update we add to a "memory" vector the history of all the insertions.
+  With each valid update we add to a "memory" vector the latest inserted values.
   This is used at the end to ensure all values were stored properly, including after
   value overrides.
 */
@@ -218,8 +216,9 @@ TEST_F(ChunkedArrayTest, testDataLayoutWithAdvancedInsertionAPI) {
   const size_t MAX_IN_CHUNK_SEARCH_IDX = 2 * CHUNK_SIZE;
   // Number of trials for each new ChunkedArray configuration. Pass 100 times over the search space:
   const size_t N_TRIALS = MAX_CHUNKS_SEARCH * MAX_IN_CHUNK_SEARCH_IDX * 100;
-  std::vector<int> overriden_trials_values(MAX_CHUNKS_SEARCH * CHUNK_SIZE);
-  std::vector<bool> overriden_trials_mask(MAX_CHUNKS_SEARCH * CHUNK_SIZE, false);
+  const int INVALID = -1;  // A negative value signaling the requested value lives in an invalid address.
+  const int UNITIALIZED = -99;  // A negative value to signal this was never updated.
+  std::vector<int> ref_values(MAX_CHUNKS_SEARCH * CHUNK_SIZE, UNITIALIZED);  // Memorize latest inserted values.
   // Each outer loop iteration changes the test by adding +1 chunk. We start with 1 chunk only:
   for (size_t chunks = 1; chunks < MAX_CHUNKS_SEARCH; ++chunks) {
@@ -239,24 +238,24 @@ TEST_F(ChunkedArrayTest, testDataLayoutWithAdvancedInsertionAPI) {
       // If at valid address, check that the stored value is correct & remember it for the future:
       if (valid_address) {
         // Check the just-stored value with getitem():
-        EXPECT_EQ(ca_.getitem(trial_chunk, trial_in_chunk_idx, -1), trial_value); // -1 is the sentinel value.
+        EXPECT_EQ(ca_.getitem(trial_chunk, trial_in_chunk_idx, INVALID), trial_value);
         // Also store the just-stored value for future tracking:
-        overriden_trials_values[trial_chunk*CHUNK_SIZE + trial_in_chunk_idx] = trial_value;
-        overriden_trials_mask[trial_chunk*CHUNK_SIZE + trial_in_chunk_idx] = true;
+        ref_values[trial_chunk * CHUNK_SIZE + trial_in_chunk_idx] = trial_value;
       }
     }
-    ca_.new_chunk(); // Just finished a round of trials. Now add a new chunk. Valid addresses will be expanded.
+    ca_.new_chunk();  // Just finished a round of trials. Now add a new chunk. Valid addresses will be expanded.
   }
   // Final check: ensure even with overrides, all valid insertions store the latest value at that address:
-  std::vector<int> coalesced_out(MAX_CHUNKS_SEARCH * CHUNK_SIZE, -1);
+  std::vector<int> coalesced_out(MAX_CHUNKS_SEARCH * CHUNK_SIZE, UNITIALIZED);
   ca_.coalesce_to(coalesced_out.data(), true);  // Export all valid addresses.
-  for (size_t i = 0; i < overriden_trials_mask.size(); ++i) {
-    if (overriden_trials_mask[i]) {
-      EXPECT_EQ(ca_.getitem(i/CHUNK_SIZE, i % CHUNK_SIZE, -1), overriden_trials_values[i]);
-      EXPECT_EQ(coalesced_out[i], overriden_trials_values[i]);
+  for (size_t i = 0; i < ref_values.size(); ++i) {
+    if (ref_values[i] != UNITIALIZED) {
+      // Test in 2 ways that the values are correctly laid out in memory:
+      EXPECT_EQ(ca_.getitem(i / CHUNK_SIZE, i % CHUNK_SIZE, INVALID), ref_values[i]);
+      EXPECT_EQ(coalesced_out[i], ref_values[i]);
     }
   }
 }
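
The refactor in this final hunk replaces two parallel vectors (values plus a bool mask) with a single vector initialized to a sentinel; a slot still equal to the sentinel was never written. A small standalone sketch of that bookkeeping (values and names are illustrative, not taken from the test):

    #include <cstddef>
    #include <vector>

    int main() {
      const int kUninitialized = -99;  // sentinel: slot was never updated
      std::vector<int> ref_values(10, kUninitialized);

      ref_values[3] = 42;  // remember the latest value written at "address" 3

      for (std::size_t i = 0; i < ref_values.size(); ++i) {
        if (ref_values[i] != kUninitialized) {
          // Only slots that were actually written get checked, replacing the old bool mask.
        }
      }
      return 0;
    }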