////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::Table
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

#include "Basics/Common.h"
#include "Cache/Common.h"
#include "Cache/PlainBucket.h"
#include "Cache/Table.h"

#include "gtest/gtest.h"

#include <cstdint>
#include <memory>

using namespace arangodb::cache;

TEST(CacheTableTest, test_static_allocation_size_method) {
  for (uint32_t i = Table::minLogSize; i <= Table::maxLogSize; i++) {
    ASSERT_TRUE(Table::allocationSize(i) ==
                (sizeof(Table) + (BUCKET_SIZE << i) + Table::padding));
  }
}

TEST(CacheTableTest, test_basic_constructor_behavior) {
  for (uint32_t i = Table::minLogSize; i <= 20; i++) {
    auto table = std::make_shared<Table>(i);
    ASSERT_NE(table.get(), nullptr);
    ASSERT_EQ(table->memoryUsage(),
              (sizeof(Table) + (BUCKET_SIZE << i) + Table::padding));
    ASSERT_EQ(table->logSize(), i);
    ASSERT_EQ(table->size(), (static_cast<uint64_t>(1) << i));
  }
}

TEST(CacheTableTest, test_basic_bucket_fetching_behavior) {
  auto table = std::make_shared<Table>(Table::minLogSize);
  ASSERT_NE(table.get(), nullptr);
  table->enable();
  for (uint64_t i = 0; i < table->size(); i++) {
    uint32_t hash = static_cast<uint32_t>(i << (32 - Table::minLogSize));
    auto pair = table->fetchAndLockBucket(hash, -1);
    auto bucket = reinterpret_cast<PlainBucket*>(pair.first);
    auto source = pair.second;
    ASSERT_NE(bucket, nullptr);
    ASSERT_TRUE(bucket->isLocked());
    ASSERT_NE(source, nullptr);
    ASSERT_EQ(source, table.get());

    auto rawBucket = reinterpret_cast<PlainBucket*>(table->primaryBucket(i));
    ASSERT_EQ(bucket, rawBucket);

    // a second fetch with a bounded number of lock attempts should fail
    // while the bucket is still held
    auto badPair = table->fetchAndLockBucket(hash, 10);
    auto badBucket = reinterpret_cast<PlainBucket*>(badPair.first);
    auto badSource = badPair.second;
    ASSERT_EQ(badBucket, nullptr);
    ASSERT_EQ(badSource, nullptr);

    bucket->unlock();
  }
}
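// The fixture below builds three tables of increasing capacity (logSize of
// Table::minLogSize, minLogSize + 2, and minLogSize + 4) so the migration
// tests can move buckets between a primary table and an auxiliary table of a
// different size. The expected behavior of setAuxiliary() and of the bucket
// fall-through is inferred from the assertions that follow, not from separate
// API documentation.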
class CacheTableMigrationTest : public ::testing::Test {
 protected:
  std::shared_ptr<Table> small;
  std::shared_ptr<Table> large;
  std::shared_ptr<Table> huge;

  CacheTableMigrationTest()
      : small(std::make_shared<Table>(Table::minLogSize)),
        large(std::make_shared<Table>(Table::minLogSize + 2)),
        huge(std::make_shared<Table>(Table::minLogSize + 4)) {
    small->enable();
    large->enable();
    huge->enable();
  }
};

TEST_F(CacheTableMigrationTest, check_that_setauxiliary_works_as_intended) {
  // setting an auxiliary table on a table without one should succeed
  auto res = small->setAuxiliary(large);
  ASSERT_EQ(res.get(), nullptr);
  // a second attempt should fail and hand back the rejected table
  res = small->setAuxiliary(huge);
  ASSERT_EQ(res.get(), huge.get());
  // clearing the auxiliary table should return the previously set one
  res = small->setAuxiliary(std::shared_ptr<Table>(nullptr));
  ASSERT_EQ(res.get(), large.get());
}

TEST_F(CacheTableMigrationTest, check_that_bucket_locking_falls_through_appropriately) {
  auto res = small->setAuxiliary(large);
  ASSERT_EQ(res.get(), nullptr);

  uint32_t indexSmall = 17;  // picked something at "random"
  uint32_t indexLarge = indexSmall << 2;
  uint32_t hash = indexSmall << (32 - small->logSize());

  auto pair = small->fetchAndLockBucket(hash, -1);
  auto bucket = reinterpret_cast<PlainBucket*>(pair.first);
  auto source = pair.second;
  ASSERT_EQ(bucket, reinterpret_cast<PlainBucket*>(small->primaryBucket(indexSmall)));
  // mark the bucket as migrated so subsequent fetches fall through to the
  // auxiliary table
  bucket->_state.toggleFlag(BucketState::Flag::migrated);
  bucket->unlock();
  ASSERT_EQ(source, small.get());

  pair = small->fetchAndLockBucket(hash, -1);
  bucket = reinterpret_cast<PlainBucket*>(pair.first);
  source = pair.second;
  ASSERT_EQ(bucket, reinterpret_cast<PlainBucket*>(large->primaryBucket(indexLarge)));
  ASSERT_EQ(source, large.get());

  pair = small->fetchAndLockBucket(hash, 10);
  ASSERT_EQ(pair.first, nullptr);
  ASSERT_EQ(pair.second, nullptr);

  bucket->unlock();
}

TEST_F(CacheTableMigrationTest, check_subtable_fetching_for_moving_to_a_smaller_table) {
  auto res = large->setAuxiliary(small);
  ASSERT_EQ(res.get(), nullptr);

  uint32_t indexLarge = 822;  // picked something at "random"
  uint32_t indexSmall = indexLarge >> 2;
  uint32_t hash = indexLarge << (32 - large->logSize());

  auto subtable = large->auxiliaryBuckets(indexLarge);
  ASSERT_NE(subtable.get(), nullptr);
  auto bucket = subtable->fetchBucket(hash);
  ASSERT_EQ(bucket, small->primaryBucket(indexSmall));
}

TEST_F(CacheTableMigrationTest, check_subtable_fetching_for_moving_to_a_larger_table) {
  auto res = small->setAuxiliary(large);
  ASSERT_EQ(res.get(), nullptr);

  uint32_t indexSmall = 217;  // picked something at "random"
  uint32_t indexLargeBase = indexSmall << 2;

  auto subtable = small->auxiliaryBuckets(indexSmall);
  ASSERT_NE(subtable.get(), nullptr);
  for (uint32_t i = 0; i < 4; i++) {
    uint32_t indexLarge = indexLargeBase + i;
    uint32_t hash = indexLarge << (32 - large->logSize());
    ASSERT_EQ(subtable->fetchBucket(hash), large->primaryBucket(indexLarge));
  }
}

TEST_F(CacheTableMigrationTest, check_subtable_apply_all_works) {
  auto res = small->setAuxiliary(large);
  ASSERT_EQ(res.get(), nullptr);

  uint32_t indexSmall = 172;  // picked something at "random"
  uint32_t indexLargeBase = indexSmall << 2;

  auto subtable = small->auxiliaryBuckets(indexSmall);
  ASSERT_NE(subtable.get(), nullptr);
  // lock all four corresponding buckets in the larger table
  subtable->applyToAllBuckets([](void* ptr) -> bool {
    PlainBucket* bucket = reinterpret_cast<PlainBucket*>(ptr);
    return bucket->lock(-1);
  });
  for (uint32_t i = 0; i < 4; i++) {
    uint32_t indexLarge = indexLargeBase + i;
    uint32_t hash = indexLarge << (32 - large->logSize());
    auto bucket = reinterpret_cast<PlainBucket*>(subtable->fetchBucket(hash));
    ASSERT_TRUE(bucket->isLocked());
  }
  // unlock them again
  subtable->applyToAllBuckets([](void* ptr) -> bool {
    PlainBucket* bucket = reinterpret_cast<PlainBucket*>(ptr);
    bucket->unlock();
    return true;
  });
}
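// The assertions below imply that idealSize() requests a table one logSize
// step smaller once fewer than roughly 4% of slots are filled and one step
// larger once more than 25% are filled, and that slotFilled() returns true
// while the table sits above the 25% mark while slotEmptied() returns true
// while it sits below the 4% mark. These thresholds are read off the test
// expectations, not from the Table implementation itself.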
TEST_F(CacheTableMigrationTest, test_fill_ratio_methods) {
  // fill the table one slot at a time and check the requested ideal size
  for (uint64_t i = 0; i < large->size(); i++) {
    bool res = large->slotFilled();
    if (static_cast<double>(i + 1) < 0.04 * static_cast<double>(large->size())) {
      ASSERT_EQ(large->idealSize(), large->logSize() - 1);
      ASSERT_FALSE(res);
    } else if (static_cast<double>(i + 1) > 0.25 * static_cast<double>(large->size())) {
      ASSERT_EQ(large->idealSize(), large->logSize() + 1);
      ASSERT_TRUE(res);
    } else {
      ASSERT_EQ(large->idealSize(), large->logSize());
      ASSERT_FALSE(res);
    }
  }
  // now empty it again and check the ideal size on the way back down
  for (uint64_t i = large->size(); i > 0; i--) {
    bool res = large->slotEmptied();
    if (static_cast<double>(i - 1) < 0.04 * static_cast<double>(large->size())) {
      ASSERT_EQ(large->idealSize(), large->logSize() - 1);
      ASSERT_TRUE(res);
    } else if (static_cast<double>(i - 1) > 0.25 * static_cast<double>(large->size())) {
      ASSERT_EQ(large->idealSize(), large->logSize() + 1);
      ASSERT_FALSE(res);
    } else {
      ASSERT_EQ(large->idealSize(), large->logSize());
      ASSERT_FALSE(res);
    }
  }
}