mirror of https://gitee.com/bigwinds/arangodb
Doubled size of cache buckets to increase associativity.
parent 77f1c66a0f
commit 78c80c3a3d
@@ -34,7 +34,7 @@ namespace cache {
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief Common size for all bucket types.
 ////////////////////////////////////////////////////////////////////////////////
-constexpr size_t BUCKET_SIZE = 64;
+constexpr size_t BUCKET_SIZE = 128;
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief Enum to specify cache types.
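With BUCKET_SIZE doubled from 64 to 128 bytes, each bucket now spans two 64-byte cache lines instead of one and can hold twice as many entries, which is where the increased associativity comes from. A minimal sketch of the size invariants the rest of the patch relies on (illustrative standalone code, not part of the diff; 64-byte cache lines are assumed):

#include <cstddef>

namespace cache {

constexpr size_t BUCKET_SIZE = 128;  // new value from this commit
constexpr size_t CACHE_LINE = 64;    // assumed cache-line size on x86-64

// The alignment mask in Table's constructor only works for powers of two,
// and a bucket should cover a whole number of cache lines.
static_assert((BUCKET_SIZE & (BUCKET_SIZE - 1)) == 0,
              "BUCKET_SIZE must be a power of two");
static_assert(BUCKET_SIZE % CACHE_LINE == 0,
              "BUCKET_SIZE must be a multiple of the cache line size");

}  // namespace cache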
@@ -47,8 +47,10 @@ namespace cache {
 struct PlainBucket {
   State _state;
+  uint32_t _paddingExplicit;  // fill 4-byte gap for alignment purposes
 
   // actual cached entries
-  static constexpr size_t slotsData = 5;
+  static constexpr size_t slotsData = 10;
   uint32_t _cachedHashes[slotsData];
   CachedValue* _cachedData[slotsData];
 
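On a 64-bit build the new slot count fills the 128-byte bucket exactly: 4 bytes of state, 4 bytes of explicit padding, 40 bytes of hashes, and 80 bytes of pointers. A standalone layout check with stand-in types (State and CachedValue below are placeholders, assuming a 4-byte state word and 8-byte pointers; not part of the diff):

#include <cstdint>

// Stand-ins for the real cache types, used only to check the arithmetic.
struct State { uint32_t _word; };   // assumed 4 bytes
struct CachedValue;                 // opaque; pointers are 8 bytes on x86-64

struct PlainBucketLayout {
  State _state;                     //  4 bytes
  uint32_t _paddingExplicit;        //  4 bytes, brings the hashes to offset 8
  uint32_t _cachedHashes[10];       // 40 bytes
  CachedValue* _cachedData[10];     // 80 bytes, starts 8-byte aligned at offset 48
};                                  // total: 128 bytes

static_assert(sizeof(PlainBucketLayout) == 128,
              "PlainBucket should fill the new 128-byte bucket exactly");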
@@ -81,8 +81,8 @@ Table::Table(uint32_t logSize)
       _mask((uint32_t)((_size - 1) << _shift)),
       _buffer(new uint8_t[(_size * BUCKET_SIZE) + Table::padding]),
       _buckets(reinterpret_cast<GenericBucket*>(
-          reinterpret_cast<uint64_t>((_buffer.get() + 63)) &
-          ~(static_cast<uint64_t>(0x3fU)))),
+          reinterpret_cast<uint64_t>((_buffer.get() + (BUCKET_SIZE - 1))) &
+          ~(static_cast<uint64_t>(BUCKET_SIZE - 1)))),
       _auxiliary(nullptr),
       _bucketClearer(defaultClearer),
       _slotsTotal(_size),
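The buffer is raw uint8_t storage, so the constructor rounds its start address up to the next bucket boundary before treating it as an array of buckets; the old code hard-coded the 64-byte boundary as + 63 and ~0x3f, while the new code derives both from BUCKET_SIZE. A self-contained sketch of the same rounding (alignUp is an illustrative name, not from the patch):

#include <cstdint>
#include <cstdio>

constexpr uint64_t BUCKET_SIZE = 128;  // must be a power of two for the mask to work

// Round an address up to the next BUCKET_SIZE boundary, exactly as the updated
// initializer does with (_buffer.get() + (BUCKET_SIZE - 1)) & ~(BUCKET_SIZE - 1).
uint64_t alignUp(uint64_t address) {
  return (address + (BUCKET_SIZE - 1)) & ~(BUCKET_SIZE - 1);
}

int main() {
  std::printf("%llu\n", (unsigned long long)alignUp(1000));  // prints 1024
  std::printf("%llu\n", (unsigned long long)alignUp(1024));  // prints 1024
  return 0;
}

Because the mask trick only works when BUCKET_SIZE is a power of two, doubling from 64 to 128 keeps it valid.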
@@ -43,7 +43,7 @@ class Table : public std::enable_shared_from_this<Table> {
   static const uint32_t maxLogSize;
   static constexpr uint32_t standardLogSizeAdjustment = 6;
   static constexpr int64_t triesGuarantee = -1;
-  static constexpr uint64_t padding = 64;
+  static constexpr uint64_t padding = BUCKET_SIZE;
 
   typedef std::function<void(void*)> BucketClearer;
 
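Tying padding to BUCKET_SIZE keeps the over-allocation in step with the alignment change above: rounding the buffer start up can consume at most BUCKET_SIZE - 1 = 127 bytes, so allocating _size * BUCKET_SIZE + BUCKET_SIZE bytes still leaves room for all _size buckets after the start address is aligned.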
@@ -50,8 +50,13 @@ namespace cache {
 struct TransactionalBucket {
   State _state;
 
+  // blacklist entries for transactional semantics
+  static constexpr size_t slotsBlacklist = 5;
+  uint32_t _blacklistHashes[slotsBlacklist];
+  uint64_t _blacklistTerm;
+
   // actual cached entries
-  static constexpr size_t slotsData = 3;
+  static constexpr size_t slotsData = 8;
   uint32_t _cachedHashes[slotsData];
   CachedValue* _cachedData[slotsData];
 
@@ -60,11 +65,6 @@ struct TransactionalBucket {
   uint32_t _padding[slotsData];
 #endif
 
-  // blacklist entries for transactional semantics
-  static constexpr size_t slotsBlacklist = 4;
-  uint32_t _blacklistHashes[slotsBlacklist];
-  uint64_t _blacklistTerm;
-
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Initialize an empty bucket.
   //////////////////////////////////////////////////////////////////////////////
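Read together, the two TransactionalBucket hunks move the blacklist block ahead of the cached entries, grow it from 4 to 5 slots, and grow the data slots from 3 to 8, so this bucket also lands exactly on the new 128 bytes on a 64-bit build. Another stand-in layout check (same placeholder types and assumptions as above; the 32-bit #ifdef padding is ignored here):

#include <cstdint>

struct State { uint32_t _word; };    // assumed 4 bytes
struct CachedValue;                  // opaque; 8-byte pointers assumed

struct TransactionalBucketLayout {
  State _state;                      //  4 bytes
  uint32_t _blacklistHashes[5];      // 20 bytes
  uint64_t _blacklistTerm;           //  8 bytes, starts 8-byte aligned at offset 24
  uint32_t _cachedHashes[8];         // 32 bytes
  CachedValue* _cachedData[8];       // 64 bytes, starts 8-byte aligned at offset 64
};                                   // total: 128 bytes

static_assert(sizeof(TransactionalBucketLayout) == 128,
              "TransactionalBucket should fill the new 128-byte bucket exactly");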
@@ -40,13 +40,15 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   auto bucket = std::make_unique<PlainBucket>();
   bool success;
 
-  uint32_t hashes[6] = {
+  uint32_t hashes[11] = {
       1, 2, 3,
-      4, 5, 6}; // don't have to be real, but should be unique and non-zero
-  uint64_t keys[6] = {0, 1, 2, 3, 4, 5};
-  uint64_t values[6] = {0, 1, 2, 3, 4, 5};
-  CachedValue* ptrs[6];
-  for (size_t i = 0; i < 6; i++) {
+      4, 5, 6,
+      7, 8, 9,
+      10, 11}; // don't have to be real, but should be unique and non-zero
+  uint64_t keys[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  uint64_t values[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  CachedValue* ptrs[11];
+  for (size_t i = 0; i < 11; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -54,31 +56,31 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   success = bucket->lock(-1LL);
   REQUIRE(success);
 
-  // insert five to fill
+  // insert ten to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 4) {
+    if (i < 9) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
   }
 
   // check that insert is ignored if full
-  bucket->insert(hashes[5], ptrs[5]);
-  CachedValue* res = bucket->find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
+  bucket->insert(hashes[10], ptrs[10]);
+  CachedValue* res = bucket->find(hashes[10], ptrs[10]->key(), ptrs[10]->keySize);
   REQUIRE(nullptr == res);
 
   bucket->unlock();
 
   // cleanup
-  for (size_t i = 0; i < 6; i++) {
+  for (size_t i = 0; i < 11; i++) {
     delete ptrs[i];
   }
 }
@@ -135,13 +137,15 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   auto bucket = std::make_unique<PlainBucket>();
   bool success;
 
-  uint32_t hashes[6] = {
+  uint32_t hashes[11] = {
       1, 2, 3,
-      4, 5, 6}; // don't have to be real, but should be unique and non-zero
-  uint64_t keys[6] = {0, 1, 2, 3, 4, 5};
-  uint64_t values[6] = {0, 1, 2, 3, 4, 5};
-  CachedValue* ptrs[6];
-  for (size_t i = 0; i < 6; i++) {
+      4, 5, 6,
+      7, 8, 9,
+      10, 11}; // don't have to be real, but should be unique and non-zero
+  uint64_t keys[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  uint64_t values[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  CachedValue* ptrs[11];
+  for (size_t i = 0; i < 11; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -151,15 +155,15 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
 
   // insert five to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 4) {
+    if (i < 9) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
@@ -182,14 +186,14 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   REQUIRE(!bucket->isFull());
 
   // check that we can insert now after eviction optimized for insertion
-  bucket->insert(hashes[5], ptrs[5]);
-  res = bucket->find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
-  REQUIRE(res == ptrs[5]);
+  bucket->insert(hashes[10], ptrs[10]);
+  res = bucket->find(hashes[10], ptrs[10]->key(), ptrs[10]->keySize);
+  REQUIRE(res == ptrs[10]);
 
   bucket->unlock();
 
   // cleanup
-  for (size_t i = 0; i < 6; i++) {
+  for (size_t i = 0; i < 11; i++) {
     delete ptrs[i];
   }
 }
@@ -68,13 +68,13 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   auto bucket = std::make_unique<TransactionalBucket>();
   bool success;
 
-  uint32_t hashes[4] = {
-      1, 2, 3,
-      4}; // don't have to be real, but should be unique and non-zero
-  uint64_t keys[4] = {0, 1, 2, 3};
-  uint64_t values[4] = {0, 1, 2, 3};
-  CachedValue* ptrs[4];
-  for (size_t i = 0; i < 4; i++) {
+  uint32_t hashes[9] = {
+      1, 2, 3, 4, 5, 6, 7,
+      8, 9}; // don't have to be real, but should be unique and non-zero
+  uint64_t keys[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  uint64_t values[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  CachedValue* ptrs[9];
+  for (size_t i = 0; i < 9; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -84,29 +84,29 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
 
   // insert three to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 2) {
+    if (i < 7) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 7; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
   }
 
   // check that insert is ignored if full
-  bucket->insert(hashes[3], ptrs[3]);
-  CachedValue* res = bucket->find(hashes[3], ptrs[3]->key(), ptrs[3]->keySize);
+  bucket->insert(hashes[8], ptrs[8]);
+  CachedValue* res = bucket->find(hashes[8], ptrs[8]->key(), ptrs[8]->keySize);
   REQUIRE(nullptr == res);
 
   bucket->unlock();
 
   // cleanup
-  for (size_t i = 0; i < 4; i++) {
+  for (size_t i = 0; i < 9; i++) {
     delete ptrs[i];
   }
 }
@@ -163,13 +163,13 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   auto bucket = std::make_unique<TransactionalBucket>();
   bool success;
 
-  uint32_t hashes[4] = {
-      1, 2, 3,
-      4}; // don't have to be real, but should be unique and non-zero
-  uint64_t keys[4] = {0, 1, 2, 3};
-  uint64_t values[4] = {0, 1, 2, 3};
-  CachedValue* ptrs[4];
-  for (size_t i = 0; i < 4; i++) {
+  uint32_t hashes[9] = {
+      1, 2, 3, 4, 5, 6, 7, 8,
+      9}; // don't have to be real, but should be unique and non-zero
+  uint64_t keys[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  uint64_t values[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  CachedValue* ptrs[9];
+  for (size_t i = 0; i < 9; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -179,15 +179,15 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
 
   // insert three to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 2) {
+    if (i < 7) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
@@ -210,14 +210,14 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   REQUIRE(!bucket->isFull());
 
   // check that we can insert now after eviction optimized for insertion
-  bucket->insert(hashes[3], ptrs[3]);
-  res = bucket->find(hashes[3], ptrs[3]->key(), ptrs[3]->keySize);
-  REQUIRE(res == ptrs[3]);
+  bucket->insert(hashes[8], ptrs[8]);
+  res = bucket->find(hashes[8], ptrs[8]->key(), ptrs[8]->keySize);
+  REQUIRE(res == ptrs[8]);
 
   bucket->unlock();
 
   // cleanup
-  for (size_t i = 0; i < 4; i++) {
+  for (size_t i = 0; i < 9; i++) {
     delete ptrs[i];
   }
 }
@@ -227,12 +227,12 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   bool success;
   CachedValue* res;
 
-  uint32_t hashes[7] = {1, 1, 2, 3,
-                        4, 5, 6}; // don't have to be real, want some overlap
-  uint64_t keys[6] = {0, 1, 2, 3, 4, 5};
-  uint64_t values[6] = {0, 1, 2, 3, 4, 5};
-  CachedValue* ptrs[6];
-  for (size_t i = 0; i < 6; i++) {
+  uint32_t hashes[8] = {1, 1, 2, 3, 4,
+                        5, 6, 7}; // don't have to be real, want some overlap
+  uint64_t keys[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  uint64_t values[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  CachedValue* ptrs[8];
+  for (size_t i = 0; i < 8; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -241,26 +241,26 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   bucket->updateBlacklistTerm(1ULL);
   REQUIRE(success);
 
-  // insert three to fill
+  // insert eight to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 2) {
+    if (i < 7) {
       REQUIRE(!bucket->isFull());
     } else {
      REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
    res = bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
    REQUIRE(res == ptrs[i]);
   }
 
-  // blacklist 1-4 to fill blacklist
-  for (size_t i = 1; i < 5; i++) {
+  // blacklist 1-5 to fill blacklist
+  for (size_t i = 1; i < 6; i++) {
     bucket->blacklist(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
   }
-  for (size_t i = 1; i < 5; i++) {
+  for (size_t i = 1; i < 6; i++) {
     REQUIRE(bucket->isBlacklisted(hashes[i]));
     res = bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(nullptr == res);
@@ -278,16 +278,16 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   REQUIRE(nullptr == res);
 
   // proceed to fully blacklist
-  bucket->blacklist(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
-  REQUIRE(bucket->isBlacklisted(hashes[5]));
-  res = bucket->find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
+  bucket->blacklist(hashes[6], ptrs[6]->key(), ptrs[6]->keySize);
+  REQUIRE(bucket->isBlacklisted(hashes[6]));
+  res = bucket->find(hashes[6], ptrs[6]->key(), ptrs[6]->keySize);
   REQUIRE(nullptr == res);
   // make sure it still didn't remove non-matching key
   res = bucket->find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize);
   REQUIRE(ptrs[0] == res);
   // make sure it's fully blacklisted
   REQUIRE(bucket->isFullyBlacklisted());
-  REQUIRE(bucket->isBlacklisted(hashes[6]));
+  REQUIRE(bucket->isBlacklisted(hashes[7]));
 
   bucket->unlock();
 
@@ -301,7 +301,7 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   bucket->unlock();
 
   // cleanup
-  for (size_t i = 0; i < 6; i++) {
+  for (size_t i = 0; i < 8; i++) {
    delete ptrs[i];
   }
 }