
Doubled size of cache buckets to increase associativity.

Dan Larkin 2017-05-23 17:51:47 -04:00
parent 77f1c66a0f
commit 78c80c3a3d
7 changed files with 88 additions and 82 deletions

View File

@@ -34,7 +34,7 @@ namespace cache {
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief Common size for all bucket types.
 ////////////////////////////////////////////////////////////////////////////////
-constexpr size_t BUCKET_SIZE = 64;
+constexpr size_t BUCKET_SIZE = 128;
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief Enum to specify cache types.
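Doubling BUCKET_SIZE to 128 means each bucket now spans two 64-byte cache lines instead of one. The table code further down relies on the constant staying a power of two, since it is used directly as an alignment mask. A minimal compile-time sanity check one could keep next to the constant (not part of this commit, purely illustrative):

#include <cstddef>

constexpr std::size_t BUCKET_SIZE = 128;

// BUCKET_SIZE must stay a power of two for the ~(BUCKET_SIZE - 1) mask to work,
// and should cover whole 64-byte cache lines.
static_assert((BUCKET_SIZE & (BUCKET_SIZE - 1)) == 0,
              "BUCKET_SIZE must be a power of two");
static_assert(BUCKET_SIZE % 64 == 0,
              "BUCKET_SIZE should be a whole number of cache lines");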

View File

@@ -47,8 +47,10 @@ namespace cache {
 struct PlainBucket {
   State _state;
+  uint32_t _paddingExplicit;  // fill 4-byte gap for alignment purposes
   // actual cached entries
-  static constexpr size_t slotsData = 5;
+  static constexpr size_t slotsData = 10;
   uint32_t _cachedHashes[slotsData];
   CachedValue* _cachedData[slotsData];
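Ten slots is exactly what a 128-byte bucket can hold on a 64-bit build: 4 bytes of state, 4 bytes of explicit padding, ten 4-byte hashes, and ten 8-byte pointers. A minimal layout sketch, assuming State occupies 4 bytes (an assumption not visible in this diff):

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for PlainBucket, 64-bit case only; field sizes are
// assumptions used to show why slotsData became 10.
struct PlainBucketSketch {
  std::uint32_t _state;            // assumed 4-byte State
  std::uint32_t _paddingExplicit;  // makes the gap before the hashes explicit
  static constexpr std::size_t slotsData = 10;
  std::uint32_t _cachedHashes[slotsData];  // 10 * 4 = 40 bytes
  void* _cachedData[slotsData];            // 10 * 8 = 80 bytes on 64-bit
};

// 4 + 4 + 40 + 80 = 128: the bucket exactly fills the new BUCKET_SIZE.
static_assert(sizeof(PlainBucketSketch) == 128, "one bucket, two cache lines");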

View File

@@ -81,8 +81,8 @@ Table::Table(uint32_t logSize)
       _mask((uint32_t)((_size - 1) << _shift)),
       _buffer(new uint8_t[(_size * BUCKET_SIZE) + Table::padding]),
       _buckets(reinterpret_cast<GenericBucket*>(
-          reinterpret_cast<uint64_t>((_buffer.get() + 63)) &
-          ~(static_cast<uint64_t>(0x3fU)))),
+          reinterpret_cast<uint64_t>((_buffer.get() + (BUCKET_SIZE - 1))) &
+          ~(static_cast<uint64_t>(BUCKET_SIZE - 1)))),
      _auxiliary(nullptr),
      _bucketClearer(defaultClearer),
      _slotsTotal(_size),
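The bucket array start is rounded up to the next BUCKET_SIZE boundary with the usual power-of-two trick: add BUCKET_SIZE - 1, then clear the low bits. A self-contained sketch of the same computation on plain integers (names here are illustrative, not taken from Table.cpp):

#include <cstdint>

// Round addr up to the next multiple of alignment; only valid when alignment
// is a power of two, since ~(alignment - 1) is then a clean low-bit mask.
constexpr std::uint64_t alignUp(std::uint64_t addr, std::uint64_t alignment) {
  return (addr + (alignment - 1)) & ~(alignment - 1);
}

static_assert(alignUp(0, 128) == 0, "aligned addresses are unchanged");
static_assert(alignUp(1, 128) == 128, "anything past a boundary rounds up");
static_assert(alignUp(255, 128) == 256, "at most alignment - 1 bytes are skipped");

Because rounding up can skip as many as BUCKET_SIZE - 1 bytes at the front of the buffer, Table::padding (in the header below) now tracks BUCKET_SIZE rather than staying hard-coded at 64.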

View File

@@ -43,7 +43,7 @@ class Table : public std::enable_shared_from_this<Table> {
   static const uint32_t maxLogSize;
   static constexpr uint32_t standardLogSizeAdjustment = 6;
   static constexpr int64_t triesGuarantee = -1;
-  static constexpr uint64_t padding = 64;
+  static constexpr uint64_t padding = BUCKET_SIZE;
   typedef std::function<void(void*)> BucketClearer;

View File

@@ -50,8 +50,13 @@ namespace cache {
 struct TransactionalBucket {
   State _state;
+  // blacklist entries for transactional semantics
+  static constexpr size_t slotsBlacklist = 5;
+  uint32_t _blacklistHashes[slotsBlacklist];
+  uint64_t _blacklistTerm;
   // actual cached entries
-  static constexpr size_t slotsData = 3;
+  static constexpr size_t slotsData = 8;
   uint32_t _cachedHashes[slotsData];
   CachedValue* _cachedData[slotsData];
@@ -60,11 +65,6 @@ struct TransactionalBucket {
   uint32_t _padding[slotsData];
 #endif
-  // blacklist entries for transactional semantics
-  static constexpr size_t slotsBlacklist = 4;
-  uint32_t _blacklistHashes[slotsBlacklist];
-  uint64_t _blacklistTerm;
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Initialize an empty bucket.
   //////////////////////////////////////////////////////////////////////////////
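Beyond growing the slot counts, the blacklist members move ahead of the cached entries. With eight data slots, leaving them at the end would force compiler-inserted padding before the pointer array and before _blacklistTerm, pushing the struct past 128 bytes; placed first, everything packs exactly. A 64-bit layout sketch, again assuming a 4-byte State:

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for the 64-bit TransactionalBucket layout.
struct TransactionalBucketSketch {
  std::uint32_t _state;  // assumed 4 bytes

  // blacklist first: _blacklistTerm lands at offset 24, already 8-byte aligned
  static constexpr std::size_t slotsBlacklist = 5;
  std::uint32_t _blacklistHashes[slotsBlacklist];  // 20 bytes
  std::uint64_t _blacklistTerm;                    // 8 bytes

  // cached entries
  static constexpr std::size_t slotsData = 8;
  std::uint32_t _cachedHashes[slotsData];  // 32 bytes
  void* _cachedData[slotsData];            // 64 bytes on 64-bit
};

// 4 + 20 + 8 + 32 + 64 = 128 with no compiler-inserted padding.
static_assert(sizeof(TransactionalBucketSketch) == 128,
              "sketch fills the 128-byte bucket exactly");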

View File

@@ -40,13 +40,15 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   auto bucket = std::make_unique<PlainBucket>();
   bool success;
-  uint32_t hashes[6] = {
-      1, 2, 3,
-      4, 5, 6};  // don't have to be real, but should be unique and non-zero
-  uint64_t keys[6] = {0, 1, 2, 3, 4, 5};
-  uint64_t values[6] = {0, 1, 2, 3, 4, 5};
-  CachedValue* ptrs[6];
-  for (size_t i = 0; i < 6; i++) {
+  uint32_t hashes[11] = {
+      1, 2, 3,
+      4, 5, 6,
+      7, 8, 9,
+      10, 11};  // don't have to be real, but should be unique and non-zero
+  uint64_t keys[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  uint64_t values[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  CachedValue* ptrs[11];
+  for (size_t i = 0; i < 11; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -54,31 +56,31 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   success = bucket->lock(-1LL);
   REQUIRE(success);
-  // insert five to fill
+  // insert ten to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 4) {
+    if (i < 9) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
   }
   // check that insert is ignored if full
-  bucket->insert(hashes[5], ptrs[5]);
-  CachedValue* res = bucket->find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
+  bucket->insert(hashes[10], ptrs[10]);
+  CachedValue* res = bucket->find(hashes[10], ptrs[10]->key(), ptrs[10]->keySize);
   REQUIRE(nullptr == res);
   bucket->unlock();
   // cleanup
-  for (size_t i = 0; i < 6; i++) {
+  for (size_t i = 0; i < 11; i++) {
     delete ptrs[i];
   }
 }
@@ -135,13 +137,15 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   auto bucket = std::make_unique<PlainBucket>();
   bool success;
-  uint32_t hashes[6] = {
-      1, 2, 3,
-      4, 5, 6};  // don't have to be real, but should be unique and non-zero
-  uint64_t keys[6] = {0, 1, 2, 3, 4, 5};
-  uint64_t values[6] = {0, 1, 2, 3, 4, 5};
-  CachedValue* ptrs[6];
-  for (size_t i = 0; i < 6; i++) {
+  uint32_t hashes[11] = {
+      1, 2, 3,
+      4, 5, 6,
+      7, 8, 9,
+      10, 11};  // don't have to be real, but should be unique and non-zero
+  uint64_t keys[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  uint64_t values[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  CachedValue* ptrs[11];
+  for (size_t i = 0; i < 11; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -151,15 +155,15 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   // insert five to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 4) {
+    if (i < 9) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 5; i++) {
+  for (size_t i = 0; i < 10; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
@@ -182,14 +186,14 @@ TEST_CASE("cache::PlainBucket", "[cache]") {
   REQUIRE(!bucket->isFull());
   // check that we can insert now after eviction optimized for insertion
-  bucket->insert(hashes[5], ptrs[5]);
-  res = bucket->find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
-  REQUIRE(res == ptrs[5]);
+  bucket->insert(hashes[10], ptrs[10]);
+  res = bucket->find(hashes[10], ptrs[10]->key(), ptrs[10]->keySize);
+  REQUIRE(res == ptrs[10]);
   bucket->unlock();
   // cleanup
-  for (size_t i = 0; i < 6; i++) {
+  for (size_t i = 0; i < 11; i++) {
     delete ptrs[i];
   }
 }

View File

@@ -68,13 +68,13 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   auto bucket = std::make_unique<TransactionalBucket>();
   bool success;
-  uint32_t hashes[4] = {
-      1, 2, 3,
-      4};  // don't have to be real, but should be unique and non-zero
-  uint64_t keys[4] = {0, 1, 2, 3};
-  uint64_t values[4] = {0, 1, 2, 3};
-  CachedValue* ptrs[4];
-  for (size_t i = 0; i < 4; i++) {
+  uint32_t hashes[9] = {
+      1, 2, 3, 4, 5, 6, 7,
+      8, 9};  // don't have to be real, but should be unique and non-zero
+  uint64_t keys[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  uint64_t values[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  CachedValue* ptrs[9];
+  for (size_t i = 0; i < 9; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -84,29 +84,29 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   // insert three to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 2) {
+    if (i < 7) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 7; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
   }
   // check that insert is ignored if full
-  bucket->insert(hashes[3], ptrs[3]);
-  CachedValue* res = bucket->find(hashes[3], ptrs[3]->key(), ptrs[3]->keySize);
+  bucket->insert(hashes[8], ptrs[8]);
+  CachedValue* res = bucket->find(hashes[8], ptrs[8]->key(), ptrs[8]->keySize);
   REQUIRE(nullptr == res);
   bucket->unlock();
   // cleanup
-  for (size_t i = 0; i < 4; i++) {
+  for (size_t i = 0; i < 9; i++) {
     delete ptrs[i];
   }
 }
@@ -163,13 +163,13 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   auto bucket = std::make_unique<TransactionalBucket>();
   bool success;
-  uint32_t hashes[4] = {
-      1, 2, 3,
-      4};  // don't have to be real, but should be unique and non-zero
-  uint64_t keys[4] = {0, 1, 2, 3};
-  uint64_t values[4] = {0, 1, 2, 3};
-  CachedValue* ptrs[4];
-  for (size_t i = 0; i < 4; i++) {
+  uint32_t hashes[9] = {
+      1, 2, 3, 4, 5, 6, 7, 8,
+      9};  // don't have to be real, but should be unique and non-zero
+  uint64_t keys[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  uint64_t values[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+  CachedValue* ptrs[9];
+  for (size_t i = 0; i < 9; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -179,15 +179,15 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   // insert three to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 2) {
+    if (i < 7) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     CachedValue* res =
         bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
@@ -210,14 +210,14 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   REQUIRE(!bucket->isFull());
   // check that we can insert now after eviction optimized for insertion
-  bucket->insert(hashes[3], ptrs[3]);
-  res = bucket->find(hashes[3], ptrs[3]->key(), ptrs[3]->keySize);
-  REQUIRE(res == ptrs[3]);
+  bucket->insert(hashes[8], ptrs[8]);
+  res = bucket->find(hashes[8], ptrs[8]->key(), ptrs[8]->keySize);
+  REQUIRE(res == ptrs[8]);
   bucket->unlock();
   // cleanup
-  for (size_t i = 0; i < 4; i++) {
+  for (size_t i = 0; i < 9; i++) {
     delete ptrs[i];
   }
 }
@@ -227,12 +227,12 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   bool success;
   CachedValue* res;
-  uint32_t hashes[7] = {1, 1, 2, 3,
-                        4, 5, 6};  // don't have to be real, want some overlap
-  uint64_t keys[6] = {0, 1, 2, 3, 4, 5};
-  uint64_t values[6] = {0, 1, 2, 3, 4, 5};
-  CachedValue* ptrs[6];
-  for (size_t i = 0; i < 6; i++) {
+  uint32_t hashes[8] = {1, 1, 2, 3, 4,
+                        5, 6, 7};  // don't have to be real, want some overlap
+  uint64_t keys[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  uint64_t values[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+  CachedValue* ptrs[8];
+  for (size_t i = 0; i < 8; i++) {
     ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t),
                                      &(values[i]), sizeof(uint64_t));
   }
@@ -241,26 +241,26 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   bucket->updateBlacklistTerm(1ULL);
   REQUIRE(success);
-  // insert three to fill
+  // insert eight to fill
   REQUIRE(!bucket->isFull());
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     bucket->insert(hashes[i], ptrs[i]);
-    if (i < 2) {
+    if (i < 7) {
       REQUIRE(!bucket->isFull());
     } else {
       REQUIRE(bucket->isFull());
     }
   }
-  for (size_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 8; i++) {
     res = bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(res == ptrs[i]);
   }
-  // blacklist 1-4 to fill blacklist
-  for (size_t i = 1; i < 5; i++) {
+  // blacklist 1-5 to fill blacklist
+  for (size_t i = 1; i < 6; i++) {
     bucket->blacklist(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
   }
-  for (size_t i = 1; i < 5; i++) {
+  for (size_t i = 1; i < 6; i++) {
     REQUIRE(bucket->isBlacklisted(hashes[i]));
     res = bucket->find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
     REQUIRE(nullptr == res);
@@ -278,16 +278,16 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   REQUIRE(nullptr == res);
   // proceed to fully blacklist
-  bucket->blacklist(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
-  REQUIRE(bucket->isBlacklisted(hashes[5]));
-  res = bucket->find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
+  bucket->blacklist(hashes[6], ptrs[6]->key(), ptrs[6]->keySize);
+  REQUIRE(bucket->isBlacklisted(hashes[6]));
+  res = bucket->find(hashes[6], ptrs[6]->key(), ptrs[6]->keySize);
   REQUIRE(nullptr == res);
   // make sure it still didn't remove non-matching key
   res = bucket->find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize);
   REQUIRE(ptrs[0] == res);
   // make sure it's fully blacklisted
   REQUIRE(bucket->isFullyBlacklisted());
-  REQUIRE(bucket->isBlacklisted(hashes[6]));
+  REQUIRE(bucket->isBlacklisted(hashes[7]));
   bucket->unlock();
@@ -301,7 +301,7 @@ TEST_CASE("cache::TransactionalBucket", "[cache]") {
   bucket->unlock();
   // cleanup
-  for (size_t i = 0; i < 6; i++) {
+  for (size_t i = 0; i < 8; i++) {
     delete ptrs[i];
   }
 }