diff --git a/arangod/Cache/PlainBucket.cpp b/arangod/Cache/PlainBucket.cpp
index 6d1ccf34b7..9e4c24ead3 100644
--- a/arangod/Cache/PlainBucket.cpp
+++ b/arangod/Cache/PlainBucket.cpp
@@ -142,7 +142,14 @@ void PlainBucket::evict(CachedValue* value, bool optimizeForInsertion) {
 
 void PlainBucket::clear() {
   TRI_ASSERT(isLocked());
-  memset(this, 0, sizeof(PlainBucket));
+  _state.clear();  // "clear" will keep the lock!
+
+  for (size_t i = 0; i < slotsData; ++i) {
+    _cachedHashes[i] = 0;
+    _cachedData[i] = nullptr;
+  }
+
+  _state.unlock();
 }
 
 void PlainBucket::moveSlot(size_t slot, bool moveToFront) {
diff --git a/arangod/Cache/Table.cpp b/arangod/Cache/Table.cpp
index b25defecc4..17ca4dedca 100644
--- a/arangod/Cache/Table.cpp
+++ b/arangod/Cache/Table.cpp
@@ -34,6 +34,8 @@ using namespace arangodb::cache;
 const uint32_t Table::minLogSize = 8;
 const uint32_t Table::maxLogSize = 32;
 
+Table::GenericBucket::GenericBucket() : _state{}, _padding{} {}
+
 bool Table::GenericBucket::lock(uint64_t maxTries) {
   return _state.lock(maxTries);
 }
@@ -43,6 +45,16 @@ void Table::GenericBucket::unlock() {
   _state.unlock();
 }
 
+void Table::GenericBucket::clear() {
+  _state.lock(UINT64_MAX, [this]() -> void {
+    _state.clear();
+    for (size_t i = 0; i < paddingSize; i++) {
+      _padding[i] = static_cast<uint8_t>(0);
+    }
+    _state.unlock();
+  });
+}
+
 bool Table::GenericBucket::isMigrated() const {
   TRI_ASSERT(_state.isLocked());
   return _state.isSet(BucketState::Flag::migrated);
 }
@@ -88,7 +100,19 @@ Table::Table(uint32_t logSize)
       _bucketClearer(defaultClearer),
       _slotsTotal(_size),
       _slotsUsed(static_cast<uint64_t>(0)) {
-  memset(_buckets, 0, BUCKET_SIZE * _size);
+  for (size_t i = 0; i < _size; i++) {
+    // use placement new in order to properly initialize the bucket
+    new (_buckets + i) GenericBucket();
+  }
+}
+
+Table::~Table() {
+  for (size_t i = 0; i < _size; i++) {
+    // retrieve pointer to bucket
+    GenericBucket* b = _buckets + i;
+    // call dtor
+    b->~GenericBucket();
+  }
 }
 
 uint64_t Table::allocationSize(uint32_t logSize) {
@@ -102,8 +126,8 @@ uint64_t Table::size() const { return _size; }
 
 uint32_t Table::logSize() const { return _logSize; }
 
-std::pair<void*, Table*> Table::fetchAndLockBucket(
-    uint32_t hash, uint64_t maxTries) {
+std::pair<void*, Table*> Table::fetchAndLockBucket(uint32_t hash,
+                                                   uint64_t maxTries) {
   GenericBucket* bucket = nullptr;
   Table* source = nullptr;
   bool ok = _lock.readLock(maxTries);
@@ -228,14 +252,14 @@ bool Table::isEnabled(uint64_t maxTries) {
 
 bool Table::slotFilled() {
   size_t i = _slotsUsed.fetch_add(1, std::memory_order_acq_rel);
-  return ((static_cast<double>(i + 1) /
-           static_cast<double>(_slotsTotal)) > Table::idealUpperRatio);
+  return ((static_cast<double>(i + 1) / static_cast<double>(_slotsTotal)) >
+          Table::idealUpperRatio);
 }
 
 bool Table::slotEmptied() {
   size_t i = _slotsUsed.fetch_sub(1, std::memory_order_acq_rel);
-  return (((static_cast<double>(i - 1) /
-            static_cast<double>(_slotsTotal)) < Table::idealLowerRatio) &&
+  return (((static_cast<double>(i - 1) / static_cast<double>(_slotsTotal)) <
+           Table::idealLowerRatio) &&
           (_logSize > Table::minLogSize));
 }
 
diff --git a/arangod/Cache/Table.h b/arangod/Cache/Table.h
index 09fa08a1c7..cc00d97b8c 100644
--- a/arangod/Cache/Table.h
+++ b/arangod/Cache/Table.h
@@ -53,9 +53,12 @@ class Table : public std::enable_shared_from_this<Table> {
  private:
   struct GenericBucket {
     BucketState _state;
-    uint8_t _filler[BUCKET_SIZE - sizeof(BucketState)];
+    static constexpr size_t paddingSize = BUCKET_SIZE - sizeof(BucketState);
+    uint8_t _padding[paddingSize];
+    GenericBucket();
     bool lock(uint64_t maxTries);
     void unlock();
+    void clear();
     bool isMigrated() const;
   };
   static_assert(sizeof(GenericBucket) == BUCKET_SIZE,
@@ -87,6 +90,11 @@ class Table : public std::enable_shared_from_this<Table> {
   //////////////////////////////////////////////////////////////////////////////
   explicit Table(uint32_t logSize);
 
+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief Destroy the table
+  //////////////////////////////////////////////////////////////////////////////
+  ~Table();
+
  public:
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Returns the memory usage for a table with specified logSize
diff --git a/arangod/Cache/TransactionalBucket.cpp b/arangod/Cache/TransactionalBucket.cpp
index 423a2c4b76..3382fe699a 100644
--- a/arangod/Cache/TransactionalBucket.cpp
+++ b/arangod/Cache/TransactionalBucket.cpp
@@ -198,7 +198,16 @@ void TransactionalBucket::evict(CachedValue* value, bool optimizeForInsertion) {
 
 void TransactionalBucket::clear() {
   TRI_ASSERT(isLocked());
-  memset(this, 0, sizeof(TransactionalBucket));
+  _state.clear();  // "clear" will keep the lock!
+  for (size_t i = 0; i < slotsBlacklist; ++i) {
+    _blacklistHashes[i] = 0;
+  }
+  _blacklistTerm = 0;
+  for (size_t i = 0; i < slotsData; ++i) {
+    _cachedHashes[i] = 0;
+    _cachedData[i] = nullptr;
+  }
+  _state.unlock();
 }
 
 void TransactionalBucket::updateBlacklistTerm(uint64_t term) {
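
Note: the following is an illustrative, standalone sketch, not part of the patch and not the ArangoDB API. It shows the pattern used in the PlainBucket::clear() and TransactionalBucket::clear() bodies above: the state word is reset while the lock is retained, the payload slots are zeroed field by field, and only then is the lock released, instead of memset'ing the whole object. DemoState and DemoBucket are hypothetical stand-ins for BucketState and a bucket type.

#include <atomic>
#include <cstddef>
#include <cstdint>

struct DemoState {
  static constexpr std::uint32_t kLockBit = 1u;
  std::atomic<std::uint32_t> bits{0};

  void lock() {
    // naive spin lock, sufficient for the illustration
    std::uint32_t expected = bits.load(std::memory_order_relaxed) & ~kLockBit;
    while (!bits.compare_exchange_weak(expected, expected | kLockBit,
                                       std::memory_order_acquire)) {
      expected &= ~kLockBit;
    }
  }

  // reset all flags but keep the lock bit set, mirroring the
  // "clear will keep the lock" behaviour relied on above
  void clear() { bits.store(kLockBit, std::memory_order_release); }

  void unlock() { bits.fetch_and(~kLockBit, std::memory_order_release); }
};

struct DemoBucket {
  static constexpr std::size_t kSlots = 3;
  DemoState _state;
  std::uint32_t _hashes[kSlots] = {};
  void* _data[kSlots] = {};

  void clear() {
    // stay locked while the slots are wiped; a memset over the whole
    // bucket would also clobber the lock word and any atomics inside it
    _state.clear();
    for (std::size_t i = 0; i < kSlots; ++i) {
      _hashes[i] = 0;
      _data[i] = nullptr;
    }
    _state.unlock();
  }
};

int main() {
  DemoBucket bucket;
  bucket._state.lock();  // clear() expects the bucket to be locked
  bucket.clear();
  return 0;
}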
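
A second standalone sketch, again not part of the patch: the construct-in-place / destroy-explicitly idiom that Table::Table and Table::~Table now apply to the bucket array, assuming the buckets are backed by raw, untyped storage. DemoBucket and kBucketCount are hypothetical names introduced only for this example.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

struct DemoBucket {
  std::uint32_t _state;       // stands in for BucketState
  std::uint8_t _padding[60];  // stands in for the cached slots
  DemoBucket() : _state(0), _padding{} {}
};

int main() {
  constexpr std::size_t kBucketCount = 4;

  // raw, untyped storage comparable to the memory backing Table::_buckets
  void* memory = std::malloc(sizeof(DemoBucket) * kBucketCount);
  if (memory == nullptr) {
    return 1;
  }
  auto* buckets = static_cast<DemoBucket*>(memory);

  // construct every bucket in place instead of memset'ing the raw bytes;
  // this runs the constructor and formally begins each object's lifetime
  for (std::size_t i = 0; i < kBucketCount; ++i) {
    new (buckets + i) DemoBucket();
  }

  // ... the table would be used here ...

  // destroy every bucket explicitly before the storage is released,
  // matching the loop in the ~Table() destructor above
  for (std::size_t i = 0; i < kBucketCount; ++i) {
    buckets[i].~DemoBucket();
  }
  std::free(memory);
  return 0;
}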