
Avoid memset on non-POD types for portability. (#5558)

Dan Larkin-York, 2018-06-12 02:49:37 -04:00 (committed by Jan)
parent c6e1672338
commit 66e9fb1dbd
4 changed files with 58 additions and 10 deletions
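For background, a minimal standalone sketch (not taken from the commit) of the portability issue: zeroing an object that has non-trivial members, such as an atomic, with memset is not guaranteed to leave it in a valid state, while resetting each member through its own interface is well-defined everywhere. The Bucket type below is hypothetical.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical bucket type with a non-trivial member (the real buckets hold a
// BucketState); it is not trivially copyable, so memset-ing it is not portable.
struct Bucket {
  std::atomic<uint32_t> state{0};
  uint32_t hashes[3] = {0};
  void* values[3] = {nullptr};
};

void clearUnportable(Bucket* b) {
  // May appear to work, but the standard gives no guarantee that an all-zero
  // byte pattern is a valid representation of std::atomic.
  std::memset(b, 0, sizeof(Bucket));
}

void clearPortable(Bucket* b) {
  // Reset each member explicitly instead; well-defined on every platform.
  b->state.store(0, std::memory_order_relaxed);
  for (std::size_t i = 0; i < 3; ++i) {
    b->hashes[i] = 0;
    b->values[i] = nullptr;
  }
}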


@@ -142,7 +142,14 @@ void PlainBucket::evict(CachedValue* value, bool optimizeForInsertion) {
 void PlainBucket::clear() {
   TRI_ASSERT(isLocked());
-  memset(this, 0, sizeof(PlainBucket));
+  _state.clear();  // "clear" will keep the lock!
+  for (size_t i = 0; i < slotsData; ++i) {
+    _cachedHashes[i] = 0;
+    _cachedData[i] = nullptr;
+  }
+  _state.unlock();
 }

 void PlainBucket::moveSlot(size_t slot, bool moveToFront) {
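The rewritten clear() resets the state word first (the in-diff comment notes that "clear" keeps the lock, so the caller stays the owner), zeroes the slots, and only unlocks at the end. A small self-contained sketch of that ordering, with an illustrative SimpleState standing in for the real BucketState API:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for a state word whose lowest bit is the lock flag.
struct SimpleState {
  static constexpr uint32_t kLocked = 1u;
  std::atomic<uint32_t> word{0};

  void clearKeepingLock() {
    // Drop all flags except the lock bit, mirroring "clear will keep the lock".
    word.store(kLocked, std::memory_order_release);
  }
  void unlock() { word.fetch_and(~kLocked, std::memory_order_release); }
};

struct MiniBucket {
  static constexpr std::size_t kSlots = 4;
  SimpleState _state;
  uint32_t _hashes[kSlots] = {0};
  void* _values[kSlots] = {nullptr};

  void clear() {
    _state.clearKeepingLock();  // state is reset, lock is still held
    for (std::size_t i = 0; i < kSlots; ++i) {
      _hashes[i] = 0;
      _values[i] = nullptr;
    }
    _state.unlock();  // release only after the slots are consistent again
  }
};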


@@ -34,6 +34,8 @@ using namespace arangodb::cache;
 const uint32_t Table::minLogSize = 8;
 const uint32_t Table::maxLogSize = 32;

+Table::GenericBucket::GenericBucket() : _state{}, _padding{} {}
+
 bool Table::GenericBucket::lock(uint64_t maxTries) {
   return _state.lock(maxTries);
 }
@@ -43,6 +45,16 @@ void Table::GenericBucket::unlock() {
   _state.unlock();
 }

+void Table::GenericBucket::clear() {
+  _state.lock(UINT64_MAX, [this]() -> void {
+    _state.clear();
+    for (size_t i = 0; i < paddingSize; i++) {
+      _padding[i] = static_cast<uint8_t>(0);
+    }
+    _state.unlock();
+  });
+}
+
 bool Table::GenericBucket::isMigrated() const {
   TRI_ASSERT(_state.isLocked());
   return _state.isSet(BucketState::Flag::migrated);
@@ -88,7 +100,19 @@ Table::Table(uint32_t logSize)
       _bucketClearer(defaultClearer),
       _slotsTotal(_size),
       _slotsUsed(static_cast<uint64_t>(0)) {
-  memset(_buckets, 0, BUCKET_SIZE * _size);
+  for (size_t i = 0; i < _size; i++) {
+    // use placement new in order to properly initialize the bucket
+    new (_buckets + i) GenericBucket();
+  }
+}
+
+Table::~Table() {
+  for (size_t i = 0; i < _size; i++) {
+    // retrieve pointer to bucket
+    GenericBucket* b = _buckets + i;
+    // call dtor
+    b->~GenericBucket();
+  }
 }

 uint64_t Table::allocationSize(uint32_t logSize) {
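In the constructor above, the memset over the raw bucket array is replaced by placement new, and the new destructor ends each bucket's lifetime with an explicit destructor call. A minimal sketch of that pairing with illustrative names (Slot and SlotArray are not from the commit):

#include <cstddef>
#include <cstdint>
#include <new>  // placement new

// Illustrative element type; in the diff this role is played by GenericBucket.
struct Slot {
  uint32_t state = 0;
  uint8_t padding[60] = {0};
};

struct SlotArray {
  std::size_t count;
  Slot* slots;

  // 'raw' is pre-allocated, suitably aligned storage for 'n' Slot objects.
  SlotArray(void* raw, std::size_t n)
      : count(n), slots(static_cast<Slot*>(raw)) {
    for (std::size_t i = 0; i < count; ++i) {
      // Begin each object's lifetime properly instead of zeroing raw bytes.
      new (slots + i) Slot();
    }
  }

  ~SlotArray() {
    for (std::size_t i = 0; i < count; ++i) {
      // Placement new has no matching delete: end each lifetime by hand.
      (slots + i)->~Slot();
    }
  }
};

With trivially destructible members the explicit destructor calls compile to nothing, so the pattern costs no more than the memset it replaces while staying well-defined if the element type later gains non-trivial members.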
@@ -102,8 +126,8 @@ uint64_t Table::size() const { return _size; }
 uint32_t Table::logSize() const { return _logSize; }

-std::pair<void*, Table*> Table::fetchAndLockBucket(
-    uint32_t hash, uint64_t maxTries) {
+std::pair<void*, Table*> Table::fetchAndLockBucket(uint32_t hash,
+                                                   uint64_t maxTries) {
   GenericBucket* bucket = nullptr;
   Table* source = nullptr;
   bool ok = _lock.readLock(maxTries);
@@ -228,14 +252,14 @@ bool Table::isEnabled(uint64_t maxTries) {

 bool Table::slotFilled() {
   size_t i = _slotsUsed.fetch_add(1, std::memory_order_acq_rel);
-  return ((static_cast<double>(i + 1) /
-           static_cast<double>(_slotsTotal)) > Table::idealUpperRatio);
+  return ((static_cast<double>(i + 1) / static_cast<double>(_slotsTotal)) >
+          Table::idealUpperRatio);
 }

 bool Table::slotEmptied() {
   size_t i = _slotsUsed.fetch_sub(1, std::memory_order_acq_rel);
-  return (((static_cast<double>(i - 1) /
-            static_cast<double>(_slotsTotal)) < Table::idealLowerRatio) &&
+  return (((static_cast<double>(i - 1) / static_cast<double>(_slotsTotal)) <
+           Table::idealLowerRatio) &&
           (_logSize > Table::minLogSize));
 }


@@ -53,9 +53,12 @@ class Table : public std::enable_shared_from_this<Table> {
  private:
  struct GenericBucket {
    BucketState _state;
-    uint8_t _filler[BUCKET_SIZE - sizeof(BucketState)];
+    static constexpr size_t paddingSize = BUCKET_SIZE - sizeof(BucketState);
+    uint8_t _padding[paddingSize];
+    GenericBucket();
    bool lock(uint64_t maxTries);
    void unlock();
+    void clear();
    bool isMigrated() const;
  };
  static_assert(sizeof(GenericBucket) == BUCKET_SIZE,
@@ -87,6 +90,11 @@ class Table : public std::enable_shared_from_this<Table> {
   //////////////////////////////////////////////////////////////////////////////
   explicit Table(uint32_t logSize);

+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief Destroy the table
+  //////////////////////////////////////////////////////////////////////////////
+  ~Table();
+
  public:
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Returns the memory usage for a table with specified logSize
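The header keeps GenericBucket padded out to BUCKET_SIZE, now via a named paddingSize constant derived from sizeof(BucketState), and the existing static_assert guards that layout. A small sketch of the sizing pattern with illustrative names and a stand-in 64-byte target:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative stand-in values; the real code uses BUCKET_SIZE and BucketState.
constexpr std::size_t kBucketSize = 64;

struct State {
  std::atomic<uint32_t> word{0};
};

struct PaddedBucket {
  State _state;
  // Deriving the padding size from the member keeps it correct if State changes.
  static constexpr std::size_t paddingSize = kBucketSize - sizeof(State);
  uint8_t _padding[paddingSize] = {0};
};

// Fails at compile time if the layout ever drifts from the fixed bucket size.
static_assert(sizeof(PaddedBucket) == kBucketSize,
              "bucket size must match the fixed layout");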


@@ -198,7 +198,16 @@ void TransactionalBucket::evict(CachedValue* value, bool optimizeForInsertion) {
 void TransactionalBucket::clear() {
   TRI_ASSERT(isLocked());
-  memset(this, 0, sizeof(TransactionalBucket));
+  _state.clear();  // "clear" will keep the lock!
+  for (size_t i = 0; i < slotsBlacklist; ++i) {
+    _blacklistHashes[i] = 0;
+  }
+  _blacklistTerm = 0;
+  for (size_t i = 0; i < slotsData; ++i) {
+    _cachedHashes[i] = 0;
+    _cachedData[i] = nullptr;
+  }
+  _state.unlock();
 }

 void TransactionalBucket::updateBlacklistTerm(uint64_t term) {