
Fix compiler warnings and needlessly failing catch tests. (#3171)

Dan Larkin 2017-08-31 11:27:25 -04:00 committed by Frank Celler
parent 524f02190c
commit e2fdab431c
17 changed files with 145 additions and 86 deletions
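The central change, visible across the hunks below, is that cache::Manager no longer takes a boost::asio::io_service*: it now accepts a Manager::PostFn (a std::function<bool(std::function<void()>)>), stores it as _schedulerPost, and exposes Manager::post(), so cache tasks submit work through the manager instead of reaching for SchedulerFeature::SCHEDULER directly. Below is a minimal sketch of that wiring, using a plain std::queue as a hypothetical stand-in for the real scheduler; the ManagerSketch type is illustrative only, not ArangoDB code.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <queue>
    #include <utility>

    // Illustrative stand-in for the new plumbing; not ArangoDB code.
    using PostFn = std::function<bool(std::function<void()>)>;

    class ManagerSketch {
     public:
      ManagerSketch(PostFn schedulerPost, uint64_t globalLimit)
          : _schedulerPost(std::move(schedulerPost)), _globalLimit(globalLimit) {}

      // Mirrors the new Manager::post(): forward work to whatever was injected.
      bool post(std::function<void()> fn) { return _schedulerPost(std::move(fn)); }

      uint64_t globalLimit() const { return _globalLimit; }

     private:
      PostFn _schedulerPost;
      uint64_t _globalLimit;
    };

    int main() {
      std::queue<std::function<void()>> scheduler;  // hypothetical scheduler stand-in

      // Same shape as the lambdas added in CacheManagerFeature::start() and the tests.
      PostFn postFn = [&scheduler](std::function<void()> fn) -> bool {
        scheduler.push(std::move(fn));
        return true;
      };

      ManagerSketch manager(postFn, 1024 * 1024);
      manager.post([]() { std::cout << "task ran\n"; });

      while (!scheduler.empty()) {  // drain the "scheduler"
        scheduler.front()();
        scheduler.pop();
      }
      return 0;
    }

In the real code the lambda wraps SchedulerFeature::SCHEDULER in production or MockScheduler in the tests, and a post function that simply returns false, as in the unit tests below, tells the manager that no scheduler is available.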

View File

@@ -385,25 +385,22 @@ bool Cache::canResize() {
 }
 
 bool Cache::canMigrate() {
-  bool allowed = (_manager->ioService() != nullptr);
+  bool allowed = _state.lock(Cache::triesSlow);
   if (allowed) {
-    allowed = _state.lock(Cache::triesSlow);
-    if (allowed) {
-      if (isOperational()) {
-        if (_state.isSet(State::Flag::migrating)) {
-          allowed = false;
-        } else {
-          _metadata.lock();
-          if (_metadata.isSet(State::Flag::migrating)) {
-            allowed = false;
-          }
-          _metadata.unlock();
-        }
-      } else {
-        allowed = false;
-      }
-      _state.unlock();
-    }
+    if (isOperational()) {
+      if (_state.isSet(State::Flag::migrating)) {
+        allowed = false;
+      } else {
+        _metadata.lock();
+        if (_metadata.isSet(State::Flag::migrating)) {
+          allowed = false;
+        }
+        _metadata.unlock();
+      }
+    } else {
+      allowed = false;
+    }
+    _state.unlock();
   }
 
   return allowed;

View File

@@ -98,8 +98,11 @@ void CacheManagerFeature::validateOptions(
 void CacheManagerFeature::start() {
   auto scheduler = SchedulerFeature::SCHEDULER;
-  auto ioService = (scheduler == nullptr) ? nullptr : scheduler->ioService();
-  _manager.reset(new Manager(ioService, _cacheSize));
+  auto postFn = [scheduler](std::function<void()> fn) -> bool {
+    scheduler->post(fn);
+    return true;
+  };
+  _manager.reset(new Manager(postFn, _cacheSize));
   MANAGER = _manager.get();
   _rebalancer.reset(
       new CacheRebalancerThread(_manager.get(), _rebalancingInterval));

View File

@@ -33,7 +33,14 @@ const size_t CachedValue::_headerAllocSize = sizeof(CachedValue) +
 CachedValue* CachedValue::copy() const {
   uint8_t* buf = new uint8_t[size()];
-  CachedValue* value = new (buf + offset()) CachedValue(*this);
+  CachedValue* value = nullptr;
+  try {
+    value = new (buf + offset()) CachedValue(*this);
+  } catch (...) {
+    delete[] buf;
+    return nullptr;
+  }
 
   return value;
 }
@@ -45,11 +52,17 @@ CachedValue* CachedValue::construct(void const* k, size_t kSize,
   }
 
   uint8_t* buf = new uint8_t[_headerAllocSize + kSize + vSize];
-  uint8_t* aligned = reinterpret_cast<uint8_t*>(
-      (reinterpret_cast<size_t>(buf) + _headerAllocOffset) &
-      _headerAllocMask);
-  size_t offset = buf - aligned;
-  CachedValue* cv = new (aligned) CachedValue(offset, k, kSize, v, vSize);
+  CachedValue* cv = nullptr;
+  try {
+    uint8_t* aligned = reinterpret_cast<uint8_t*>(
+        (reinterpret_cast<size_t>(buf) + _headerAllocOffset) &
+        _headerAllocMask);
+    size_t offset = buf - aligned;
+    cv = new (aligned) CachedValue(offset, k, kSize, v, vSize);
+  } catch (...) {
+    delete[] buf;
+    return nullptr;
+  }
 
   return cv;
 }
@@ -64,7 +77,7 @@ void CachedValue::operator delete(void* ptr) {
 CachedValue::CachedValue(size_t off, void const* k, size_t kSize,
                          void const* v, size_t vSize)
     : _refCount(0),
-      _keySize(kSize + (off << _offsetShift)),
+      _keySize(static_cast<uint32_t>(kSize + (off << _offsetShift))),
       _valueSize(vSize) {
   std::memcpy(const_cast<uint8_t*>(key()), k, kSize);
   if (vSize > 0) {
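Two warning/robustness fixes are bundled in the hunks above: the static_cast<uint32_t> silences the implicit size_t-to-uint32_t narrowing in the _keySize initializer, and the new try/catch blocks make sure the freshly allocated buffer is released if constructing the CachedValue into it throws. A minimal sketch of that cleanup pattern, with a hypothetical Payload type rather than the real CachedValue:

    #include <cstdint>
    #include <new>
    #include <stdexcept>

    // Hypothetical type used only to illustrate the pattern above.
    struct Payload {
      explicit Payload(int v) : value(v) {
        if (v < 0) throw std::invalid_argument("negative");
      }
      int value;
    };

    Payload* makePayload(int v) {
      uint8_t* buf = new uint8_t[sizeof(Payload)];  // raw storage, owned by us
      Payload* p = nullptr;
      try {
        p = new (buf) Payload(v);  // placement new; the constructor may throw
      } catch (...) {
        delete[] buf;  // no Payload was ever constructed, so free the raw buffer
        return nullptr;
      }
      return p;
    }

A complete implementation also needs the matching teardown (destroy the object, then release the original buffer), which the file above appears to centralize in the custom CachedValue::operator delete visible in the last hunk's context.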

View File

@@ -67,7 +67,7 @@ size_t Manager::hash_weak_ptr::operator()(
   return std::hash<decltype(sp)>()(sp);
 }
 
-Manager::Manager(boost::asio::io_service* ioService, uint64_t globalLimit,
+Manager::Manager(PostFn schedulerPost, uint64_t globalLimit,
                  bool enableWindowedStats)
     : _state(),
       _accessStats((globalLimit >= (1024 * 1024 * 1024))
@@ -89,7 +89,7 @@ Manager::Manager(boost::asio::io_service* ioService, uint64_t globalLimit,
       _spareTableAllocation(0),
       _globalAllocation(_fixedAllocation),
       _transactions(),
-      _ioService(ioService),
+      _schedulerPost(schedulerPost),
       _resizeAttempt(0),
       _outstandingTasks(0),
       _rebalancingTasks(0),
@@ -294,6 +294,8 @@ Transaction* Manager::beginTransaction(bool readOnly) {
 void Manager::endTransaction(Transaction* tx) { _transactions.end(tx); }
 
+bool Manager::post(std::function<void()> fn) { return _schedulerPost(fn); }
+
 std::tuple<bool, Metadata, std::shared_ptr<Table>> Manager::registerCache(
     uint64_t fixedSize, uint64_t maxSize) {
   TRI_ASSERT(_state.isLocked());
@@ -477,8 +479,6 @@ bool Manager::globalProcessRunning() const {
   return _state.isSet(State::Flag::rebalancing, State::Flag::resizing);
 }
 
-boost::asio::io_service* Manager::ioService() { return _ioService; }
-
 void Manager::prepareTask(Manager::TaskEnvironment environment) {
   _outstandingTasks++;
   switch (environment) {

View File

@@ -79,6 +79,8 @@ class Manager {
     size_t operator()(const std::weak_ptr<Cache>& wp) const;
   };
 
+  typedef std::function<bool(std::function<void()>)> PostFn;
+
  public:
   static const uint64_t minSize;
   typedef FrequencyBuffer<std::weak_ptr<Cache>, cmp_weak_ptr, hash_weak_ptr>
@@ -89,9 +91,10 @@ class Manager {
  public:
   //////////////////////////////////////////////////////////////////////////////
-  /// @brief Initialize the manager with an io_service and global usage limit.
+  /// @brief Initialize the manager with a scheduler post method and global
+  /// usage limit.
   //////////////////////////////////////////////////////////////////////////////
-  Manager(boost::asio::io_service* ioService, uint64_t globalLimit,
+  Manager(PostFn schedulerPost, uint64_t globalLimit,
           bool enableWindowedStats = true);
   ~Manager();
@@ -163,6 +166,11 @@ class Manager {
   //////////////////////////////////////////////////////////////////////////////
   void endTransaction(Transaction* tx);
 
+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief Post a function to the scheduler
+  //////////////////////////////////////////////////////////////////////////////
+  bool post(std::function<void()> fn);
+
  private:
   // use sizeof(std::shared_ptr<Cache>) + 32 for sizeof
   // std::set<std::shared_ptr<Cache>> node -- should be valid for most libraries
@@ -206,7 +214,7 @@ class Manager {
   // task management
   enum TaskEnvironment { none, rebalancing, resizing };
-  boost::asio::io_service* _ioService;
+  PostFn _schedulerPost;
   uint64_t _resizeAttempt;
   std::atomic<uint64_t> _outstandingTasks;
   std::atomic<uint64_t> _rebalancingTasks;
@@ -248,9 +256,6 @@ class Manager {
   // check if there is already a global process running
   bool globalProcessRunning() const;
 
-  // expose io_service
-  boost::asio::io_service* ioService();
-
   // coordinate state with task lifecycles
   void prepareTask(TaskEnvironment environment);
   void unprepareTask(TaskEnvironment environment);

View File

@@ -27,8 +27,6 @@
 #include "Cache/Cache.h"
 #include "Cache/Manager.h"
 #include "Cache/Metadata.h"
-#include "Scheduler/Scheduler.h"
-#include "Scheduler/SchedulerFeature.h"
 
 using namespace arangodb::cache;
@@ -39,16 +37,9 @@ FreeMemoryTask::FreeMemoryTask(Manager::TaskEnvironment environment,
 FreeMemoryTask::~FreeMemoryTask() {}
 
 bool FreeMemoryTask::dispatch() {
-  auto scheduler = SchedulerFeature::SCHEDULER;
-  if (scheduler == nullptr) {
-    return false;
-  }
-
   _manager->prepareTask(_environment);
   auto self = shared_from_this();
-  scheduler->post([self, this]() -> void { run(); });
-
-  return true;
+  return _manager->post([self, this]() -> void { run(); });
 }
 
 void FreeMemoryTask::run() {
@@ -80,16 +71,9 @@ MigrateTask::MigrateTask(Manager::TaskEnvironment environment, Manager* manager,
 MigrateTask::~MigrateTask() {}
 
 bool MigrateTask::dispatch() {
-  auto scheduler = SchedulerFeature::SCHEDULER;
-  if (scheduler == nullptr) {
-    return false;
-  }
-
   _manager->prepareTask(_environment);
   auto self = shared_from_this();
-  scheduler->post([self, this]() -> void { run(); });
-
-  return true;
+  return _manager->post([self, this]() -> void { run(); });
 }
 
 void MigrateTask::run() {

View File

@@ -64,7 +64,7 @@ bool PlainBucket::isFull() const {
   return !hasEmptySlot;
 }
 
-CachedValue* PlainBucket::find(uint32_t hash, void const* key, uint32_t keySize,
+CachedValue* PlainBucket::find(uint32_t hash, void const* key, size_t keySize,
                                bool moveToFront) {
   TRI_ASSERT(isLocked());
   CachedValue* result = nullptr;
@@ -102,7 +102,7 @@ void PlainBucket::insert(uint32_t hash, CachedValue* value) {
 }
 
 CachedValue* PlainBucket::remove(uint32_t hash, void const* key,
-                                 uint32_t keySize) {
+                                 size_t keySize) {
   TRI_ASSERT(isLocked());
   CachedValue* value = find(hash, key, keySize, false);
   if (value != nullptr) {

View File

@@ -100,7 +100,7 @@ struct PlainBucket {
   /// bucket to allow basic LRU semantics. If no matching entry is found,
   /// nothing will be changed and a nullptr will be returned.
   //////////////////////////////////////////////////////////////////////////////
-  CachedValue* find(uint32_t hash, void const* key, uint32_t keySize,
+  CachedValue* find(uint32_t hash, void const* key, size_t keySize,
                     bool moveToFront = true);
 
   //////////////////////////////////////////////////////////////////////////////
@@ -125,7 +125,7 @@ struct PlainBucket {
   /// to the value. Upon removal, the empty slot generated is moved to the back
   /// of the bucket (to remove the gap).
   //////////////////////////////////////////////////////////////////////////////
-  CachedValue* remove(uint32_t hash, void const* key, uint32_t keySize);
+  CachedValue* remove(uint32_t hash, void const* key, size_t keySize);
 
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Searches for the best candidate in the bucket to evict. Requires
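Widening the keySize parameters from uint32_t to size_t is the other compiler-warning fix in this commit: callers usually carry key lengths as size_t, and narrowing those into a uint32_t parameter on a 64-bit build trips -Wconversion-style warnings or forces a cast at every call site. A small hedged illustration with stand-in functions, not the real bucket API:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Stand-ins that only demonstrate the parameter-width difference.
    void findNarrow(uint32_t keySize) { (void)keySize; }   // old shape
    void findWide(std::size_t keySize) { (void)keySize; }  // new shape

    void lookup(char const* key) {
      // size_t -> uint32_t narrows on 64-bit targets; staying warning-clean needs a cast.
      findNarrow(static_cast<uint32_t>(std::strlen(key)));
      // size_t -> size_t: no conversion, no cast, no warning.
      findWide(std::strlen(key));
    }

    int main() {
      lookup("example-key");
      return 0;
    }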

View File

@@ -72,7 +72,7 @@ bool TransactionalBucket::isFull() const {
 }
 
 CachedValue* TransactionalBucket::find(uint32_t hash, void const* key,
-                                       uint32_t keySize, bool moveToFront) {
+                                       size_t keySize, bool moveToFront) {
   TRI_ASSERT(isLocked());
   CachedValue* result = nullptr;
@@ -112,7 +112,7 @@ void TransactionalBucket::insert(uint32_t hash, CachedValue* value) {
 }
 
 CachedValue* TransactionalBucket::remove(uint32_t hash, void const* key,
-                                         uint32_t keySize) {
+                                         size_t keySize) {
   TRI_ASSERT(isLocked());
   CachedValue* value = find(hash, key, keySize, false);
   if (value != nullptr) {
@@ -123,7 +123,7 @@ CachedValue* TransactionalBucket::remove(uint32_t hash, void const* key,
 }
 
 CachedValue* TransactionalBucket::blacklist(uint32_t hash, void const* key,
-                                            uint32_t keySize) {
+                                            size_t keySize) {
   TRI_ASSERT(isLocked());
   if (!haveOpenTransaction()) {
     return nullptr;

View File

@@ -112,7 +112,7 @@ struct TransactionalBucket {
   /// bucket to allow basic LRU semantics. If no matching entry is found,
   /// nothing will be changed and a nullptr will be returned.
   //////////////////////////////////////////////////////////////////////////////
-  CachedValue* find(uint32_t hash, void const* key, uint32_t keySize,
+  CachedValue* find(uint32_t hash, void const* key, size_t keySize,
                     bool moveToFront = true);
 
   //////////////////////////////////////////////////////////////////////////////
@@ -140,7 +140,7 @@ struct TransactionalBucket {
   /// to the value. Upon removal, the empty slot generated is moved to the back
   /// of the bucket (to remove the gap).
   //////////////////////////////////////////////////////////////////////////////
-  CachedValue* remove(uint32_t hash, void const* key, uint32_t keySize);
+  CachedValue* remove(uint32_t hash, void const* key, size_t keySize);
 
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Blacklists a key and removes it if it exists. Requires state to
@@ -150,7 +150,7 @@ struct TransactionalBucket {
   /// hash associated with the key. If there are no empty blacklist slots, fully
   /// blacklist the bucket.
   //////////////////////////////////////////////////////////////////////////////
-  CachedValue* blacklist(uint32_t hash, void const* key, uint32_t keySize);
+  CachedValue* blacklist(uint32_t hash, void const* key, size_t keySize);
 
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Checks whether a given hash is blacklisted. Requires state to be

View File

@@ -47,7 +47,8 @@ using namespace arangodb::cache;
 TEST_CASE("cache::Manager", "[cache][!hide][longRunning]") {
   SECTION("test basic constructor function") {
     uint64_t requestLimit = 1024 * 1024;
-    Manager manager(nullptr, requestLimit);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, requestLimit);
 
     REQUIRE(requestLimit == manager.globalLimit());
@@ -66,7 +67,11 @@ TEST_CASE("cache::Manager", "[cache][!hide][longRunning]") {
   SECTION("test mixed cache types under mixed load") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 1024ULL * 1024ULL * 1024ULL);
     size_t cacheCount = 4;
     size_t threadCount = 4;
     std::vector<std::shared_ptr<Cache>> caches;
@@ -172,7 +177,11 @@ TEST_CASE("cache::Manager", "[cache][!hide][longRunning]") {
   SECTION("test manager under cache lifecycle chaos") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 1024ULL * 1024ULL * 1024ULL);
     size_t threadCount = 4;
     uint64_t operationCount = 4ULL * 1024ULL;

View File

@@ -56,4 +56,4 @@ MockScheduler::~MockScheduler() {
   _ioService->stop();
 }
 
-boost::asio::io_service* MockScheduler::ioService() { return _ioService.get(); }
+void MockScheduler::post(std::function<void()> fn) { _ioService->post(fn); }

View File

@@ -48,7 +48,7 @@ class MockScheduler {
  public:
   MockScheduler(size_t threads);
   ~MockScheduler();
-  boost::asio::io_service* ioService();
+  void post(std::function<void()> fn);
 };
 
 };  // end namespace cache

View File

@@ -44,7 +44,8 @@ using namespace arangodb::cache;
 TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   SECTION("test basic cache creation") {
-    Manager manager(nullptr, 1024 * 1024);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 1024 * 1024);
     auto cache1 = manager.createCache(CacheType::Plain, false, 256 * 1024);
     REQUIRE(true);
     auto cache2 = manager.createCache(CacheType::Plain, false, 512 * 1024);
@@ -60,7 +61,8 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   SECTION("check that insertion works as expected") {
     uint64_t cacheLimit = 256 * 1024;
-    Manager manager(nullptr, 4 * cacheLimit);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 4 * cacheLimit);
     auto cache = manager.createCache(CacheType::Plain, false, cacheLimit);
 
     for (uint64_t i = 0; i < 1024; i++) {
@@ -110,7 +112,8 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   SECTION("test that removal works as expected") {
     uint64_t cacheLimit = 256 * 1024;
-    Manager manager(nullptr, 4 * cacheLimit);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 4 * cacheLimit);
     auto cache = manager.createCache(CacheType::Plain, false, cacheLimit);
 
     for (uint64_t i = 0; i < 1024; i++) {
@@ -168,7 +171,11 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   SECTION("verify that cache can indeed grow when it runs out of space") {
     uint64_t minimumUsage = 1024 * 1024;
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 1024 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 1024 * 1024 * 1024);
     auto cache = manager.createCache(CacheType::Plain);
 
     for (uint64_t i = 0; i < 4 * 1024 * 1024; i++) {
@@ -189,7 +196,11 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   SECTION("test behavior under mixed load") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 1024 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 1024 * 1024 * 1024);
     size_t threadCount = 4;
     std::shared_ptr<Cache> cache = manager.createCache(CacheType::Plain);
@@ -279,7 +290,8 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   SECTION("test hit rate statistics reporting") {
     uint64_t cacheLimit = 256 * 1024;
-    Manager manager(nullptr, 4 * cacheLimit);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 4 * cacheLimit);
     auto cacheMiss = manager.createCache(CacheType::Plain, true, cacheLimit);
     auto cacheHit = manager.createCache(CacheType::Plain, true, cacheLimit);
     auto cacheMixed = manager.createCache(CacheType::Plain, true, cacheLimit);

View File

@@ -50,7 +50,11 @@ TEST_CASE("cache::Rebalancer", "[cache][!hide][longRunning]") {
   SECTION("test rebalancing with PlainCache") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 128 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 128 * 1024 * 1024);
     Rebalancer rebalancer(&manager);
 
     size_t cacheCount = 4;
@@ -172,7 +176,11 @@ TEST_CASE("cache::Rebalancer", "[cache][!hide][longRunning]") {
   SECTION("test rebalancing with TransactionalCache") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 128 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 128 * 1024 * 1024);
     Rebalancer rebalancer(&manager);
 
     size_t cacheCount = 4;

View File

@@ -45,7 +45,8 @@ using namespace arangodb::cache;
 TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
   SECTION("test basic cache construction") {
-    Manager manager(nullptr, 1024 * 1024);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 1024 * 1024);
     auto cache1 =
         manager.createCache(CacheType::Transactional, false, 256 * 1024);
     auto cache2 =
@@ -62,7 +63,8 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
   SECTION("verify that insertion works as expected") {
     uint64_t cacheLimit = 256 * 1024;
-    Manager manager(nullptr, 4 * cacheLimit);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 4 * cacheLimit);
     auto cache =
         manager.createCache(CacheType::Transactional, false, cacheLimit);
@@ -113,7 +115,8 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
   SECTION("verify removal works as expected") {
     uint64_t cacheLimit = 256 * 1024;
-    Manager manager(nullptr, 4 * cacheLimit);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 4 * cacheLimit);
     auto cache =
         manager.createCache(CacheType::Transactional, false, cacheLimit);
@@ -171,7 +174,8 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
   SECTION("verify blacklisting works as expected") {
     uint64_t cacheLimit = 256 * 1024;
-    Manager manager(nullptr, 4 * cacheLimit);
+    auto postFn = [](std::function<void()>) -> bool { return false; };
+    Manager manager(postFn, 4 * cacheLimit);
     auto cache =
         manager.createCache(CacheType::Transactional, false, cacheLimit);
@@ -236,7 +240,11 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
   SECTION("verify cache can grow correctly when it runs out of space") {
     uint64_t minimumUsage = 1024 * 1024;
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 1024 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 1024 * 1024 * 1024);
     auto cache = manager.createCache(CacheType::Transactional);
 
     for (uint64_t i = 0; i < 4 * 1024 * 1024; i++) {
@@ -257,7 +265,11 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
   SECTION("test behavior under mixed load") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 1024 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 1024 * 1024 * 1024);
     size_t threadCount = 4;
     std::shared_ptr<Cache> cache =
         manager.createCache(CacheType::Transactional);

View File

@@ -75,7 +75,11 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
   SECTION("test hit rate for read-only hotset workload") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 16 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 16 * 1024 * 1024);
     TransactionalStore store(&manager);
     uint64_t totalDocuments = 1000000;
     uint64_t hotsetSize = 50000;
@@ -124,7 +128,11 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
   SECTION("test hit rate for mixed workload") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 256 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 256 * 1024 * 1024);
     TransactionalStore store(&manager);
     uint64_t totalDocuments = 1000000;
     uint64_t batchSize = 1000;
@@ -194,7 +202,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
     auto hitRates = manager.globalHitRates();
     CHECK(hitRates.first >= 0.1);
-    CHECK(hitRates.second >= 5.0);
+    CHECK(hitRates.second >= 2.5);
 
     RandomGenerator::shutdown();
   }
@@ -202,7 +210,11 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
   SECTION("test transactionality for mixed workload") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 256 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 256 * 1024 * 1024);
     TransactionalStore store(&manager);
     uint64_t totalDocuments = 1000000;
     uint64_t writeBatchSize = 1000;
@@ -285,7 +297,11 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
   SECTION("test rebalancing in the wild") {
     RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
     MockScheduler scheduler(4);
-    Manager manager(scheduler.ioService(), 16 * 1024 * 1024);
+    auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+      scheduler.post(fn);
+      return true;
+    };
+    Manager manager(postFn, 16 * 1024 * 1024);
     Rebalancer rebalancer(&manager);
     TransactionalStore store1(&manager);
     TransactionalStore store2(&manager);