mirror of https://gitee.com/bigwinds/arangodb

Bug fix 3.4/scheduler empty reformat (#7872)

* added check for empty scheduler
* removed log, old is 1 not 0
* require running in this thread
* test
* added isDirect to callback
* signature fixed
* added drain
* added allowDirectHandling
* disabled for testing
* add ExecContextScope object to direct call
* try alternate initialization of ExecContextScope
* remove ExecContextScope, no help; try _fifoSize as part of direct decision
* strand management to minimize reuse of same strand per listen socket
* blind attempt to address Jenkins shutdown lock up, may remove quickly
* add filename and line to existing error log message
* adjust queueOperation() to stop accepting items once isStopping() becomes true
* revert previous check-in to MMFilesCollectorThread.cpp
* big reformat
* fixed merge conflicts
* add CHANGELOG entry

parent 9c7cef5a44
commit 474f0cde31
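The heart of this change is a new scheduler callback signature: callbacks are now std::function<void(bool)>, where the bool reports whether the scheduler ran the callback directly on the calling thread (when the queue is empty enough and the caller allows it) or posted it to the io_context. The sketch below only illustrates the new calling convention; it is not part of the commit, the function queueExampleJob is made up, and the include path and namespaces are assumptions based on the surrounding diff.

// Hedged usage sketch of the new std::function<void(bool)> callback
// convention; names and include path are assumptions, not taken from the diff.
#include "Scheduler/SchedulerFeature.h"  // assumed include path

using namespace arangodb;
using namespace arangodb::rest;

void queueExampleJob() {
  // the trailing isHandler argument (defaults to false) lets the scheduler
  // run the callback inline when it is otherwise idle
  SchedulerFeature::SCHEDULER->queue(
      RequestPriority::LOW,
      [](bool isDirect) {
        // isDirect == true: executed directly on the caller's thread, so the
        // callback may skip locking or re-scheduling it would otherwise defer
        // isDirect == false: executed later from the io_context queue
      },
      /*isHandler*/ true);
}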
@@ -53,19 +53,19 @@ namespace asio {
 * if any of the following conditions are true:
 *
 * @li @c s.post(a) happens-before @c s.post(b)
 *
 * @li @c s.post(a) happens-before @c s.dispatch(b), where the latter is
 * performed outside the strand
 *
 * @li @c s.dispatch(a) happens-before @c s.post(b), where the former is
 * performed outside the strand
 *
 * @li @c s.dispatch(a) happens-before @c s.dispatch(b), where both are
 * performed outside the strand
 *
 * then @c asio_handler_invoke(a1, &a1) happens-before
 * @c asio_handler_invoke(b1, &b1).
 *
 * Note that in the following case:
 * @code async_op_1(..., s.wrap(a));
 * async_op_2(..., s.wrap(b)); @endcode

@@ -368,7 +368,7 @@ public:
     return a.impl_ != b.impl_;
   }

-private:
+public:
   asio::detail::strand_service& service_;
   mutable asio::detail::strand_service::implementation_type impl_;
 };
@@ -76,6 +76,8 @@ v3.4.2 (XXXX-XX-XX)

 * Added --server.jwt-secret-keyfile option.

+* Improve single threaded performance by scheduler optimization.
+
 v3.4.1 (2018-12-19)
 -------------------
|
@ -978,7 +978,7 @@ void AstNode::toVelocyPackValue(VPackBuilder& builder) const {
|
|||
|
||||
if (type == NODE_TYPE_OBJECT) {
|
||||
builder.openObject();
|
||||
|
||||
|
||||
std::unordered_set<VPackStringRef> keys;
|
||||
size_t const n = numMembers();
|
||||
|
||||
|
|
|
@ -700,7 +700,7 @@ AqlValue Expression::executeSimpleExpressionObject(AstNode const* node,
|
|||
if (mustCheckUniqueness) {
|
||||
std::string key(member->getString());
|
||||
|
||||
// track each individual object key
|
||||
// track each individual object key
|
||||
auto it = keys.find(key);
|
||||
|
||||
if (it != keys.end()) {
|
||||
|
@ -715,7 +715,8 @@ AqlValue Expression::executeSimpleExpressionObject(AstNode const* node,
|
|||
keys.emplace(std::move(key));
|
||||
}
|
||||
} else {
|
||||
builder->add(VPackValuePair(member->getStringValue(), member->getStringLength(), VPackValueType::String));
|
||||
builder->add(VPackValuePair(member->getStringValue(),
|
||||
member->getStringLength(), VPackValueType::String));
|
||||
}
|
||||
|
||||
// value
|
||||
|
|
|
@@ -84,7 +84,7 @@ VPackSlice QueryResultCursor::next() {
 size_t QueryResultCursor::count() const { return _iterator.size(); }

 std::pair<ExecutionState, Result> QueryResultCursor::dump(VPackBuilder& builder,
-                                                          std::function<void()> const&) {
+                                                          std::function<void(bool)> const&) {
   // This cursor cannot block, result already there.
   auto res = dumpSync(builder);
   return {ExecutionState::DONE, res};

@@ -216,7 +216,7 @@ void QueryStreamCursor::kill() {
 }

 std::pair<ExecutionState, Result> QueryStreamCursor::dump(VPackBuilder& builder,
-                                                          std::function<void()> const& ch) {
+                                                          std::function<void(bool)> const& ch) {
   TRI_ASSERT(batchSize() > 0);
   LOG_TOPIC(TRACE, Logger::QUERIES) << "executing query " << _id << ": '"
                                     << _query->queryString().extract(1024) << "'";

@@ -58,7 +58,7 @@ class QueryResultCursor final : public arangodb::Cursor {
   size_t count() const override final;

   std::pair<ExecutionState, Result> dump(velocypack::Builder& result,
-                                         std::function<void()> const& continueHandler) override final;
+                                         std::function<void(bool)> const& continueHandler) override final;

   Result dumpSync(velocypack::Builder& result) override final;

@@ -97,7 +97,7 @@ class QueryStreamCursor final : public arangodb::Cursor {
   size_t count() const override final { return 0; }

   std::pair<ExecutionState, Result> dump(velocypack::Builder& result,
-                                         std::function<void()> const& continueHandler) override final;
+                                         std::function<void(bool)> const& continueHandler) override final;

   Result dumpSync(velocypack::Builder& result) override final;

@@ -668,7 +668,7 @@ RestStatus RestAqlHandler::handleUseQuery(std::string const& operation, Query* q

   auto self = shared_from_this();
   std::shared_ptr<SharedQueryState> ss = query->sharedState();
-  ss->setContinueHandler([this, self, ss]() { continueHandlerExecution(); });
+  ss->setContinueHandler([this, self, ss](bool) { continueHandlerExecution(); });

   bool found;
   std::string const& shardId = _request->header("shard-id", found);
|
@ -191,7 +191,7 @@ void Scopes::addVariable(Variable* variable) {
|
|||
void Scopes::replaceVariable(Variable* variable) {
|
||||
TRI_ASSERT(!_activeScopes.empty());
|
||||
TRI_ASSERT(variable != nullptr);
|
||||
|
||||
|
||||
_activeScopes.back()->addVariable(variable);
|
||||
}
|
||||
|
||||
|
|
|
@@ -54,7 +54,7 @@ void SharedQueryState::setContinueCallback() noexcept {

 /// @brief setter for the continue handler:
 /// We can either have a handler or a callback
-void SharedQueryState::setContinueHandler(std::function<void()> const& handler) {
+void SharedQueryState::setContinueHandler(std::function<void(bool)> const& handler) {
   std::lock_guard<std::mutex> guard(_mutex);
   _continueCallback = handler;
   _hasHandler = true;

@@ -83,7 +83,7 @@ class SharedQueryState {

   /// @brief setter for the continue handler:
   /// We can either have a handler or a callback
-  void setContinueHandler(std::function<void()> const& handler);
+  void setContinueHandler(std::function<void(bool)> const& handler);

  private:
   /// execute the _continueCallback. must hold _mutex

@@ -96,7 +96,7 @@ class SharedQueryState {
   /// @brief a callback function which is used to implement continueAfterPause.
   /// Typically, the RestHandler using the Query object will put a closure
   /// in here, which continueAfterPause simply calls.
-  std::function<void()> _continueCallback;
+  std::function<void(bool)> _continueCallback;

   bool _wasNotified;
@@ -94,7 +94,7 @@ void CacheManagerFeature::validateOptions(std::shared_ptr<options::ProgramOption

 void CacheManagerFeature::start() {
   auto scheduler = SchedulerFeature::SCHEDULER;
-  auto postFn = [scheduler](std::function<void()> fn) -> bool {
+  auto postFn = [scheduler](std::function<void(bool)> fn) -> bool {
     scheduler->queue(RequestPriority::LOW, fn);
     return true;
   };

@@ -280,7 +280,7 @@ void Manager::endTransaction(Transaction* tx) noexcept {
   _transactions.end(tx);
 }

-bool Manager::post(std::function<void()> fn) { return _schedulerPost(fn); }
+bool Manager::post(std::function<void(bool)> fn) { return _schedulerPost(fn); }

 std::tuple<bool, Metadata, std::shared_ptr<Table>> Manager::registerCache(uint64_t fixedSize,
                                                                           uint64_t maxSize) {
|
@ -72,7 +72,7 @@ class Rebalancer; // forward declaration
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
class Manager {
|
||||
protected:
|
||||
typedef std::function<bool(std::function<void()>)> PostFn;
|
||||
typedef std::function<bool(std::function<void(bool)>)> PostFn;
|
||||
|
||||
public:
|
||||
static const uint64_t minSize;
|
||||
|
@ -159,7 +159,7 @@ class Manager {
|
|||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Post a function to the scheduler
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
bool post(std::function<void()> fn);
|
||||
bool post(std::function<void(bool)> fn);
|
||||
|
||||
private:
|
||||
// use sizeof(uint64_t) + sizeof(std::shared_ptr<Cache>) + 64 for upper bound
|
||||
|
|
|
@ -38,7 +38,7 @@ FreeMemoryTask::~FreeMemoryTask() {}
|
|||
bool FreeMemoryTask::dispatch() {
|
||||
_manager->prepareTask(_environment);
|
||||
auto self = shared_from_this();
|
||||
return _manager->post([self, this]() -> void { run(); });
|
||||
return _manager->post([self, this](bool) -> void { run(); });
|
||||
}
|
||||
|
||||
void FreeMemoryTask::run() {
|
||||
|
@ -68,7 +68,7 @@ MigrateTask::~MigrateTask() {}
|
|||
bool MigrateTask::dispatch() {
|
||||
_manager->prepareTask(_environment);
|
||||
auto self = shared_from_this();
|
||||
return _manager->post([self, this]() -> void { run(); });
|
||||
return _manager->post([self, this](bool) -> void { run(); });
|
||||
}
|
||||
|
||||
void MigrateTask::run() {
|
||||
|
|
|
@ -1222,7 +1222,7 @@ void ClusterComm::disable() {
|
|||
}
|
||||
}
|
||||
|
||||
void ClusterComm::scheduleMe(std::function<void()> task) {
|
||||
void ClusterComm::scheduleMe(std::function<void(bool)> task) {
|
||||
arangodb::SchedulerFeature::SCHEDULER->queue(RequestPriority::HIGH, task);
|
||||
}
|
||||
|
||||
|
|
|
@ -595,7 +595,7 @@ class ClusterComm {
|
|||
/// public static function that any object can use.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static void scheduleMe(std::function<void()> task);
|
||||
static void scheduleMe(std::function<void(bool)> task);
|
||||
|
||||
protected: // protected members are for unit test purposes
|
||||
/// @brief Constructor for test cases.
|
||||
|
|
|
@@ -436,9 +436,16 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
   auto const prio = handler->priority();
   auto self = shared_from_this();

-  bool ok = SchedulerFeature::SCHEDULER->queue(prio, [self, this, handler]() {
-    handleRequestDirectly(basics::ConditionalLocking::DoLock, std::move(handler));
-  });
+  bool tryDirect = allowDirectHandling() && _peer->runningInThisThread();
+
+  bool ok = SchedulerFeature::SCHEDULER->queue(
+      prio,
+      [self, this, handler](bool isDirect) {
+        handleRequest(isDirect ? basics::ConditionalLocking::DoNotLock
+                               : basics::ConditionalLocking::DoLock,
+                      std::move(handler));
+      },
+      tryDirect);

   uint64_t messageId = handler->messageId();

@@ -452,13 +459,12 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
 }

 // Just run the handler, could have been called in a different thread
-void GeneralCommTask::handleRequestDirectly(bool doLock, std::shared_ptr<RestHandler> handler) {
+void GeneralCommTask::handleRequest(bool doLock, std::shared_ptr<RestHandler> handler) {
   TRI_ASSERT(doLock || _peer->runningInThisThread());

   auto self = shared_from_this();
   handler->runHandler([self, this, doLock](rest::RestHandler* handler) {
     RequestStatistics* stat = handler->stealStatistics();
     // TODO we could reduce all of this to strand::dispatch ?
     if (doLock || !_peer->runningInThisThread()) {
       // Note that the latter is for the case that a handler was put to sleep
       // and woke up in a different thread.

@@ -482,14 +488,14 @@ bool GeneralCommTask::handleRequestAsync(std::shared_ptr<RestHandler> handler,
     *jobId = handler->handlerId();

     // callback will persist the response with the AsyncJobManager
-    return SchedulerFeature::SCHEDULER->queue(handler->priority(), [self, handler] {
+    return SchedulerFeature::SCHEDULER->queue(handler->priority(), [self, handler](bool) {
       handler->runHandler([](RestHandler* h) {
         GeneralServerFeature::JOB_MANAGER->finishAsyncJob(h);
       });
     });
   } else {
     // here the response will just be ignored
-    return SchedulerFeature::SCHEDULER->queue(handler->priority(), [self, handler] {
+    return SchedulerFeature::SCHEDULER->queue(handler->priority(), [self, handler](bool) {
       handler->runHandler([](RestHandler*) {});
     });
   }

@@ -103,6 +103,8 @@ class GeneralCommTask : public SocketTask {
   /// @brief send the response to the client.
   virtual void addResponse(GeneralResponse&, RequestStatistics*) = 0;

+  virtual bool allowDirectHandling() const = 0;
+
  protected:
   enum class RequestFlow : bool { Continue = true, Abort = false };

@@ -146,7 +148,7 @@ class GeneralCommTask : public SocketTask {

  private:
   bool handleRequestSync(std::shared_ptr<RestHandler>);
-  void handleRequestDirectly(bool doLock, std::shared_ptr<RestHandler>);
+  void handleRequest(bool doLock, std::shared_ptr<RestHandler>);
   bool handleRequestAsync(std::shared_ptr<RestHandler>, uint64_t* jobId = nullptr);
 };
 }  // namespace rest

@@ -36,6 +36,8 @@ class HttpCommTask final : public GeneralCommTask {

   void addResponse(GeneralResponse& response, RequestStatistics* stat) override;

+  bool allowDirectHandling() const override final { return true; }
+
   /// @brief send error response including response body
   void addSimpleResponse(rest::ResponseCode, rest::ContentType, uint64_t messageId,
                          velocypack::Buffer<uint8_t>&&) override;

@@ -65,6 +65,8 @@ class VstCommTask final : public GeneralCommTask {
   // internal addResponse
   void addResponse(GeneralResponse&, RequestStatistics*) override;

+  bool allowDirectHandling() const override final { return false; }
+
  private:
   // process the VST 1000 request type
   void handleAuthHeader(VPackSlice const& header, uint64_t messageId);
|
@ -725,7 +725,3 @@ size_t IResearchViewMetaState::memory() const {
|
|||
|
||||
NS_END // iresearch
|
||||
NS_END // arangodb
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
|
|
|
@ -1624,7 +1624,7 @@ int MMFilesCollection::fillIndexes(transaction::Methods* trx,
|
|||
_logicalCollection.vocbase().name() + "/" + _logicalCollection.name() +
|
||||
" }, indexes: " + std::to_string(n - 1));
|
||||
|
||||
auto poster = [](std::function<void()> fn) -> void {
|
||||
auto poster = [](std::function<void(bool)> fn) -> void {
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, fn);
|
||||
};
|
||||
auto queue = std::make_shared<arangodb::basics::LocalTaskQueue>(poster);
|
||||
|
|
|
@@ -124,7 +124,8 @@ void MMFilesDitches::destroy() {
       delete ptr;
     } else if (type == MMFilesDitch::TRI_DITCH_DOCUMENT) {
       LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
-          << "logic error. shouldn't have document ditches on unload";
+          << "logic error. shouldn't have document ditches on unload"
+          << " (file " << ptr->filename() << ", line " << ptr->line() << ")";
       TRI_ASSERT(false);
     } else {
       LOG_TOPIC(ERR, arangodb::Logger::ENGINES) << "unknown ditch type";

@@ -182,17 +183,13 @@ MMFilesDitch* MMFilesDitches::process(bool& popped,
   // note that there is no need to check the entire list for a
   // MMFilesDocumentDitch as the list is filled up in chronological order. New
   // ditches are always added to the tail of the list, and if we have the
-  // following list HEAD -> TRI_DITCH_DATAFILE_CALLBACK -> TRI_DITCH_DOCUMENT then
-  // it is still safe to execute the datafile callback operation, even if there
-  // is a TRI_DITCH_DOCUMENT after it.
-  // This is the case because the TRI_DITCH_DATAFILE_CALLBACK is only put into
-  // the
-  // ditches list after changing the pointers in all headers. After the pointers
-  // are
-  // changed, it is safe to unload/remove an old datafile (that noone points
-  // to). And
-  // any newer TRI_DITCH_DOCUMENTs will always reference data inside other
-  // datafiles.
+  // following list HEAD -> TRI_DITCH_DATAFILE_CALLBACK -> TRI_DITCH_DOCUMENT
+  // then it is still safe to execute the datafile callback operation, even if
+  // there is a TRI_DITCH_DOCUMENT after it. This is the case because the
+  // TRI_DITCH_DATAFILE_CALLBACK is only put into the ditches list after
+  // changing the pointers in all headers. After the pointers are changed, it is
+  // safe to unload/remove an old datafile (that noone points to). And any newer
+  // TRI_DITCH_DOCUMENTs will always reference data inside other datafiles.

   if (!callback(ditch)) {
     return ditch;
|
|
@ -73,8 +73,8 @@ VPackSlice MMFilesExportCursor::next() {
|
|||
|
||||
size_t MMFilesExportCursor::count() const { return _size; }
|
||||
|
||||
std::pair<aql::ExecutionState, Result> MMFilesExportCursor::dump(VPackBuilder& builder,
|
||||
std::function<void()> const&) {
|
||||
std::pair<aql::ExecutionState, Result> MMFilesExportCursor::dump(
|
||||
VPackBuilder& builder, std::function<void(bool)> const&) {
|
||||
return {aql::ExecutionState::DONE, dumpSync(builder)};
|
||||
}
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ class MMFilesExportCursor final : public Cursor {
|
|||
size_t count() const override final;
|
||||
|
||||
std::pair<arangodb::aql::ExecutionState, Result> dump(velocypack::Builder& result,
|
||||
std::function<void()> const& ch) override final;
|
||||
std::function<void(bool)> const& ch) override final;
|
||||
|
||||
Result dumpSync(velocypack::Builder& result) override final;
|
||||
|
||||
|
|
|
@ -675,7 +675,7 @@ void MMFilesHashIndex::batchInsertUnique(
|
|||
|
||||
// queue cleanup callback
|
||||
auto allocator = _allocator.get();
|
||||
auto callback = [elements, queue, allocator]() -> void {
|
||||
auto callback = [elements, queue, allocator](bool) -> void {
|
||||
if (queue->status() != TRI_ERROR_NO_ERROR) {
|
||||
for (auto& it : *(elements.get())) {
|
||||
// free all elements to prevent leak
|
||||
|
@ -795,7 +795,7 @@ void MMFilesHashIndex::batchInsertMulti(
|
|||
|
||||
// queue cleanup callback
|
||||
auto allocator = _allocator.get();
|
||||
auto callback = [elements, queue, allocator]() -> void {
|
||||
auto callback = [elements, queue, allocator](bool) -> void {
|
||||
if (queue->status() != TRI_ERROR_NO_ERROR) {
|
||||
// free all elements to prevent leak
|
||||
for (auto& it : *(elements.get())) {
|
||||
|
|
|
@ -298,7 +298,7 @@ VPackBuilder Conductor::finishedWorkerStep(VPackSlice const& data) {
|
|||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
// don't block the response for workers waiting on this callback
|
||||
// this should allow workers to go into the IDLE state
|
||||
scheduler->queue(RequestPriority::LOW, [this] {
|
||||
scheduler->queue(RequestPriority::LOW, [this](bool) {
|
||||
MUTEX_LOCKER(guard, _callbackMutex);
|
||||
|
||||
if (_state == ExecutionState::RUNNING) {
|
||||
|
@ -729,7 +729,7 @@ int Conductor::_sendToAllDBServers(std::string const& path, VPackBuilder const&
|
|||
} else {
|
||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
scheduler->queue(RequestPriority::LOW, [this, path, message] {
|
||||
scheduler->queue(RequestPriority::LOW, [this, path, message](bool) {
|
||||
VPackBuilder response;
|
||||
|
||||
PregelFeature::handleWorkerRequest(_vocbaseGuard.database(), path,
|
||||
|
|
|
@ -187,7 +187,8 @@ std::map<CollectionID, std::vector<VertexShardInfo>> GraphStore<V, E>::_allocate
|
|||
}
|
||||
|
||||
template <typename V, typename E>
|
||||
void GraphStore<V, E>::loadShards(WorkerConfig* config, std::function<void()> const& callback) {
|
||||
void GraphStore<V, E>::loadShards(WorkerConfig* config,
|
||||
std::function<void(bool)> const& callback) {
|
||||
_config = config;
|
||||
TRI_ASSERT(_runningThreads == 0);
|
||||
LOG_TOPIC(DEBUG, Logger::PREGEL)
|
||||
|
@ -195,7 +196,7 @@ void GraphStore<V, E>::loadShards(WorkerConfig* config, std::function<void()> co
|
|||
|
||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
scheduler->queue(RequestPriority::LOW, [this, scheduler, callback] {
|
||||
scheduler->queue(RequestPriority::LOW, [this, scheduler, callback](bool) {
|
||||
// hold the current position where the ith vertex shard can
|
||||
// start to write its data. At the end the offset should equal the
|
||||
// sum of the counts of all ith edge shards
|
||||
|
@ -229,12 +230,12 @@ void GraphStore<V, E>::loadShards(WorkerConfig* config, std::function<void()> co
|
|||
TRI_ASSERT(vertexOff < _index.size());
|
||||
TRI_ASSERT(info.numEdges == 0 || edgeDataOffsets[shardIdx] < _edges->size());
|
||||
|
||||
scheduler->queue(RequestPriority::LOW,
|
||||
[this, &info, &edgeDataOffsets, vertexOff, shardIdx] {
|
||||
TRI_DEFER(_runningThreads--); // exception safe
|
||||
_loadVertices(*info.trx, info.vertexShard, info.edgeShards,
|
||||
vertexOff, edgeDataOffsets[shardIdx]);
|
||||
});
|
||||
scheduler->queue(RequestPriority::LOW, [this, &info, &edgeDataOffsets,
|
||||
vertexOff, shardIdx](bool) {
|
||||
TRI_DEFER(_runningThreads--); // exception safe
|
||||
_loadVertices(*info.trx, info.vertexShard, info.edgeShards,
|
||||
vertexOff, edgeDataOffsets[shardIdx]);
|
||||
});
|
||||
// update to next offset
|
||||
vertexOff += info.numVertices;
|
||||
} catch (...) {
|
||||
|
@ -594,7 +595,8 @@ void GraphStore<V, E>::_storeVertices(std::vector<ShardID> const& globalShards,
|
|||
}
|
||||
|
||||
template <typename V, typename E>
|
||||
void GraphStore<V, E>::storeResults(WorkerConfig* config, std::function<void()> const& cb) {
|
||||
void GraphStore<V, E>::storeResults(WorkerConfig* config,
|
||||
std::function<void(bool)> const& cb) {
|
||||
_config = config;
|
||||
|
||||
double now = TRI_microtime();
|
||||
|
@ -608,7 +610,8 @@ void GraphStore<V, E>::storeResults(WorkerConfig* config, std::function<void()>
|
|||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
do {
|
||||
_runningThreads++;
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, [this, start, end, now, cb] {
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, [this, start, end, now,
|
||||
cb](bool isDirect) {
|
||||
try {
|
||||
RangeIterator<VertexEntry> it = vertexIterator(start, end);
|
||||
_storeVertices(_config->globalShardIDs(), it);
|
||||
|
@ -620,7 +623,7 @@ void GraphStore<V, E>::storeResults(WorkerConfig* config, std::function<void()>
|
|||
if (_runningThreads == 0) {
|
||||
LOG_TOPIC(DEBUG, Logger::PREGEL)
|
||||
<< "Storing data took " << (TRI_microtime() - now) << "s";
|
||||
cb();
|
||||
cb(isDirect);
|
||||
}
|
||||
});
|
||||
start = end;
|
||||
|
|
|
@ -75,7 +75,7 @@ class GraphStore {
|
|||
GraphFormat<V, E> const* graphFormat() { return _graphFormat.get(); }
|
||||
|
||||
// ====================== NOT THREAD SAFE ===========================
|
||||
void loadShards(WorkerConfig* state, std::function<void()> const&);
|
||||
void loadShards(WorkerConfig* state, std::function<void(bool)> const&);
|
||||
void loadDocument(WorkerConfig* config, std::string const& documentID);
|
||||
void loadDocument(WorkerConfig* config, PregelShard sourceShard, PregelKey const& _key);
|
||||
// ======================================================================
|
||||
|
@ -91,7 +91,7 @@ class GraphStore {
|
|||
void replaceVertexData(VertexEntry const* entry, void* data, size_t size);
|
||||
|
||||
/// Write results to database
|
||||
void storeResults(WorkerConfig* config, std::function<void()> const&);
|
||||
void storeResults(WorkerConfig* config, std::function<void(bool)> const&);
|
||||
|
||||
private:
|
||||
std::map<CollectionID, std::vector<VertexShardInfo>> _allocateSpace();
|
||||
|
|
|
@ -270,7 +270,7 @@ void PregelFeature::cleanupWorker(uint64_t executionNumber) {
|
|||
// unmapping etc might need a few seconds
|
||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
scheduler->queue(RequestPriority::LOW, [this, executionNumber] {
|
||||
scheduler->queue(RequestPriority::LOW, [this, executionNumber](bool) {
|
||||
MUTEX_LOCKER(guard, _mutex);
|
||||
|
||||
auto wit = _workers.find(executionNumber);
|
||||
|
@ -372,7 +372,7 @@ void PregelFeature::handleConductorRequest(std::string const& path, VPackSlice c
|
|||
} else if (path == Utils::cancelGSSPath) {
|
||||
w->cancelGlobalStep(body);
|
||||
} else if (path == Utils::finalizeExecutionPath) {
|
||||
w->finalizeExecution(body, [exeNum] {
|
||||
w->finalizeExecution(body, [exeNum](bool) {
|
||||
if (Instance != nullptr) {
|
||||
Instance->cleanupWorker(exeNum);
|
||||
}
|
||||
|
|
|
@ -145,7 +145,7 @@ void RecoveryManager::updatedFailedServers() {
|
|||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
scheduler->queue(RequestPriority::LOW,
|
||||
[this, shard] { _renewPrimaryServer(shard); });
|
||||
[this, shard](bool) { _renewPrimaryServer(shard); });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -139,7 +139,7 @@ void Worker<V, E, M>::_initializeMessageCaches() {
|
|||
// @brief load the initial worker data, call conductor eventually
|
||||
template <typename V, typename E, typename M>
|
||||
void Worker<V, E, M>::setupWorker() {
|
||||
std::function<void()> callback = [this] {
|
||||
std::function<void(bool)> callback = [this](bool) {
|
||||
VPackBuilder package;
|
||||
package.openObject();
|
||||
package.add(Utils::senderKey, VPackValue(ServerState::instance()->getId()));
|
||||
|
@ -160,13 +160,13 @@ void Worker<V, E, M>::setupWorker() {
|
|||
for (std::string const& documentID : activeSet) {
|
||||
_graphStore->loadDocument(&_config, documentID);
|
||||
}
|
||||
callback();
|
||||
callback(false);
|
||||
} else {
|
||||
// initialization of the graphstore might take an undefined amount
|
||||
// of time. Therefore this is performed asynchronous
|
||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
scheduler->queue(RequestPriority::LOW, [this, callback] {
|
||||
scheduler->queue(RequestPriority::LOW, [this, callback](bool) {
|
||||
_graphStore->loadShards(&_config, callback);
|
||||
});
|
||||
}
|
||||
|
@ -325,7 +325,7 @@ void Worker<V, E, M>::_startProcessing() {
|
|||
}
|
||||
size_t i = 0;
|
||||
do {
|
||||
scheduler->queue(RequestPriority::LOW, [this, start, end, i] {
|
||||
scheduler->queue(RequestPriority::LOW, [this, start, end, i](bool) {
|
||||
if (_state != WorkerState::COMPUTING) {
|
||||
LOG_TOPIC(WARN, Logger::PREGEL) << "Execution aborted prematurely.";
|
||||
return;
|
||||
|
@ -588,7 +588,7 @@ void Worker<V, E, M>::_continueAsync() {
|
|||
|
||||
template <typename V, typename E, typename M>
|
||||
void Worker<V, E, M>::finalizeExecution(VPackSlice const& body,
|
||||
std::function<void(void)> callback) {
|
||||
std::function<void(bool)> callback) {
|
||||
// Only expect serial calls from the conductor.
|
||||
// Lock to prevent malicous activity
|
||||
MUTEX_LOCKER(guard, _commandMutex);
|
||||
|
@ -652,7 +652,7 @@ void Worker<V, E, M>::startRecovery(VPackSlice const& data) {
|
|||
_preRecoveryTotal = _graphStore->localVertexCount();
|
||||
WorkerConfig nextState(_config);
|
||||
nextState.updateConfig(data);
|
||||
_graphStore->loadShards(&nextState, [this, nextState, copy] {
|
||||
_graphStore->loadShards(&nextState, [this, nextState, copy](bool) {
|
||||
_config = nextState;
|
||||
compensateStep(copy.slice());
|
||||
});
|
||||
|
@ -667,7 +667,7 @@ void Worker<V, E, M>::compensateStep(VPackSlice const& data) {
|
|||
|
||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
scheduler->queue(RequestPriority::LOW, [this] {
|
||||
scheduler->queue(RequestPriority::LOW, [this](bool) {
|
||||
if (_state != WorkerState::RECOVERING) {
|
||||
LOG_TOPIC(WARN, Logger::PREGEL) << "Compensation aborted prematurely.";
|
||||
return;
|
||||
|
@ -724,7 +724,7 @@ void Worker<V, E, M>::_callConductor(std::string const& path, VPackBuilder const
|
|||
if (ServerState::instance()->isRunningInCluster() == false) {
|
||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||
rest::Scheduler* scheduler = SchedulerFeature::SCHEDULER;
|
||||
scheduler->queue(RequestPriority::LOW, [path, message] {
|
||||
scheduler->queue(RequestPriority::LOW, [path, message](bool) {
|
||||
VPackBuilder response;
|
||||
PregelFeature::handleConductorRequest(path, message.slice(), response);
|
||||
});
|
||||
|
|
|
@ -53,7 +53,7 @@ class IWorker {
|
|||
virtual void cancelGlobalStep(VPackSlice const& data) = 0; // called by coordinator
|
||||
virtual void receivedMessages(VPackSlice const& data) = 0;
|
||||
virtual void finalizeExecution(VPackSlice const& data,
|
||||
std::function<void(void)> callback) = 0;
|
||||
std::function<void(bool)> callback) = 0;
|
||||
virtual void startRecovery(VPackSlice const& data) = 0;
|
||||
virtual void compensateStep(VPackSlice const& data) = 0;
|
||||
virtual void finalizeRecovery(VPackSlice const& data) = 0;
|
||||
|
@ -155,7 +155,7 @@ class Worker : public IWorker {
|
|||
void startGlobalStep(VPackSlice const& data) override;
|
||||
void cancelGlobalStep(VPackSlice const& data) override;
|
||||
void receivedMessages(VPackSlice const& data) override;
|
||||
void finalizeExecution(VPackSlice const& data, std::function<void(void)> callback) override;
|
||||
void finalizeExecution(VPackSlice const& data, std::function<void(bool)> callback) override;
|
||||
void startRecovery(VPackSlice const& data) override;
|
||||
void compensateStep(VPackSlice const& data) override;
|
||||
void finalizeRecovery(VPackSlice const& data) override;
|
||||
|
|
|
@ -735,7 +735,7 @@ Result DatabaseInitialSyncer::fetchCollectionDump(arangodb::LogicalCollection* c
|
|||
// already fetch next batch in the background, by posting the
|
||||
// request to the scheduler, which can run it asynchronously
|
||||
sharedStatus->request([this, self, &stats, &baseUrl, sharedStatus, coll,
|
||||
leaderColl, batch, fromTick, chunkSize]() {
|
||||
leaderColl, batch, fromTick, chunkSize](bool) {
|
||||
fetchDumpChunk(sharedStatus, baseUrl, coll, leaderColl, stats,
|
||||
batch + 1, fromTick, chunkSize);
|
||||
});
|
||||
|
|
|
@ -297,7 +297,7 @@ Result Syncer::JobSynchronizer::waitForResponse(
|
|||
return Result(TRI_ERROR_REPLICATION_APPLIER_STOPPED);
|
||||
}
|
||||
|
||||
void Syncer::JobSynchronizer::request(std::function<void()> const& cb) {
|
||||
void Syncer::JobSynchronizer::request(std::function<void(bool)> const& cb) {
|
||||
// by indicating that we have posted an async job, the caller
|
||||
// will block on exit until all posted jobs have finished
|
||||
if (!jobPosted()) {
|
||||
|
@ -306,14 +306,14 @@ void Syncer::JobSynchronizer::request(std::function<void()> const& cb) {
|
|||
|
||||
try {
|
||||
auto self = shared_from_this();
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, [this, self, cb]() {
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, [this, self, cb](bool isDirect) {
|
||||
// whatever happens next, when we leave this here, we need to indicate
|
||||
// that there is no more posted job.
|
||||
// otherwise the calling thread may block forever waiting on the posted
|
||||
// jobs to finish
|
||||
auto guard = scopeGuard([this]() { jobDone(); });
|
||||
|
||||
cb();
|
||||
cb(isDirect);
|
||||
});
|
||||
} catch (...) {
|
||||
// will get here only if Scheduler::post threw
|
||||
|
|
|
@ -79,7 +79,7 @@ class Syncer : public std::enable_shared_from_this<Syncer> {
|
|||
/// @brief post an async request to the scheduler
|
||||
/// this will increase the number of inflight jobs, and count it down
|
||||
/// when the posted request has finished
|
||||
void request(std::function<void()> const& cb);
|
||||
void request(std::function<void(bool)> const& cb);
|
||||
|
||||
/// @brief notifies that a job was posted
|
||||
/// returns false if job counter could not be increased (e.g. because
|
||||
|
|
|
@ -1821,7 +1821,7 @@ Result TailingSyncer::processMasterLog(std::shared_ptr<Syncer::JobSynchronizer>
|
|||
mustFetchBatch = false;
|
||||
auto self = shared_from_this();
|
||||
sharedStatus->request([this, self, sharedStatus, fetchTick, lastScannedTick,
|
||||
firstRegularTick]() {
|
||||
firstRegularTick](bool) {
|
||||
fetchMasterLog(sharedStatus, fetchTick, lastScannedTick, firstRegularTick);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -219,7 +219,7 @@ bool RestBatchHandler::executeNextHandler() {
|
|||
}
|
||||
|
||||
// now scheduler the real handler
|
||||
bool ok = SchedulerFeature::SCHEDULER->queue(handler->priority(), [this, self, handler]() {
|
||||
bool ok = SchedulerFeature::SCHEDULER->queue(handler->priority(), [this, self, handler](bool) {
|
||||
// start to work for this handler
|
||||
// ignore any errors here, will be handled later by inspecting the response
|
||||
try {
|
||||
|
|
|
@ -182,7 +182,7 @@ RestStatus RestCursorHandler::registerQueryOrCursor(VPackSlice const& slice) {
|
|||
|
||||
std::shared_ptr<aql::SharedQueryState> ss = query->sharedState();
|
||||
auto self = shared_from_this();
|
||||
ss->setContinueHandler([this, self, ss] { continueHandlerExecution(); });
|
||||
ss->setContinueHandler([this, self, ss](bool) { continueHandlerExecution(); });
|
||||
|
||||
registerQuery(std::move(query));
|
||||
return processQuery();
|
||||
|
@ -472,7 +472,7 @@ RestStatus RestCursorHandler::generateCursorResult(rest::ResponseCode code,
|
|||
Result r;
|
||||
auto self = shared_from_this();
|
||||
std::tie(state, r) =
|
||||
cursor->dump(builder, [this, self]() { continueHandlerExecution(); });
|
||||
cursor->dump(builder, [this, self](bool) { continueHandlerExecution(); });
|
||||
if (state == aql::ExecutionState::WAITING) {
|
||||
builder.clear();
|
||||
_leasedCursor = cursor;
|
||||
|
|
|
@ -119,11 +119,13 @@ static Result restoreDataParser(char const* ptr, char const* pos,
|
|||
} catch (VPackException const& ex) {
|
||||
// Could not parse the given string
|
||||
return Result{TRI_ERROR_HTTP_CORRUPTED_JSON,
|
||||
"received invalid JSON data for collection '" + collectionName + "': " + ex.what()};
|
||||
"received invalid JSON data for collection '" +
|
||||
collectionName + "': " + ex.what()};
|
||||
} catch (std::exception const& ex) {
|
||||
// Could not even build the string
|
||||
return Result{TRI_ERROR_HTTP_CORRUPTED_JSON,
|
||||
"received invalid JSON data for collection '" + collectionName + "': " + ex.what()};
|
||||
"received invalid JSON data for collection '" +
|
||||
collectionName + "': " + ex.what()};
|
||||
} catch (...) {
|
||||
return Result{TRI_ERROR_INTERNAL};
|
||||
}
|
||||
|
@ -132,7 +134,8 @@ static Result restoreDataParser(char const* ptr, char const* pos,
|
|||
|
||||
if (!slice.isObject()) {
|
||||
return Result{TRI_ERROR_HTTP_CORRUPTED_JSON,
|
||||
"received invalid JSON data for collection '" + collectionName + "': data is no object"};
|
||||
"received invalid JSON data for collection '" +
|
||||
collectionName + "': data is no object"};
|
||||
}
|
||||
|
||||
type = REPLICATION_INVALID;
|
||||
|
@ -140,7 +143,8 @@ static Result restoreDataParser(char const* ptr, char const* pos,
|
|||
for (auto const& pair : VPackObjectIterator(slice, true)) {
|
||||
if (!pair.key.isString()) {
|
||||
return Result{TRI_ERROR_HTTP_CORRUPTED_JSON,
|
||||
"received invalid JSON data for collection '" + collectionName + "': got a non-string key"};
|
||||
"received invalid JSON data for collection '" +
|
||||
collectionName + "': got a non-string key"};
|
||||
}
|
||||
|
||||
if (pair.key.isEqualString(::typeString)) {
|
||||
|
@ -176,7 +180,8 @@ static Result restoreDataParser(char const* ptr, char const* pos,
|
|||
|
||||
if (key.empty()) {
|
||||
return Result{TRI_ERROR_HTTP_BAD_PARAMETER,
|
||||
"received invalid JSON data for collection '" + collectionName + "': empty key"};
|
||||
"received invalid JSON data for collection '" +
|
||||
collectionName + "': empty key"};
|
||||
}
|
||||
|
||||
return Result{TRI_ERROR_NO_ERROR};
|
||||
|
@ -1181,7 +1186,7 @@ Result RestReplicationHandler::parseBatch(std::string const& collectionName,
|
|||
VPackOptions options = VPackOptions::Defaults;
|
||||
options.checkAttributeUniqueness = true;
|
||||
VPackBuilder builder(&options);
|
||||
|
||||
|
||||
allMarkers.clear();
|
||||
|
||||
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(_request.get());
|
||||
|
|
|
@ -120,7 +120,7 @@ RestStatus RestTestHandler::execute() {
|
|||
|
||||
auto self(shared_from_this());
|
||||
|
||||
bool ok = SchedulerFeature::SCHEDULER->queue(priority(res.get()), [this, self, duration]() {
|
||||
bool ok = SchedulerFeature::SCHEDULER->queue(priority(res.get()), [this, self, duration](bool) {
|
||||
auto stop = clock::now() + duration;
|
||||
|
||||
uint64_t count = 0;
|
||||
|
|
|
@ -387,6 +387,7 @@ void DatabaseFeature::beginShutdown() {
|
|||
|
||||
void DatabaseFeature::stop() {
|
||||
stopAppliers();
|
||||
MUTEX_LOCKER(mutexLocker, _databasesMutex);
|
||||
|
||||
auto unuser(_databasesProtector.use());
|
||||
auto theLists = _databasesLists.load();
|
||||
|
|
|
@@ -28,6 +28,7 @@
 #include <velocypack/velocypack-aliases.h>

 #include <thread>
 #include <unordered_set>

 #include "Basics/MutexLocker.h"
 #include "Basics/StringUtils.h"

@@ -40,11 +41,40 @@
 #include "Scheduler/JobGuard.h"
 #include "Scheduler/Task.h"
 #include "Statistics/RequestStatistics.h"
 #include "Utils/ExecContext.h"

 using namespace arangodb;
 using namespace arangodb::basics;
 using namespace arangodb::rest;

+std::unordered_set<void*> gActiveStrandMap;
+Mutex gActiveStrandMapMutex;
+
+asio_ns::io_context::strand* Scheduler::newStrand() {
+  asio_ns::io_context::strand* newbie(nullptr);
+  int count(139);
+  std::unordered_set<void*>::iterator it;
+
+  MUTEX_LOCKER(locker, gActiveStrandMapMutex);
+  do {
+    delete newbie;
+    newbie = new asio_ns::io_context::strand(*_ioContext);
+
+    it = gActiveStrandMap.find((void*)(newbie->impl_));
+  } while (gActiveStrandMap.end() != it && --count);
+
+  if (gActiveStrandMap.end() == it) {
+    gActiveStrandMap.insert((void*)(newbie->impl_));
+  }
+
+  return newbie;
+}
+
+void Scheduler::releaseStrand(asio_ns::io_context::strand* strandDone) {
+  MUTEX_LOCKER(locker, gActiveStrandMapMutex);
+  gActiveStrandMap.erase((void*)(strandDone->impl_));
+}
+
 namespace {
 // controls how fast excess threads to io_context get pruned.
 // 60 known to slow down tests that use single client thread (matthewv)
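Note on the strand bookkeeping added above: newStrand() now retries (bounded by the count of 139 attempts) until it obtains a strand whose underlying implementation object is not already registered in gActiveStrandMap, so that different sockets are less likely to end up serialized on the same strand, and releaseStrand() removes the registration again when the owning socket goes away (see the Socket destructor change further below).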
@@ -202,26 +232,36 @@ Scheduler::~Scheduler() {
 }

 // do not pass callback by reference, might get deleted before execution
-void Scheduler::post(std::function<void()> const callback) {
+void Scheduler::post(std::function<void(bool)> const callback, bool isHandler) {
   // increment number of queued and guard against exceptions
-  incQueued();
+  uint64_t old = incQueued();
+  old += _fifoSize[FIFO1] + _fifoSize[FIFO2] + _fifoSize[FIFO3];

   // reduce queued at the end
   auto guardQueue = scopeGuard([this]() { decQueued(); });

-  // capture without self, ioContext will not live longer than scheduler
-  _ioContext->post([this, callback]() {
-    // start working
+  // if there is a handler, there is also an io task
+  if (isHandler && old < 2) {
     JobGuard jobGuard(this);
     jobGuard.work();

-    // reduce number of queued now
-    decQueued();
+    callback(true);

-    callback();
-  });
+    drain();
+  } else {
+    // capture without self, ioContext will not live longer than scheduler
+    _ioContext->post([this, callback]() {
+      auto guardQueue = scopeGuard([this]() { decQueued(); });

-  // no exception happened, cancel guard
-  guardQueue.cancel();
+      JobGuard jobGuard(this);
+      jobGuard.work();

+      callback(false);
+    });

+    // no exception happened, cancel guard
+    guardQueue.cancel();
+  }
 }

 // do not pass callback by reference, might get deleted before execution
@@ -231,10 +271,11 @@ void Scheduler::post(asio_ns::io_context::strand& strand, std::function<void()>
   auto guardQueue = scopeGuard([this]() { decQueued(); });

   strand.post([this, callback]() {
+    auto guardQueue = scopeGuard([this]() { decQueued(); });
+
     JobGuard guard(this);
     guard.work();

-    decQueued();
     callback();
   });
@@ -242,7 +283,8 @@ void Scheduler::post(asio_ns::io_context::strand& strand, std::function<void()>
   guardQueue.cancel();
 }

-bool Scheduler::queue(RequestPriority prio, std::function<void()> const& callback) {
+bool Scheduler::queue(RequestPriority prio,
+                      std::function<void(bool)> const& callback, bool isHandler) {
   bool ok = true;

   switch (prio) {

@@ -256,7 +298,7 @@ bool Scheduler::queue(RequestPriority prio, std::function<void()> const& callbac
       if (0 < _fifoSize[FIFO1] || !canPostDirectly(prio)) {
         ok = pushToFifo(FIFO1, callback);
       } else {
-        post(callback);
+        post(callback, isHandler);
       }
       break;

@@ -268,7 +310,7 @@ bool Scheduler::queue(RequestPriority prio, std::function<void()> const& callbac
       if (0 < _fifoSize[FIFO1] || 0 < _fifoSize[FIFO2] || !canPostDirectly(prio)) {
         ok = pushToFifo(FIFO2, callback);
       } else {
-        post(callback);
+        post(callback, isHandler);
       }
       break;

@@ -281,7 +323,7 @@ bool Scheduler::queue(RequestPriority prio, std::function<void()> const& callbac
           0 < _fifoSize[FIFO3] || !canPostDirectly(prio)) {
         ok = pushToFifo(FIFO3, callback);
       } else {
-        post(callback);
+        post(callback, isHandler);
       }
       break;
@@ -357,24 +399,23 @@ std::string Scheduler::infoStatus() {

 bool Scheduler::canPostDirectly(RequestPriority prio) const noexcept {
   auto counters = getCounters();
-  auto nrWorking = numWorking(counters);
   auto nrQueued = numQueued(counters);

   switch (prio) {
     case RequestPriority::HIGH:
-      return nrWorking + nrQueued < _maxThreads;
+      return nrQueued < _maxThreads;

     // the "/ 2" is an assumption that HIGH is typically responses to our outbound messages
     // where MED & LOW are incoming requests. Keep half the threads processing our work and half their work.
     case RequestPriority::MED:
     case RequestPriority::LOW:
-      return nrWorking + nrQueued < _maxThreads / 2;
+      return nrQueued < _maxThreads / 2;
   }

   return false;
 }

-bool Scheduler::pushToFifo(int64_t fifo, std::function<void()> const& callback) {
+bool Scheduler::pushToFifo(int64_t fifo, std::function<void(bool)> const& callback) {
   LOG_TOPIC(TRACE, Logger::THREADS) << "Push element on fifo: " << fifo;
   TRI_ASSERT(0 <= fifo && fifo < NUMBER_FIFOS);
@@ -395,14 +436,15 @@ bool Scheduler::pushToFifo(int64_t fifo, std::function<void()> const& callback)

     // then check, otherwise we might miss to wake up a thread
     auto counters = getCounters();
-    auto nrWorking = numRunning(counters);
     auto nrQueued = numQueued(counters);

-    if (0 == nrWorking + nrQueued) {
-      post([] {
-        LOG_TOPIC(DEBUG, Logger::THREADS) << "Wakeup alarm";
-        /*wakeup call for scheduler thread*/
-      });
+    if (0 == nrQueued) {
+      post(
+          [](bool) {
+            LOG_TOPIC(DEBUG, Logger::THREADS) << "Wakeup alarm";
+            /*wakeup call for scheduler thread*/
+          },
+          false);
     }
   } catch (...) {
     return false;

@@ -427,7 +469,7 @@ bool Scheduler::popFifo(int64_t fifo) {
     }
   });

-  post(job->_callback);
+  post(job->_callback, false);

   --_fifoSize[p];
 }
@@ -74,7 +74,7 @@ class Scheduler {
   // currently serving the `io_context`.
   //
   // `numQueued` returns the number of jobs queued in the io_context
-  // that are not yet worked on.
+  // (that are working or not yet worked on).
   //
   // `numWorking`returns the number of jobs currently worked on.
   //

@@ -95,7 +95,7 @@ class Scheduler {
     uint64_t _fifo3;
   };

-  bool queue(RequestPriority prio, std::function<void()> const&);
+  bool queue(RequestPriority prio, std::function<void(bool)> const&, bool isHandler = false);
   void post(asio_ns::io_context::strand&, std::function<void()> const callback);

   void addQueueStatistics(velocypack::Builder&) const;

@@ -106,7 +106,7 @@ class Scheduler {
   bool isStopping() const noexcept { return (_counters & (1ULL << 63)) != 0; }

  private:
-  void post(std::function<void()> const callback);
+  void post(std::function<void(bool)> const callback, bool isHandler);
   void drain();

   inline void setStopping() noexcept { _counters |= (1ULL << 63); }

@@ -132,7 +132,10 @@ class Scheduler {
     return (value >> 32) & 0xFFFFULL;
   }

-  inline void incQueued() noexcept { _counters += 1ULL << 32; }
+  inline uint64_t incQueued() noexcept {
+    uint64_t old = _counters.fetch_add(1ULL << 32);
+    return (old >> 32) & 0xFFFFULL;
+  }

   inline void decQueued() noexcept {
     TRI_ASSERT(((_counters & 0XFFFF00000000UL) >> 32) > 0);

@@ -175,11 +178,11 @@ class Scheduler {
   // queue is full

   struct FifoJob {
-    FifoJob(std::function<void()> const& callback) : _callback(callback) {}
-    std::function<void()> _callback;
+    FifoJob(std::function<void(bool)> const& callback) : _callback(callback) {}
+    std::function<void(bool)> _callback;
   };

-  bool pushToFifo(int64_t fifo, std::function<void()> const& callback);
+  bool pushToFifo(int64_t fifo, std::function<void(bool)> const& callback);
   bool popFifo(int64_t fifo);

   static constexpr int64_t NUMBER_FIFOS = 3;

@@ -209,9 +212,8 @@ class Scheduler {
     return new asio_ns::steady_timer(*_ioContext);
   }

-  asio_ns::io_context::strand* newStrand() {
-    return new asio_ns::io_context::strand(*_ioContext);
-  }
+  asio_ns::io_context::strand* newStrand();
+  void releaseStrand(asio_ns::io_context::strand* strandDone);

   asio_ns::ip::tcp::acceptor* newAcceptor() {
     return new asio_ns::ip::tcp::acceptor(*_ioContext);
@@ -47,7 +47,7 @@ class Socket {
   Socket(Socket const& that) = delete;
   Socket(Socket&& that) = delete;

-  virtual ~Socket() {}
+  virtual ~Socket() { _scheduler->releaseStrand(_strand.get()); }

   bool isEncrypted() const { return _encrypted; }
@@ -110,7 +110,7 @@ class Cursor {
    * Second: Result If State==DONE this contains Error information or NO_ERROR. On NO_ERROR result is filled.
    */
   virtual std::pair<aql::ExecutionState, Result> dump(velocypack::Builder& result,
-                                                      std::function<void()> const&) = 0;
+                                                      std::function<void(bool)> const&) = 0;

   /**
    * @brief Dump the cursor result. This is guaranteed to return the result in this thread.
|
@ -644,7 +644,7 @@ Result Collections::warmup(TRI_vocbase_t& vocbase, LogicalCollection const& coll
|
|||
}
|
||||
|
||||
auto idxs = coll.getIndexes();
|
||||
auto poster = [](std::function<void()> fn) -> void {
|
||||
auto poster = [](std::function<void(bool)> fn) -> void {
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, fn);
|
||||
};
|
||||
auto queue = std::make_shared<basics::LocalTaskQueue>(poster);
|
||||
|
|
|
@ -276,7 +276,7 @@ std::function<void(const asio::error_code&)> Task::callbackFunction() {
|
|||
}
|
||||
|
||||
// now do the work:
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, [self, this, execContext] {
|
||||
SchedulerFeature::SCHEDULER->queue(RequestPriority::LOW, [self, this, execContext](bool) {
|
||||
ExecContextScope scope(_user.empty() ? ExecContext::superuser()
|
||||
: execContext.get());
|
||||
|
||||
|
|
|
@ -508,9 +508,10 @@ void DumpFeature::collectOptions(std::shared_ptr<options::ProgramOptions> option
|
|||
"maximum size for individual data batches (in bytes)",
|
||||
new UInt64Parameter(&_options.maxChunkSize));
|
||||
|
||||
options->addOption("--threads",
|
||||
"maximum number of collections to process in parallel. From v3.4.0",
|
||||
new UInt32Parameter(&_options.threadCount));
|
||||
options->addOption(
|
||||
"--threads",
|
||||
"maximum number of collections to process in parallel. From v3.4.0",
|
||||
new UInt32Parameter(&_options.threadCount));
|
||||
|
||||
options->addOption("--dump-data", "dump collection data",
|
||||
new BooleanParameter(&_options.dumpData));
|
||||
|
|
|
@ -176,6 +176,7 @@ typedef long suseconds_t;
|
|||
#include "Basics/voc-errors.h"
|
||||
#include "Basics/error.h"
|
||||
#include "Basics/debugging.h"
|
||||
#include "Basics/error.h"
|
||||
#include "Basics/make_unique.h"
|
||||
#include "Basics/memory.h"
|
||||
#include "Basics/system-compiler.h"
|
||||
|
|
|
@ -45,7 +45,7 @@ LocalTask::LocalTask(std::shared_ptr<LocalTaskQueue> const& queue)
|
|||
|
||||
void LocalTask::dispatch() {
|
||||
auto self = shared_from_this();
|
||||
_queue->post([self, this]() {
|
||||
_queue->post([self, this](bool) {
|
||||
_queue->startTask();
|
||||
try {
|
||||
run();
|
||||
|
@ -62,16 +62,16 @@ void LocalTask::dispatch() {
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
LocalCallbackTask::LocalCallbackTask(std::shared_ptr<LocalTaskQueue> const& queue,
|
||||
std::function<void()> const& cb)
|
||||
std::function<void(bool)> const& cb)
|
||||
: _queue(queue), _cb(cb) {}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief run the callback and join
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void LocalCallbackTask::run() {
|
||||
void LocalCallbackTask::run(bool isDirect) {
|
||||
try {
|
||||
_cb();
|
||||
_cb(isDirect);
|
||||
} catch (...) {
|
||||
}
|
||||
_queue->join();
|
||||
|
@ -83,7 +83,7 @@ void LocalCallbackTask::run() {
|
|||
|
||||
void LocalCallbackTask::dispatch() {
|
||||
auto self = shared_from_this();
|
||||
_queue->post([self, this]() { run(); });
|
||||
_queue->post([self, this](bool isDirect) { run(isDirect); });
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -140,7 +140,7 @@ void LocalTaskQueue::enqueueCallback(std::shared_ptr<LocalCallbackTask> task) {
|
|||
/// by task dispatch.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void LocalTaskQueue::post(std::function<void()> fn) { _poster(fn); }
|
||||
void LocalTaskQueue::post(std::function<void(bool)> fn) { _poster(fn); }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief join a single task. reduces the number of waiting tasks and wakes
|
||||
|
|
|
@ -63,10 +63,10 @@ class LocalCallbackTask : public std::enable_shared_from_this<LocalCallbackTask>
|
|||
LocalCallbackTask& operator=(LocalCallbackTask const&) = delete;
|
||||
|
||||
LocalCallbackTask(std::shared_ptr<LocalTaskQueue> const& queue,
|
||||
std::function<void()> const& cb);
|
||||
std::function<void(bool)> const& cb);
|
||||
virtual ~LocalCallbackTask() {}
|
||||
|
||||
virtual void run();
|
||||
virtual void run(bool);
|
||||
void dispatch();
|
||||
|
||||
protected:
|
||||
|
@ -81,12 +81,12 @@ class LocalCallbackTask : public std::enable_shared_from_this<LocalCallbackTask>
|
|||
/// ignored; must not call queue->setStatus() or queue->enqueue())
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
std::function<void()> _cb;
|
||||
std::function<void(bool)> _cb;
|
||||
};
|
||||
|
||||
class LocalTaskQueue {
|
||||
public:
|
||||
typedef std::function<void(std::function<void()>)> PostFn;
|
||||
typedef std::function<void(std::function<void(bool)>)> PostFn;
|
||||
|
||||
LocalTaskQueue() = delete;
|
||||
LocalTaskQueue(LocalTaskQueue const&) = delete;
|
||||
|
@ -117,7 +117,7 @@ class LocalTaskQueue {
|
|||
/// by task dispatch.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void post(std::function<void()> fn);
|
||||
void post(std::function<void(bool)> fn);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief join a single task. reduces the number of waiting tasks and wakes
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
#include <velocypack/velocypack-common.h>
|
||||
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
#include <velocypack/velocypack-common.h>
|
||||
|
||||
using namespace arangodb::basics;
|
||||
|
||||
|
|
|
@ -80,13 +80,13 @@
|
|||
"\xe1\x83\xa8\xe1\x83\x94\xe1\x83\xae\xe1\x83\x95\xe1\x83\x94\xe1\x83\x93" \
|
||||
"\xe1\x83\xa0\xe1\x83\x90\xe1\x83\x9b\xe1\x83\x93\xe1\x83\x94"
|
||||
|
||||
#define TRI_BYE_MESSAGE \
|
||||
TRI_BYE_MESSAGE_CH \
|
||||
" " TRI_BYE_MESSAGE_CN " " TRI_BYE_MESSAGE_CZ " " TRI_BYE_MESSAGE_DE \
|
||||
" " TRI_BYE_MESSAGE_EN " " TRI_BYE_MESSAGE_EO " " TRI_BYE_MESSAGE_ES \
|
||||
" " TRI_BYE_MESSAGE_GR "\n" TRI_BYE_MESSAGE_IL " " TRI_BYE_MESSAGE_IT \
|
||||
" " TRI_BYE_MESSAGE_NL " " TRI_BYE_MESSAGE_SV " " TRI_BYE_MESSAGE_FR \
|
||||
" " TRI_BYE_MESSAGE_JP " " TRI_BYE_MESSAGE_RU " " TRI_BYE_MESSAGE_PT \
|
||||
" " TRI_BYE_MESSAGE_FA " " TRI_BYE_MESSAGE_LV " " TRI_BYE_MESSAGE_GE \
|
||||
" " TRI_BYE_MESSAGE_KR
|
||||
#define TRI_BYE_MESSAGE \
|
||||
TRI_BYE_MESSAGE_CH \
|
||||
" " TRI_BYE_MESSAGE_CN " " TRI_BYE_MESSAGE_CZ " " TRI_BYE_MESSAGE_DE \
|
||||
" " TRI_BYE_MESSAGE_EN " " TRI_BYE_MESSAGE_EO " " TRI_BYE_MESSAGE_ES \
|
||||
" " TRI_BYE_MESSAGE_GR "\n" TRI_BYE_MESSAGE_IL " " TRI_BYE_MESSAGE_IT \
|
||||
" " TRI_BYE_MESSAGE_NL " " TRI_BYE_MESSAGE_SV " " TRI_BYE_MESSAGE_FR \
|
||||
" " TRI_BYE_MESSAGE_JP " " TRI_BYE_MESSAGE_RU " " TRI_BYE_MESSAGE_PT \
|
||||
" " TRI_BYE_MESSAGE_FA " " TRI_BYE_MESSAGE_LV " " TRI_BYE_MESSAGE_GE \
|
||||
" " TRI_BYE_MESSAGE_KR
|
||||
#endif
|
||||
|
|
|
@ -1307,7 +1307,7 @@ static e_sig_action whatDoesSignal(int signal) {
|
|||
return core;
|
||||
// case SIGEMT: // 7,-,7 Term Emulator trap
|
||||
case SIGSTKFLT: // -,16,- Term Stack fault on coprocessor (unused)
|
||||
// case SIGIO: // 23,29,22 Term I/O now possible (4.2BSD)
|
||||
// case SIGIO: // 23,29,22 Term I/O now possible (4.2BSD)
|
||||
case SIGPWR: // 29,30,19 Term Power failure (System V)
|
||||
// case SIGINFO: // 29,-,- A synonym for SIGPWR
|
||||
// case SIGLOST: // -,-,- Term File lock lost (unused)
|
||||
|
|
|
@@ -36,7 +36,7 @@ class Callbacks {

typedef std::function<void(std::unique_ptr<GeneralResponse>)> OnSuccessCallback;

- typedef std::function<void(std::function<void()>)> ScheduleMeCallback;
+ typedef std::function<void(std::function<void(bool)>)> ScheduleMeCallback;

Callbacks() {}
Callbacks(OnSuccessCallback onSuccess, OnErrorCallback onError)

@@ -48,7 +48,7 @@ class Callbacks {
ScheduleMeCallback _scheduleMe;

protected:
- static void defaultScheduleMe(std::function<void()> task) { task(); }
+ static void defaultScheduleMe(std::function<void(bool)> task) { task(false); }
};
} // namespace communicator
} // namespace arangodb
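The communicator's ScheduleMeCallback picks up the same signature, and the default hook now runs its task immediately while passing false for the new flag. A hedged sketch of this pluggable-hook shape (hypothetical names, not the actual arangodb::communicator code):

#include <functional>
#include <iostream>

// Illustrative only: a callbacks holder with a replaceable "schedule me" hook.
struct MyCallbacks {
  using ScheduleMeCallback = std::function<void(std::function<void(bool)>)>;

  // Default hook: run the task right away and report that it was not
  // dispatched through the scheduler's direct-execution path.
  static void defaultScheduleMe(std::function<void(bool)> task) { task(false); }

  ScheduleMeCallback scheduleMe = defaultScheduleMe;
};

int main() {
  MyCallbacks cb;
  cb.scheduleMe([](bool isDirect) {
    std::cout << "task ran, isDirect=" << std::boolalpha << isDirect << "\n";
  });

  // Swap in a "scheduler" that claims direct execution.
  cb.scheduleMe = [](std::function<void(bool)> task) { task(true); };
  cb.scheduleMe([](bool isDirect) {
    std::cout << "task ran, isDirect=" << std::boolalpha << isDirect << "\n";
  });
  return 0;
}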
@@ -471,7 +471,7 @@ void Communicator::handleResult(CURL* handle, CURLcode rc) {
// defensive code: intentionally not passing "this". There is a
// possibility that Scheduler will execute the code after Communicator
// object destroyed. use shared_from_this() if ever essential.
- rip->_callbacks._scheduleMe([curlHandle, handle, rc, rip] { // lambda rewrite starts
+ rip->_callbacks._scheduleMe([curlHandle, handle, rc, rip](bool) { // lambda rewrite starts
double connectTime = 0.0;
LOG_TOPIC(TRACE, Logger::COMMUNICATION)
<< ::buildPrefix(rip->_ticketId) << "curl rc is : " << rc << " after "
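The existing callback body does not use the new flag, so the lambda simply gains an unnamed bool parameter. A tiny hedged sketch of that adaptation (generic names, not the communicator internals):

#include <functional>
#include <iostream>

// A hook that now hands its tasks a bool flag.
void scheduleMe(std::function<void(bool)> task) { task(false); }

int main() {
  int ticket = 42;  // stand-in for captured state such as rip/handle
  // The old form would have been [ticket] { ... }; adding "(bool)" accepts
  // and ignores the flag without touching the body.
  scheduleMe([ticket](bool) { std::cout << "handling ticket " << ticket << "\n"; });
  return 0;
}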
@@ -31,7 +31,6 @@
struct TRI_vocbase_t;

namespace arangodb {

class LogicalDataSource; // forward declaration
}

@@ -740,4 +739,4 @@ void TRI_AddGlobalFunctionVocbase(v8::Isolate* isolate, v8::Handle<v8::String> n
void TRI_AddGlobalVariableVocbase(v8::Isolate* isolate, v8::Handle<v8::String> name,
v8::Handle<v8::Value> value);

- #endif
+ #endif
@@ -47,7 +47,7 @@ using namespace arangodb::cache;
TEST_CASE("cache::Manager", "[cache][!hide][longRunning]") {
SECTION("test basic constructor function") {
uint64_t requestLimit = 1024 * 1024;
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, requestLimit);

REQUIRE(requestLimit == manager.globalLimit());

@@ -67,7 +67,7 @@ TEST_CASE("cache::Manager", "[cache][!hide][longRunning]") {
SECTION("test mixed cache types under mixed load") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -177,7 +177,7 @@ TEST_CASE("cache::Manager", "[cache][!hide][longRunning]") {
SECTION("test manager under cache lifecycle chaos") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};
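Throughout the cache tests the postFn handed to Manager keeps its bool return value (whether the task was scheduled), while the task it accepts now takes the new bool flag. A hedged sketch of how a component could consume a post function with this shape; the fall-back-to-inline behavior is illustrative only, not necessarily what cache::Manager does:

#include <functional>
#include <iostream>

using PostFn = std::function<bool(std::function<void(bool)>)>;

// Illustrative consumer: try to hand work to the scheduler; if posting is
// refused (postFn returns false), run the task inline and mark it direct.
void runMaintenance(PostFn const& postFn) {
  auto task = [](bool isDirect) {
    std::cout << "maintenance ran, isDirect=" << std::boolalpha << isDirect << "\n";
  };
  if (!postFn(task)) {
    task(true);  // assumption for illustration: fall back to direct execution
  }
}

int main() {
  // A postFn like the one in the tests: always refuses to schedule.
  PostFn refusing = [](std::function<void(bool)>) -> bool { return false; };
  runMaintenance(refusing);

  // A postFn that "schedules" by running immediately off the direct path.
  PostFn accepting = [](std::function<void(bool)> fn) -> bool { fn(false); return true; };
  runMaintenance(accepting);
  return 0;
}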
@@ -58,4 +58,4 @@ MockScheduler::~MockScheduler() {
_ioService->stop();
}

- void MockScheduler::post(std::function<void()> fn) { _ioService->post(fn); }
+ void MockScheduler::post(std::function<void(bool)> fn) { _ioService->post([fn](){fn(false);}); }
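The mock scheduler still forwards work to its io service, which only accepts nullary handlers, so the bool-taking task is wrapped in a small lambda that supplies false. A minimal standalone sketch of that adapter, using a plain std::function "executor" stand-in instead of the actual asio io service to keep it self-contained:

#include <functional>
#include <iostream>
#include <queue>

// Stand-in for an executor that only understands nullary handlers.
struct NullaryExecutor {
  std::queue<std::function<void()>> work;
  void post(std::function<void()> fn) { work.push(std::move(fn)); }
  void run() {
    while (!work.empty()) {
      work.front()();
      work.pop();
    }
  }
};

// Adapter: accept the new bool-taking task, wrap it so the executor can run it.
void post(NullaryExecutor& exec, std::function<void(bool)> fn) {
  exec.post([fn]() { fn(false); });  // tasks posted here never run directly
}

int main() {
  NullaryExecutor exec;
  post(exec, [](bool isDirect) {
    std::cout << "posted task, isDirect=" << std::boolalpha << isDirect << "\n";
  });
  exec.run();
  return 0;
}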
@@ -47,7 +47,7 @@ class MockScheduler {
public:
MockScheduler(size_t threads);
~MockScheduler();
- void post(std::function<void()> fn);
+ void post(std::function<void(bool)> fn);
};

}; // end namespace cache
@@ -44,7 +44,7 @@ using namespace arangodb::cache;

TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
SECTION("test basic cache creation") {
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 1024 * 1024);
auto cache1 = manager.createCache(CacheType::Plain, false, 256 * 1024);
REQUIRE(true);

@@ -61,7 +61,7 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {

SECTION("check that insertion works as expected") {
uint64_t cacheLimit = 256 * 1024;
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 4 * cacheLimit);
auto cache = manager.createCache(CacheType::Plain, false, cacheLimit);

@@ -112,7 +112,7 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {

SECTION("test that removal works as expected") {
uint64_t cacheLimit = 256 * 1024;
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 4 * cacheLimit);
auto cache = manager.createCache(CacheType::Plain, false, cacheLimit);

@@ -171,7 +171,7 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
SECTION("verify that cache can indeed grow when it runs out of space") {
uint64_t minimumUsage = 1024 * 1024;
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -196,7 +196,7 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
SECTION("test behavior under mixed load") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -290,7 +290,7 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {

SECTION("test hit rate statistics reporting") {
uint64_t cacheLimit = 256 * 1024;
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 4 * cacheLimit);
auto cacheMiss = manager.createCache(CacheType::Plain, true, cacheLimit);
auto cacheHit = manager.createCache(CacheType::Plain, true, cacheLimit);
@@ -50,7 +50,7 @@ TEST_CASE("cache::Rebalancer", "[cache][!hide][longRunning]") {
SECTION("test rebalancing with PlainCache") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -176,7 +176,7 @@ TEST_CASE("cache::Rebalancer", "[cache][!hide][longRunning]") {
SECTION("test rebalancing with TransactionalCache") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};
@@ -45,7 +45,7 @@ using namespace arangodb::cache;

TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
SECTION("test basic cache construction") {
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 1024 * 1024);
auto cache1 =
manager.createCache(CacheType::Transactional, false, 256 * 1024);

@@ -63,7 +63,7 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {

SECTION("verify that insertion works as expected") {
uint64_t cacheLimit = 256 * 1024;
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 4 * cacheLimit);
auto cache =
manager.createCache(CacheType::Transactional, false, cacheLimit);

@@ -115,7 +115,7 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {

SECTION("verify removal works as expected") {
uint64_t cacheLimit = 256 * 1024;
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 4 * cacheLimit);
auto cache =
manager.createCache(CacheType::Transactional, false, cacheLimit);

@@ -174,7 +174,7 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {

SECTION("verify blacklisting works as expected") {
uint64_t cacheLimit = 256 * 1024;
- auto postFn = [](std::function<void()>) -> bool { return false; };
+ auto postFn = [](std::function<void(bool)>) -> bool { return false; };
Manager manager(postFn, 4 * cacheLimit);
auto cache =
manager.createCache(CacheType::Transactional, false, cacheLimit);

@@ -240,7 +240,7 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
SECTION("verify cache can grow correctly when it runs out of space") {
uint64_t minimumUsage = 1024 * 1024;
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -265,7 +265,7 @@ TEST_CASE("cache::TransactionalCache", "[cache][!hide][longRunning]") {
SECTION("test behavior under mixed load") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};
@@ -75,7 +75,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
SECTION("test hit rate for read-only hotset workload") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -128,7 +128,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
SECTION("test hit rate for mixed workload") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -210,7 +210,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
SECTION("test transactionality for mixed workload") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};

@@ -297,7 +297,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
SECTION("test rebalancing in the wild") {
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
- auto postFn = [&scheduler](std::function<void()> fn) -> bool {
+ auto postFn = [&scheduler](std::function<void(bool)> fn) -> bool {
scheduler.post(fn);
return true;
};
@@ -574,8 +574,8 @@ std::shared_ptr<arangodb::Index> PhysicalCollectionMock::createIndex(arangodb::v

asio::io_context ioContext;
- auto poster = [&ioContext](std::function<void()> fn) -> void {
-   ioContext.post(fn);
+ auto poster = [&ioContext](std::function<void(bool)> fn) -> void {
+   ioContext.post([fn](){fn(false);});
};
arangodb::basics::LocalTaskQueue taskQueue(poster);
std::shared_ptr<arangodb::basics::LocalTaskQueue> taskQueuePtr(&taskQueue, [](arangodb::basics::LocalTaskQueue*)->void{});
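Worth noting in the last two context lines: taskQueue lives on the stack, and taskQueuePtr wraps it in a shared_ptr with a no-op deleter so APIs expecting shared ownership can be satisfied without transferring ownership. A small hedged sketch of that idiom (generic type, not the ArangoDB classes):

#include <iostream>
#include <memory>

struct Widget {
  int value = 7;
};

// Accepts shared ownership, but here the pointer merely aliases a stack object.
void inspect(std::shared_ptr<Widget> w) { std::cout << "value = " << w->value << "\n"; }

int main() {
  Widget local;  // owned by the stack frame, not by any shared_ptr
  // No-op deleter: reference counting still runs, but destroying the last
  // copy does nothing, so the stack object is never double-freed.
  std::shared_ptr<Widget> nonOwning(&local, [](Widget*) {});
  inspect(nonOwning);
  // Caution: every copy of nonOwning must be gone before "local" goes out of scope.
  return 0;
}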