mirror of https://gitee.com/bigwinds/arangodb
Feature/remove event loop (#5565)
parent 396d98a1cc
commit efc030ea87
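The diff below removes the EventLoop struct (an io_context pointer bundled with a rest::Scheduler pointer) and instead passes a Scheduler* directly into Task, SocketTask, ListenTask, the comm tasks and the Acceptor/Socket classes. The Scheduler becomes the only component that touches the io_context: it gains factory helpers (newDeadlineTimer, newStrand, newAcceptor, newResolver, ...) and a post(strand, callback) overload that carries the _nrQueued bookkeeping, while Socket hides its strand behind post() and runningInThisThread(). The following sketch only illustrates that ownership pattern; it uses Boost.Asio and invented stand-in classes (Scheduler, Connection), not the actual ArangoDB types, and it omits the job-counter and JobGuard details.

// Hedged sketch of the pattern this commit introduces; names are stand-ins.
#include <boost/asio.hpp>
#include <functional>
#include <iostream>
#include <memory>

namespace asio_ns = boost::asio;

// The scheduler owns the io_context and is the only place that touches it.
class Scheduler {
 public:
  asio_ns::io_context::strand* newStrand() {
    return new asio_ns::io_context::strand(_ioContext);
  }
  void post(asio_ns::io_context::strand& strand, std::function<void()> cb) {
    // the real Scheduler::post also updates _nrQueued and uses a JobGuard
    strand.post(std::move(cb));
  }
  void run() { _ioContext.run(); }

 private:
  asio_ns::io_context _ioContext;
};

// A connection-like task: it no longer stores an EventLoop, only a Scheduler*,
// and exposes its strand only through post() / runningInThisThread().
class Connection {
 public:
  explicit Connection(Scheduler* scheduler)
      : _scheduler(scheduler), _strand(scheduler->newStrand()) {}

  void post(std::function<void()> handler) {
    _scheduler->post(*_strand, std::move(handler));
  }
  bool runningInThisThread() const { return _strand->running_in_this_thread(); }

 private:
  Scheduler* _scheduler;
  std::unique_ptr<asio_ns::io_context::strand> _strand;
};

int main() {
  Scheduler scheduler;
  Connection conn(&scheduler);
  conn.post([&] {
    // handlers posted through the connection run on its strand
    std::cout << std::boolalpha << conn.runningInThisThread() << "\n";  // true
  });
  scheduler.run();
}

Callers such as SocketTask::start() then post work through the peer socket instead of reaching into _loop.scheduler, which mirrors the change repeated throughout the hunks below.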
|
@ -66,12 +66,12 @@ static std::string const Open("/_open/");
|
||||||
// --SECTION-- constructors and destructors
|
// --SECTION-- constructors and destructors
|
||||||
// -----------------------------------------------------------------------------
|
// -----------------------------------------------------------------------------
|
||||||
|
|
||||||
GeneralCommTask::GeneralCommTask(EventLoop loop, GeneralServer* server,
|
GeneralCommTask::GeneralCommTask(Scheduler* scheduler, GeneralServer* server,
|
||||||
std::unique_ptr<Socket> socket,
|
std::unique_ptr<Socket> socket,
|
||||||
ConnectionInfo&& info, double keepAliveTimeout,
|
ConnectionInfo&& info, double keepAliveTimeout,
|
||||||
bool skipSocketInit)
|
bool skipSocketInit)
|
||||||
: Task(loop, "GeneralCommTask"),
|
: Task(scheduler, "GeneralCommTask"),
|
||||||
SocketTask(loop, std::move(socket), std::move(info), keepAliveTimeout,
|
SocketTask(scheduler, std::move(socket), std::move(info), keepAliveTimeout,
|
||||||
skipSocketInit),
|
skipSocketInit),
|
||||||
_server(server),
|
_server(server),
|
||||||
_auth(nullptr) {
|
_auth(nullptr) {
|
||||||
|
@ -364,7 +364,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
|
||||||
} else if (handler->isDirect()) {
|
} else if (handler->isDirect()) {
|
||||||
isDirect = true;
|
isDirect = true;
|
||||||
} else if (queuePrio != JobQueue::BACKGROUND_QUEUE &&
|
} else if (queuePrio != JobQueue::BACKGROUND_QUEUE &&
|
||||||
_loop.scheduler->shouldExecuteDirect()) {
|
_scheduler->shouldExecuteDirect()) {
|
||||||
isDirect = true;
|
isDirect = true;
|
||||||
} else if (ServerState::instance()->isDBServer()) {
|
} else if (ServerState::instance()->isDBServer()) {
|
||||||
isPrio = true;
|
isPrio = true;
|
||||||
|
@ -388,7 +388,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
|
||||||
auto self = shared_from_this();
|
auto self = shared_from_this();
|
||||||
|
|
||||||
if (isPrio) {
|
if (isPrio) {
|
||||||
_loop.scheduler->post([self, this, handler]() {
|
_scheduler->post([self, this, handler]() {
|
||||||
handleRequestDirectly(basics::ConditionalLocking::DoLock,
|
handleRequestDirectly(basics::ConditionalLocking::DoLock,
|
||||||
std::move(handler));
|
std::move(handler));
|
||||||
});
|
});
|
||||||
|
@ -397,7 +397,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
|
||||||
|
|
||||||
// ok, we need to queue the request
|
// ok, we need to queue the request
|
||||||
LOG_TOPIC(TRACE, Logger::THREADS) << "too much work, queuing handler: "
|
LOG_TOPIC(TRACE, Logger::THREADS) << "too much work, queuing handler: "
|
||||||
<< _loop.scheduler->infoStatus();
|
<< _scheduler->infoStatus();
|
||||||
uint64_t messageId = handler->messageId();
|
uint64_t messageId = handler->messageId();
|
||||||
auto job = std::make_unique<Job>(
|
auto job = std::make_unique<Job>(
|
||||||
_server, std::move(handler),
|
_server, std::move(handler),
|
||||||
|
@ -419,7 +419,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
|
||||||
// Just run the handler, could have been called in a different thread
|
// Just run the handler, could have been called in a different thread
|
||||||
void GeneralCommTask::handleRequestDirectly(
|
void GeneralCommTask::handleRequestDirectly(
|
||||||
bool doLock, std::shared_ptr<RestHandler> handler) {
|
bool doLock, std::shared_ptr<RestHandler> handler) {
|
||||||
TRI_ASSERT(doLock || _peer->strand.running_in_this_thread());
|
TRI_ASSERT(doLock || _peer->runningInThisThread());
|
||||||
|
|
||||||
handler->runHandler([this, doLock](rest::RestHandler* handler) {
|
handler->runHandler([this, doLock](rest::RestHandler* handler) {
|
||||||
RequestStatistics* stat = handler->stealStatistics();
|
RequestStatistics* stat = handler->stealStatistics();
|
||||||
|
@ -427,15 +427,12 @@ void GeneralCommTask::handleRequestDirectly(
|
||||||
if (doLock) {
|
if (doLock) {
|
||||||
auto self = shared_from_this();
|
auto self = shared_from_this();
|
||||||
auto h = handler->shared_from_this();
|
auto h = handler->shared_from_this();
|
||||||
_loop.scheduler->_nrQueued++;
|
|
||||||
_peer->strand.post([self, this, stat, h]() {
|
_peer->post([self, this, stat, h]() {
|
||||||
_loop.scheduler->_nrQueued--;
|
|
||||||
JobGuard guard(_loop);
|
|
||||||
guard.work();
|
|
||||||
addResponse(*(h->response()), stat);
|
addResponse(*(h->response()), stat);
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
TRI_ASSERT(_peer->strand.running_in_this_thread());
|
TRI_ASSERT(_peer->runningInThisThread());
|
||||||
addResponse(*handler->response(), stat);
|
addResponse(*handler->response(), stat);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
|
@ -85,7 +85,7 @@ class GeneralCommTask : public SocketTask {
|
||||||
GeneralCommTask const& operator=(GeneralCommTask const&) = delete;
|
GeneralCommTask const& operator=(GeneralCommTask const&) = delete;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
GeneralCommTask(EventLoop, GeneralServer*, std::unique_ptr<Socket>,
|
GeneralCommTask(Scheduler*, GeneralServer*, std::unique_ptr<Socket>,
|
||||||
ConnectionInfo&&, double keepAliveTimeout,
|
ConnectionInfo&&, double keepAliveTimeout,
|
||||||
bool skipSocketInit = false);
|
bool skipSocketInit = false);
|
||||||
|
|
||||||
|
|
|
@ -38,11 +38,11 @@ using namespace arangodb::rest;
|
||||||
/// @brief listen to given port
|
/// @brief listen to given port
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
GeneralListenTask::GeneralListenTask(EventLoop loop, GeneralServer* server,
|
GeneralListenTask::GeneralListenTask(Scheduler* scheduler, GeneralServer* server,
|
||||||
Endpoint* endpoint,
|
Endpoint* endpoint,
|
||||||
ProtocolType connectionType)
|
ProtocolType connectionType)
|
||||||
: Task(loop, "GeneralListenTask"),
|
: Task(scheduler, "GeneralListenTask"),
|
||||||
ListenTask(loop, endpoint),
|
ListenTask(scheduler, endpoint),
|
||||||
_server(server),
|
_server(server),
|
||||||
_connectionType(connectionType) {
|
_connectionType(connectionType) {
|
||||||
_keepAliveTimeout = GeneralServerFeature::keepAliveTimeout();
|
_keepAliveTimeout = GeneralServerFeature::keepAliveTimeout();
|
||||||
|
@ -52,7 +52,7 @@ GeneralListenTask::GeneralListenTask(EventLoop loop, GeneralServer* server,
|
||||||
|
|
||||||
void GeneralListenTask::handleConnected(std::unique_ptr<Socket> socket,
|
void GeneralListenTask::handleConnected(std::unique_ptr<Socket> socket,
|
||||||
ConnectionInfo&& info) {
|
ConnectionInfo&& info) {
|
||||||
auto commTask = std::make_shared<HttpCommTask>(_loop, _server, std::move(socket),
|
auto commTask = std::make_shared<HttpCommTask>(_scheduler, _server, std::move(socket),
|
||||||
std::move(info), _keepAliveTimeout);
|
std::move(info), _keepAliveTimeout);
|
||||||
bool res = commTask->start();
|
bool res = commTask->start();
|
||||||
LOG_TOPIC_IF(DEBUG, Logger::COMMUNICATION, res) << "Started comm task";
|
LOG_TOPIC_IF(DEBUG, Logger::COMMUNICATION, res) << "Started comm task";
|
||||||
|
|
|
@ -42,7 +42,7 @@ class GeneralListenTask final : public ListenTask {
|
||||||
GeneralListenTask& operator=(GeneralListenTask const&) = delete;
|
GeneralListenTask& operator=(GeneralListenTask const&) = delete;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
GeneralListenTask(EventLoop, GeneralServer*, Endpoint*,
|
GeneralListenTask(Scheduler*, GeneralServer*, Endpoint*,
|
||||||
ProtocolType connectionType);
|
ProtocolType connectionType);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
|
|
@ -43,9 +43,7 @@ using namespace arangodb::rest;
|
||||||
// --SECTION-- constructors and destructors
|
// --SECTION-- constructors and destructors
|
||||||
// -----------------------------------------------------------------------------
|
// -----------------------------------------------------------------------------
|
||||||
|
|
||||||
GeneralServer::~GeneralServer() {
|
GeneralServer::~GeneralServer() { _listenTasks.clear(); }
|
||||||
_listenTasks.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
// -----------------------------------------------------------------------------
|
// -----------------------------------------------------------------------------
|
||||||
// --SECTION-- public methods
|
// --SECTION-- public methods
|
||||||
|
@ -57,15 +55,17 @@ void GeneralServer::setEndpointList(EndpointList const* list) {
|
||||||
|
|
||||||
void GeneralServer::startListening() {
|
void GeneralServer::startListening() {
|
||||||
for (auto& it : _endpointList->allEndpoints()) {
|
for (auto& it : _endpointList->allEndpoints()) {
|
||||||
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "trying to bind to endpoint '" << it.first
|
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "trying to bind to endpoint '"
|
||||||
<< "' for requests";
|
<< it.first << "' for requests";
|
||||||
|
|
||||||
bool ok = openEndpoint(it.second);
|
bool ok = openEndpoint(it.second);
|
||||||
|
|
||||||
if (ok) {
|
if (ok) {
|
||||||
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "bound to endpoint '" << it.first << "'";
|
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "bound to endpoint '"
|
||||||
|
<< it.first << "'";
|
||||||
} else {
|
} else {
|
||||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "failed to bind to endpoint '" << it.first
|
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
|
||||||
|
<< "failed to bind to endpoint '" << it.first
|
||||||
<< "'. Please check whether another instance is already "
|
<< "'. Please check whether another instance is already "
|
||||||
"running using this endpoint and review your endpoints "
|
"running using this endpoint and review your endpoints "
|
||||||
"configuration.";
|
"configuration.";
|
||||||
|
@ -94,8 +94,8 @@ bool GeneralServer::openEndpoint(Endpoint* endpoint) {
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<ListenTask> task;
|
std::unique_ptr<ListenTask> task;
|
||||||
task.reset(new GeneralListenTask(SchedulerFeature::SCHEDULER->eventLoop(),
|
task.reset(new GeneralListenTask(SchedulerFeature::SCHEDULER, this, endpoint,
|
||||||
this, endpoint, protocolType));
|
protocolType));
|
||||||
if (!task->start()) {
|
if (!task->start()) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
|
@ -46,11 +46,11 @@ size_t const HttpCommTask::MaximalBodySize = 1024 * 1024 * 1024; // 1024 MB
|
||||||
size_t const HttpCommTask::MaximalPipelineSize = 1024 * 1024 * 1024; // 1024 MB
|
size_t const HttpCommTask::MaximalPipelineSize = 1024 * 1024 * 1024; // 1024 MB
|
||||||
size_t const HttpCommTask::RunCompactEvery = 500;
|
size_t const HttpCommTask::RunCompactEvery = 500;
|
||||||
|
|
||||||
HttpCommTask::HttpCommTask(EventLoop loop, GeneralServer* server,
|
HttpCommTask::HttpCommTask(Scheduler* scheduler, GeneralServer* server,
|
||||||
std::unique_ptr<Socket> socket,
|
std::unique_ptr<Socket> socket,
|
||||||
ConnectionInfo&& info, double timeout)
|
ConnectionInfo&& info, double timeout)
|
||||||
: Task(loop, "HttpCommTask"),
|
: Task(scheduler, "HttpCommTask"),
|
||||||
GeneralCommTask(loop, server, std::move(socket), std::move(info),
|
GeneralCommTask(scheduler, server, std::move(socket), std::move(info),
|
||||||
timeout),
|
timeout),
|
||||||
_readPosition(0),
|
_readPosition(0),
|
||||||
_startPosition(0),
|
_startPosition(0),
|
||||||
|
@ -94,7 +94,8 @@ void HttpCommTask::addSimpleResponse(rest::ResponseCode code, rest::ContentType
|
||||||
|
|
||||||
void HttpCommTask::addResponse(GeneralResponse& baseResponse,
|
void HttpCommTask::addResponse(GeneralResponse& baseResponse,
|
||||||
RequestStatistics* stat) {
|
RequestStatistics* stat) {
|
||||||
TRI_ASSERT(_peer->strand.running_in_this_thread());
|
TRI_ASSERT(_peer->runningInThisThread());
|
||||||
|
|
||||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||||
HttpResponse& response = dynamic_cast<HttpResponse&>(baseResponse);
|
HttpResponse& response = dynamic_cast<HttpResponse&>(baseResponse);
|
||||||
#else
|
#else
|
||||||
|
@ -201,7 +202,7 @@ void HttpCommTask::addResponse(GeneralResponse& baseResponse,
|
||||||
// reads data from the socket
|
// reads data from the socket
|
||||||
// caller must hold the _lock
|
// caller must hold the _lock
|
||||||
bool HttpCommTask::processRead(double startTime) {
|
bool HttpCommTask::processRead(double startTime) {
|
||||||
TRI_ASSERT(_peer->strand.running_in_this_thread());
|
TRI_ASSERT(_peer->runningInThisThread());
|
||||||
|
|
||||||
cancelKeepAlive();
|
cancelKeepAlive();
|
||||||
TRI_ASSERT(_readBuffer.c_str() != nullptr);
|
TRI_ASSERT(_readBuffer.c_str() != nullptr);
|
||||||
|
@ -281,7 +282,7 @@ bool HttpCommTask::processRead(double startTime) {
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<GeneralCommTask> commTask = std::make_shared<VstCommTask>(
|
std::shared_ptr<GeneralCommTask> commTask = std::make_shared<VstCommTask>(
|
||||||
_loop, _server, std::move(_peer), std::move(_connectionInfo),
|
_scheduler, _server, std::move(_peer), std::move(_connectionInfo),
|
||||||
GeneralServerFeature::keepAliveTimeout(),
|
GeneralServerFeature::keepAliveTimeout(),
|
||||||
protocolVersion, /*skipSocketInit*/ true);
|
protocolVersion, /*skipSocketInit*/ true);
|
||||||
commTask->addToReadBuffer(_readBuffer.c_str() + 11,
|
commTask->addToReadBuffer(_readBuffer.c_str() + 11,
|
||||||
|
@ -594,7 +595,8 @@ bool HttpCommTask::processRead(double startTime) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void HttpCommTask::processRequest(std::unique_ptr<HttpRequest> request) {
|
void HttpCommTask::processRequest(std::unique_ptr<HttpRequest> request) {
|
||||||
TRI_ASSERT(_peer->strand.running_in_this_thread());
|
TRI_ASSERT(_peer->runningInThisThread());
|
||||||
|
|
||||||
{
|
{
|
||||||
LOG_TOPIC(DEBUG, Logger::REQUESTS)
|
LOG_TOPIC(DEBUG, Logger::REQUESTS)
|
||||||
<< "\"http-request-begin\",\"" << (void*)this << "\",\""
|
<< "\"http-request-begin\",\"" << (void*)this << "\",\""
|
||||||
|
|
|
@ -17,7 +17,7 @@ class HttpCommTask final : public GeneralCommTask {
|
||||||
static size_t const RunCompactEvery;
|
static size_t const RunCompactEvery;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
HttpCommTask(EventLoop, GeneralServer*, std::unique_ptr<Socket> socket,
|
HttpCommTask(Scheduler*, GeneralServer*, std::unique_ptr<Socket> socket,
|
||||||
ConnectionInfo&&, double timeout);
|
ConnectionInfo&&, double timeout);
|
||||||
|
|
||||||
arangodb::Endpoint::TransportType transportType() override {
|
arangodb::Endpoint::TransportType transportType() override {
|
||||||
|
|
|
@ -26,7 +26,6 @@
|
||||||
|
|
||||||
#include "Basics/Common.h"
|
#include "Basics/Common.h"
|
||||||
#include "Rest/GeneralResponse.h"
|
#include "Rest/GeneralResponse.h"
|
||||||
#include "Scheduler/EventLoop.h"
|
|
||||||
#include "Scheduler/JobQueue.h"
|
#include "Scheduler/JobQueue.h"
|
||||||
|
|
||||||
namespace arangodb {
|
namespace arangodb {
|
||||||
|
|
|
@ -79,12 +79,12 @@ inline std::size_t validateAndCount(char const* vpStart,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
VstCommTask::VstCommTask(EventLoop loop, GeneralServer* server,
|
VstCommTask::VstCommTask(Scheduler* scheduler, GeneralServer* server,
|
||||||
std::unique_ptr<Socket> socket, ConnectionInfo&& info,
|
std::unique_ptr<Socket> socket, ConnectionInfo&& info,
|
||||||
double timeout, ProtocolVersion protocolVersion,
|
double timeout, ProtocolVersion protocolVersion,
|
||||||
bool skipInit)
|
bool skipInit)
|
||||||
: Task(loop, "VstCommTask"),
|
: Task(scheduler, "VstCommTask"),
|
||||||
GeneralCommTask(loop, server, std::move(socket), std::move(info), timeout,
|
GeneralCommTask(scheduler, server, std::move(socket), std::move(info), timeout,
|
||||||
skipInit),
|
skipInit),
|
||||||
_authorized(false),
|
_authorized(false),
|
||||||
_authMethod(rest::AuthenticationMethod::NONE),
|
_authMethod(rest::AuthenticationMethod::NONE),
|
||||||
|
@ -119,8 +119,8 @@ void VstCommTask::addSimpleResponse(rest::ResponseCode code, rest::ContentType r
|
||||||
|
|
||||||
void VstCommTask::addResponse(GeneralResponse& baseResponse,
|
void VstCommTask::addResponse(GeneralResponse& baseResponse,
|
||||||
RequestStatistics* stat) {
|
RequestStatistics* stat) {
|
||||||
TRI_ASSERT(_peer->strand.running_in_this_thread());
|
TRI_ASSERT(_peer->runningInThisThread());
|
||||||
//_lock.assertLockedByCurrentThread();
|
|
||||||
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
|
||||||
VstResponse& response = dynamic_cast<VstResponse&>(baseResponse);
|
VstResponse& response = dynamic_cast<VstResponse&>(baseResponse);
|
||||||
#else
|
#else
|
||||||
|
@ -316,8 +316,7 @@ void VstCommTask::handleAuthHeader(VPackSlice const& header,
|
||||||
|
|
||||||
// reads data from the socket
|
// reads data from the socket
|
||||||
bool VstCommTask::processRead(double startTime) {
|
bool VstCommTask::processRead(double startTime) {
|
||||||
TRI_ASSERT(_peer->strand.running_in_this_thread());
|
TRI_ASSERT(_peer->runningInThisThread());
|
||||||
//_lock.assertLockedByCurrentThread();
|
|
||||||
|
|
||||||
auto& prv = _processReadVariables;
|
auto& prv = _processReadVariables;
|
||||||
auto chunkBegin = _readBuffer.begin() + prv._readBufferOffset;
|
auto chunkBegin = _readBuffer.begin() + prv._readBufferOffset;
|
||||||
|
|
|
@ -38,7 +38,7 @@ namespace rest {
|
||||||
|
|
||||||
class VstCommTask final : public GeneralCommTask {
|
class VstCommTask final : public GeneralCommTask {
|
||||||
public:
|
public:
|
||||||
VstCommTask(EventLoop, GeneralServer*, std::unique_ptr<Socket> socket,
|
VstCommTask(Scheduler*, GeneralServer*, std::unique_ptr<Socket> socket,
|
||||||
ConnectionInfo&&, double timeout, ProtocolVersion protocolVersion,
|
ConnectionInfo&&, double timeout, ProtocolVersion protocolVersion,
|
||||||
bool skipSocketInit = false);
|
bool skipSocketInit = false);
|
||||||
|
|
||||||
|
|
|
@ -419,12 +419,10 @@ void Conductor::startRecovery() {
|
||||||
_statistics.reset();
|
_statistics.reset();
|
||||||
|
|
||||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||||
asio::io_context* ioService = SchedulerFeature::SCHEDULER->ioContext();
|
|
||||||
TRI_ASSERT(ioService != nullptr);
|
|
||||||
|
|
||||||
// let's wait for a final state in the cluster
|
// let's wait for a final state in the cluster
|
||||||
_boost_timer.reset(new asio::deadline_timer(
|
_boost_timer.reset(SchedulerFeature::SCHEDULER->newDeadlineTimer(
|
||||||
*ioService, boost::posix_time::seconds(2)));
|
boost::posix_time::seconds(2)));
|
||||||
_boost_timer->async_wait([this](const asio::error_code& error) {
|
_boost_timer->async_wait([this](const asio::error_code& error) {
|
||||||
_boost_timer.reset();
|
_boost_timer.reset();
|
||||||
|
|
||||||
|
|
|
@ -576,15 +576,13 @@ void Worker<V, E, M>::_continueAsync() {
|
||||||
}
|
}
|
||||||
|
|
||||||
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
|
||||||
asio::io_context* ioService = SchedulerFeature::SCHEDULER->ioContext();
|
|
||||||
TRI_ASSERT(ioService != nullptr);
|
|
||||||
|
|
||||||
// wait for new messages before beginning to process
|
// wait for new messages before beginning to process
|
||||||
int64_t milli =
|
int64_t milli =
|
||||||
_writeCache->containedMessageCount() < _messageBatchSize ? 50 : 5;
|
_writeCache->containedMessageCount() < _messageBatchSize ? 50 : 5;
|
||||||
// start next iteration in $milli mseconds.
|
// start next iteration in $milli mseconds.
|
||||||
_boost_timer.reset(new asio::deadline_timer(
|
_boost_timer.reset(SchedulerFeature::SCHEDULER->newDeadlineTimer(
|
||||||
*ioService, boost::posix_time::millisec(milli)));
|
boost::posix_time::millisec(milli)));
|
||||||
_boost_timer->async_wait([this](const asio::error_code& error) {
|
_boost_timer->async_wait([this](const asio::error_code& error) {
|
||||||
if (error != asio::error::operation_aborted) {
|
if (error != asio::error::operation_aborted) {
|
||||||
{ // swap these pointers atomically
|
{ // swap these pointers atomically
|
||||||
|
|
|
@ -31,15 +31,15 @@
|
||||||
|
|
||||||
using namespace arangodb;
|
using namespace arangodb;
|
||||||
|
|
||||||
Acceptor::Acceptor(asio_ns::io_context& ioService, Endpoint* endpoint)
|
Acceptor::Acceptor(rest::Scheduler* scheduler, Endpoint* endpoint)
|
||||||
: _ioContext(ioService), _endpoint(endpoint) {}
|
: _scheduler(scheduler), _endpoint(endpoint) {}
|
||||||
|
|
||||||
std::unique_ptr<Acceptor> Acceptor::factory(asio_ns::io_context& ioService,
|
std::unique_ptr<Acceptor> Acceptor::factory(rest::Scheduler* scheduler,
|
||||||
Endpoint* endpoint) {
|
Endpoint* endpoint) {
|
||||||
#ifdef ARANGODB_HAVE_DOMAIN_SOCKETS
|
#ifdef ARANGODB_HAVE_DOMAIN_SOCKETS
|
||||||
if (endpoint->domainType() == Endpoint::DomainType::UNIX) {
|
if (endpoint->domainType() == Endpoint::DomainType::UNIX) {
|
||||||
return std::make_unique<AcceptorUnixDomain>(ioService, endpoint);
|
return std::make_unique<AcceptorUnixDomain>(scheduler, endpoint);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
return std::make_unique<AcceptorTcp>(ioService, endpoint);
|
return std::make_unique<AcceptorTcp>(scheduler, endpoint);
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,7 +36,7 @@ class Acceptor {
|
||||||
typedef std::function<void(asio_ns::error_code const&)> AcceptHandler;
|
typedef std::function<void(asio_ns::error_code const&)> AcceptHandler;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
Acceptor(asio_ns::io_context& ioService, Endpoint* endpoint);
|
Acceptor(rest::Scheduler*, Endpoint* endpoint);
|
||||||
virtual ~Acceptor() {}
|
virtual ~Acceptor() {}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
@ -46,11 +46,10 @@ class Acceptor {
|
||||||
std::unique_ptr<Socket> movePeer() { return std::move(_peer); };
|
std::unique_ptr<Socket> movePeer() { return std::move(_peer); };
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static std::unique_ptr<Acceptor> factory(asio_ns::io_context& _ioService,
|
static std::unique_ptr<Acceptor> factory(rest::Scheduler*, Endpoint*);
|
||||||
Endpoint* endpoint);
|
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
asio_ns::io_context& _ioContext;
|
rest::Scheduler* _scheduler;
|
||||||
Endpoint* _endpoint;
|
Endpoint* _endpoint;
|
||||||
std::unique_ptr<Socket> _peer;
|
std::unique_ptr<Socket> _peer;
|
||||||
};
|
};
|
||||||
|
|
|
@ -31,7 +31,7 @@
|
||||||
using namespace arangodb;
|
using namespace arangodb;
|
||||||
|
|
||||||
void AcceptorTcp::open() {
|
void AcceptorTcp::open() {
|
||||||
asio_ns::ip::tcp::resolver resolver(_ioContext);
|
std::unique_ptr<asio_ns::ip::tcp::resolver> resolver(_scheduler->newResolver());
|
||||||
|
|
||||||
std::string hostname = _endpoint->host();
|
std::string hostname = _endpoint->host();
|
||||||
int portNumber = _endpoint->port();
|
int portNumber = _endpoint->port();
|
||||||
|
@ -53,7 +53,7 @@ void AcceptorTcp::open() {
|
||||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_IP_ADDRESS_INVALID);
|
THROW_ARANGO_EXCEPTION(TRI_ERROR_IP_ADDRESS_INVALID);
|
||||||
}
|
}
|
||||||
|
|
||||||
asio_ns::ip::tcp::resolver::iterator iter = resolver.resolve(*query, err);
|
asio_ns::ip::tcp::resolver::iterator iter = resolver->resolve(*query, err);
|
||||||
if (err) {
|
if (err) {
|
||||||
LOG_TOPIC(ERR, Logger::COMMUNICATION)
|
LOG_TOPIC(ERR, Logger::COMMUNICATION)
|
||||||
<< "unable to to resolve endpoint ' " << _endpoint->specification()
|
<< "unable to to resolve endpoint ' " << _endpoint->specification()
|
||||||
|
@ -68,7 +68,7 @@ void AcceptorTcp::open() {
|
||||||
|
|
||||||
asioEndpoint = iter->endpoint(); // function not documented in boost?!
|
asioEndpoint = iter->endpoint(); // function not documented in boost?!
|
||||||
}
|
}
|
||||||
_acceptor.open(asioEndpoint.protocol());
|
_acceptor->open(asioEndpoint.protocol());
|
||||||
|
|
||||||
#ifdef _WIN32
|
#ifdef _WIN32
|
||||||
// on Windows everything is different of course:
|
// on Windows everything is different of course:
|
||||||
|
@ -85,11 +85,11 @@ void AcceptorTcp::open() {
|
||||||
"unable to set acceptor socket option");
|
"unable to set acceptor socket option");
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
_acceptor.set_option(asio_ns::ip::tcp::acceptor::reuse_address(
|
_acceptor->set_option(asio_ns::ip::tcp::acceptor::reuse_address(
|
||||||
((EndpointIp*)_endpoint)->reuseAddress()));
|
((EndpointIp*)_endpoint)->reuseAddress()));
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
_acceptor.bind(asioEndpoint, err);
|
_acceptor->bind(asioEndpoint, err);
|
||||||
if (err) {
|
if (err) {
|
||||||
LOG_TOPIC(ERR, Logger::COMMUNICATION) << "unable to bind to endpoint '"
|
LOG_TOPIC(ERR, Logger::COMMUNICATION) << "unable to bind to endpoint '"
|
||||||
<< _endpoint->specification()
|
<< _endpoint->specification()
|
||||||
|
@ -98,7 +98,7 @@ void AcceptorTcp::open() {
|
||||||
}
|
}
|
||||||
|
|
||||||
TRI_ASSERT(_endpoint->listenBacklog() > 8);
|
TRI_ASSERT(_endpoint->listenBacklog() > 8);
|
||||||
_acceptor.listen(_endpoint->listenBacklog(), err);
|
_acceptor->listen(_endpoint->listenBacklog(), err);
|
||||||
if (err) {
|
if (err) {
|
||||||
LOG_TOPIC(ERR, Logger::COMMUNICATION) << "unable to listen to endpoint '"
|
LOG_TOPIC(ERR, Logger::COMMUNICATION) << "unable to listen to endpoint '"
|
||||||
<< _endpoint->specification() << ": "
|
<< _endpoint->specification() << ": "
|
||||||
|
@ -110,13 +110,13 @@ void AcceptorTcp::open() {
|
||||||
void AcceptorTcp::asyncAccept(AcceptHandler const& handler) {
|
void AcceptorTcp::asyncAccept(AcceptHandler const& handler) {
|
||||||
TRI_ASSERT(!_peer);
|
TRI_ASSERT(!_peer);
|
||||||
if (_endpoint->encryption() == Endpoint::EncryptionType::SSL) {
|
if (_endpoint->encryption() == Endpoint::EncryptionType::SSL) {
|
||||||
_peer.reset(new SocketSslTcp(_ioContext,
|
_peer.reset(new SocketSslTcp(_scheduler,
|
||||||
SslServerFeature::SSL->createSslContext()));
|
SslServerFeature::SSL->createSslContext()));
|
||||||
SocketSslTcp* peer = static_cast<SocketSslTcp*>(_peer.get());
|
SocketSslTcp* peer = static_cast<SocketSslTcp*>(_peer.get());
|
||||||
_acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
|
_acceptor->async_accept(peer->_socket, peer->_peerEndpoint, handler);
|
||||||
} else {
|
} else {
|
||||||
_peer.reset(new SocketTcp(_ioContext));
|
_peer.reset(new SocketTcp(_scheduler));
|
||||||
SocketTcp* peer = static_cast<SocketTcp*>(_peer.get());
|
SocketTcp* peer = static_cast<SocketTcp*>(_peer.get());
|
||||||
_acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
|
_acceptor->async_accept(*peer->_socket, peer->_peerEndpoint, handler);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,16 +28,16 @@
|
||||||
namespace arangodb {
|
namespace arangodb {
|
||||||
class AcceptorTcp final : public Acceptor {
|
class AcceptorTcp final : public Acceptor {
|
||||||
public:
|
public:
|
||||||
AcceptorTcp(asio_ns::io_context& ioContext, Endpoint* endpoint)
|
AcceptorTcp(rest::Scheduler* scheduler, Endpoint* endpoint)
|
||||||
: Acceptor(ioContext, endpoint), _acceptor(ioContext) {}
|
: Acceptor(scheduler, endpoint), _acceptor(scheduler->newAcceptor()) {}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
void open() override;
|
void open() override;
|
||||||
void close() override { _acceptor.close(); };
|
void close() override { _acceptor->close(); };
|
||||||
void asyncAccept(Acceptor::AcceptHandler const& handler) override;
|
void asyncAccept(Acceptor::AcceptHandler const& handler) override;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
asio_ns::ip::tcp::acceptor _acceptor;
|
std::unique_ptr<asio_ns::ip::tcp::acceptor> _acceptor;
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -45,23 +45,23 @@ void AcceptorUnixDomain::open() {
|
||||||
}
|
}
|
||||||
|
|
||||||
asio_ns::local::stream_protocol::stream_protocol::endpoint endpoint(path);
|
asio_ns::local::stream_protocol::stream_protocol::endpoint endpoint(path);
|
||||||
_acceptor.open(endpoint.protocol());
|
_acceptor->open(endpoint.protocol());
|
||||||
_acceptor.bind(endpoint);
|
_acceptor->bind(endpoint);
|
||||||
_acceptor.listen();
|
_acceptor->listen();
|
||||||
}
|
}
|
||||||
|
|
||||||
void AcceptorUnixDomain::asyncAccept(AcceptHandler const& handler) {
|
void AcceptorUnixDomain::asyncAccept(AcceptHandler const& handler) {
|
||||||
TRI_ASSERT(!_peer);
|
TRI_ASSERT(!_peer);
|
||||||
_peer.reset(new SocketUnixDomain(_ioContext));
|
_peer.reset(new SocketUnixDomain(_scheduler));
|
||||||
auto peer = dynamic_cast<SocketUnixDomain*>(_peer.get());
|
auto peer = dynamic_cast<SocketUnixDomain*>(_peer.get());
|
||||||
if (peer == nullptr) {
|
if (peer == nullptr) {
|
||||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected socket type");
|
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected socket type");
|
||||||
}
|
}
|
||||||
_acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
|
_acceptor->async_accept(*peer->_socket, peer->_peerEndpoint, handler);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AcceptorUnixDomain::close() {
|
void AcceptorUnixDomain::close() {
|
||||||
_acceptor.close();
|
_acceptor->close();
|
||||||
int error = 0;
|
int error = 0;
|
||||||
std::string path = ((EndpointUnixDomain*) _endpoint)->path();
|
std::string path = ((EndpointUnixDomain*) _endpoint)->path();
|
||||||
if (!basics::FileUtils::remove(path, &error)) {
|
if (!basics::FileUtils::remove(path, &error)) {
|
||||||
|
|
|
@ -28,8 +28,9 @@
|
||||||
namespace arangodb {
|
namespace arangodb {
|
||||||
class AcceptorUnixDomain final : public Acceptor {
|
class AcceptorUnixDomain final : public Acceptor {
|
||||||
public:
|
public:
|
||||||
AcceptorUnixDomain(asio_ns::io_context& ioService, Endpoint* endpoint)
|
AcceptorUnixDomain(rest::Scheduler* scheduler, Endpoint* endpoint)
|
||||||
: Acceptor(ioService, endpoint), _acceptor(ioService) {}
|
: Acceptor(scheduler, endpoint),
|
||||||
|
_acceptor(scheduler->newDomainAcceptor()) {}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
void open() override;
|
void open() override;
|
||||||
|
@ -37,7 +38,7 @@ class AcceptorUnixDomain final : public Acceptor {
|
||||||
void asyncAccept(AcceptHandler const& handler) override;
|
void asyncAccept(AcceptHandler const& handler) override;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
asio_ns::local::stream_protocol::acceptor _acceptor;
|
std::unique_ptr<asio_ns::local::stream_protocol::acceptor> _acceptor;
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,48 +0,0 @@
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
/// DISCLAIMER
|
|
||||||
///
|
|
||||||
/// Copyright 2014-2018 ArangoDB GmbH, Cologne, Germany
|
|
||||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
|
||||||
///
|
|
||||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
/// you may not use this file except in compliance with the License.
|
|
||||||
/// You may obtain a copy of the License at
|
|
||||||
///
|
|
||||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
///
|
|
||||||
/// Unless required by applicable law or agreed to in writing, software
|
|
||||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
/// See the License for the specific language governing permissions and
|
|
||||||
/// limitations under the License.
|
|
||||||
///
|
|
||||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
|
||||||
///
|
|
||||||
/// @author Dr. Frank Celler
|
|
||||||
/// @author Achim Brandt
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
#ifndef ARANGOD_SCHEDULER_EVENTS_H
|
|
||||||
#define ARANGOD_SCHEDULER_EVENTS_H 1
|
|
||||||
|
|
||||||
#include "Basics/Common.h"
|
|
||||||
|
|
||||||
#include "Scheduler/Socket.h"
|
|
||||||
|
|
||||||
namespace arangodb {
|
|
||||||
namespace rest {
|
|
||||||
class Scheduler;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct EventLoop {
|
|
||||||
EventLoop(asio_ns::io_context* service, rest::Scheduler* schdlr)
|
|
||||||
: ioContext(service), scheduler(schdlr) {}
|
|
||||||
|
|
||||||
EventLoop() : EventLoop(nullptr, nullptr) {}
|
|
||||||
|
|
||||||
asio_ns::io_context* ioContext;
|
|
||||||
rest::Scheduler* scheduler;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
|
|
@ -26,7 +26,6 @@
|
||||||
#include "Basics/Common.h"
|
#include "Basics/Common.h"
|
||||||
|
|
||||||
#include "Basics/SameThreadAsserter.h"
|
#include "Basics/SameThreadAsserter.h"
|
||||||
#include "Scheduler/EventLoop.h"
|
|
||||||
#include "Scheduler/Scheduler.h"
|
#include "Scheduler/Scheduler.h"
|
||||||
|
|
||||||
namespace arangodb {
|
namespace arangodb {
|
||||||
|
@ -39,8 +38,8 @@ class JobGuard : public SameThreadAsserter {
|
||||||
JobGuard(JobGuard const&) = delete;
|
JobGuard(JobGuard const&) = delete;
|
||||||
JobGuard& operator=(JobGuard const&) = delete;
|
JobGuard& operator=(JobGuard const&) = delete;
|
||||||
|
|
||||||
explicit JobGuard(EventLoop const& loop) : SameThreadAsserter(), _scheduler(loop.scheduler) {}
|
explicit JobGuard(rest::Scheduler* scheduler)
|
||||||
explicit JobGuard(rest::Scheduler* scheduler) : SameThreadAsserter(), _scheduler(scheduler) {}
|
: SameThreadAsserter(), _scheduler(scheduler) {}
|
||||||
~JobGuard() { release(); }
|
~JobGuard() { release(); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
|
@ -37,11 +37,11 @@ using namespace arangodb::rest;
|
||||||
// --SECTION-- constructors and destructors
|
// --SECTION-- constructors and destructors
|
||||||
// -----------------------------------------------------------------------------
|
// -----------------------------------------------------------------------------
|
||||||
|
|
||||||
ListenTask::ListenTask(EventLoop loop, Endpoint* endpoint)
|
ListenTask::ListenTask(Scheduler* scheduler, Endpoint* endpoint)
|
||||||
: Task(loop, "ListenTask"),
|
: Task(scheduler, "ListenTask"),
|
||||||
_endpoint(endpoint),
|
_endpoint(endpoint),
|
||||||
_bound(false),
|
_bound(false),
|
||||||
_acceptor(Acceptor::factory(*loop.ioContext, endpoint)) {}
|
_acceptor(Acceptor::factory(scheduler, endpoint)) {}
|
||||||
|
|
||||||
ListenTask::~ListenTask() {}
|
ListenTask::~ListenTask() {}
|
||||||
|
|
||||||
|
@ -69,7 +69,7 @@ bool ListenTask::start() {
|
||||||
|
|
||||||
_handler = [this](asio_ns::error_code const& ec) {
|
_handler = [this](asio_ns::error_code const& ec) {
|
||||||
MUTEX_LOCKER(mutex, _shutdownMutex);
|
MUTEX_LOCKER(mutex, _shutdownMutex);
|
||||||
JobGuard guard(_loop);
|
JobGuard guard(_scheduler);
|
||||||
guard.work();
|
guard.work();
|
||||||
|
|
||||||
if (!_bound) {
|
if (!_bound) {
|
||||||
|
|
|
@ -39,7 +39,7 @@ class ListenTask : virtual public rest::Task {
|
||||||
static size_t const MAX_ACCEPT_ERRORS = 128;
|
static size_t const MAX_ACCEPT_ERRORS = 128;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
ListenTask(EventLoop, Endpoint*);
|
ListenTask(rest::Scheduler*, Endpoint*);
|
||||||
~ListenTask();
|
~ListenTask();
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
|
@ -36,6 +36,7 @@
|
||||||
#include "Logger/Logger.h"
|
#include "Logger/Logger.h"
|
||||||
#include "Random/RandomGenerator.h"
|
#include "Random/RandomGenerator.h"
|
||||||
#include "Rest/GeneralResponse.h"
|
#include "Rest/GeneralResponse.h"
|
||||||
|
#include "Scheduler/Acceptor.h"
|
||||||
#include "Scheduler/JobGuard.h"
|
#include "Scheduler/JobGuard.h"
|
||||||
#include "Scheduler/JobQueue.h"
|
#include "Scheduler/JobQueue.h"
|
||||||
#include "Scheduler/Task.h"
|
#include "Scheduler/Task.h"
|
||||||
|
@ -186,6 +187,9 @@ Scheduler::~Scheduler() {
|
||||||
void Scheduler::post(std::function<void()> callback) {
|
void Scheduler::post(std::function<void()> callback) {
|
||||||
++_nrQueued;
|
++_nrQueued;
|
||||||
|
|
||||||
|
try {
|
||||||
|
|
||||||
|
// capture without self, ioContext will not live longer than scheduler
|
||||||
_ioContext.get()->post([this, callback]() {
|
_ioContext.get()->post([this, callback]() {
|
||||||
--_nrQueued;
|
--_nrQueued;
|
||||||
|
|
||||||
|
@ -194,6 +198,31 @@ void Scheduler::post(std::function<void()> callback) {
|
||||||
|
|
||||||
callback();
|
callback();
|
||||||
});
|
});
|
||||||
|
} catch (...) {
|
||||||
|
--_nrQueued;
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Scheduler::post(asio_ns::io_context::strand& strand,
|
||||||
|
std::function<void()> callback) {
|
||||||
|
++_nrQueued;
|
||||||
|
|
||||||
|
try {
|
||||||
|
|
||||||
|
// capture without self, ioContext will not live longer than scheduler
|
||||||
|
strand.post([this, callback]() {
|
||||||
|
--_nrQueued;
|
||||||
|
|
||||||
|
JobGuard guard(this);
|
||||||
|
guard.work();
|
||||||
|
|
||||||
|
callback();
|
||||||
|
});
|
||||||
|
} catch (...) {
|
||||||
|
--_nrQueued;
|
||||||
|
throw;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Scheduler::start() {
|
bool Scheduler::start() {
|
||||||
|
@ -431,6 +460,7 @@ void Scheduler::rebalanceThreads() {
|
||||||
|
|
||||||
// all threads are maxed out
|
// all threads are maxed out
|
||||||
_lastAllBusyStamp = now;
|
_lastAllBusyStamp = now;
|
||||||
|
|
||||||
// increase nrRunning by one here already, while holding the lock
|
// increase nrRunning by one here already, while holding the lock
|
||||||
incRunning();
|
incRunning();
|
||||||
}
|
}
|
||||||
|
@ -512,3 +542,4 @@ void Scheduler::initializeSignalHandlers() {
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -28,23 +28,22 @@
|
||||||
#include "Basics/Common.h"
|
#include "Basics/Common.h"
|
||||||
|
|
||||||
#include "Basics/Mutex.h"
|
#include "Basics/Mutex.h"
|
||||||
|
#include "Basics/asio_ns.h"
|
||||||
#include "Basics/socket-utils.h"
|
#include "Basics/socket-utils.h"
|
||||||
#include "Scheduler/EventLoop.h"
|
#include "Endpoint/Endpoint.h"
|
||||||
#include "Scheduler/Job.h"
|
#include "Scheduler/Job.h"
|
||||||
#include "Scheduler/Socket.h"
|
|
||||||
|
|
||||||
namespace arangodb {
|
namespace arangodb {
|
||||||
class JobQueue;
|
class Acceptor;
|
||||||
class JobGuard;
|
class JobGuard;
|
||||||
|
class JobQueue;
|
||||||
|
class ListenTask;
|
||||||
|
|
||||||
namespace velocypack {
|
namespace velocypack {
|
||||||
class Builder;
|
class Builder;
|
||||||
}
|
}
|
||||||
|
|
||||||
class ListenTask;
|
|
||||||
|
|
||||||
namespace rest {
|
namespace rest {
|
||||||
|
|
||||||
class GeneralCommTask;
|
class GeneralCommTask;
|
||||||
class SocketTask;
|
class SocketTask;
|
||||||
|
|
||||||
|
@ -63,17 +62,11 @@ class Scheduler {
|
||||||
virtual ~Scheduler();
|
virtual ~Scheduler();
|
||||||
|
|
||||||
public:
|
public:
|
||||||
asio_ns::io_context* ioContext() const { return _ioContext.get(); }
|
// XXX-TODO remove, replace with signal handler
|
||||||
asio_ns::io_context* managerService() const { return _managerService.get(); }
|
asio_ns::io_context* managerService() const { return _managerService.get(); }
|
||||||
|
|
||||||
EventLoop eventLoop() {
|
|
||||||
// cannot use
|
|
||||||
// return EventLoop{._ioService = *_ioService.get(), ._scheduler = this};
|
|
||||||
// because windows complains ...
|
|
||||||
return EventLoop{_ioContext.get(), this};
|
|
||||||
}
|
|
||||||
|
|
||||||
void post(std::function<void()> callback);
|
void post(std::function<void()> callback);
|
||||||
|
void post(asio_ns::io_context::strand&, std::function<void()> callback);
|
||||||
|
|
||||||
bool start();
|
bool start();
|
||||||
bool isRunning() const { return numRunning(_counters) > 0; }
|
bool isRunning() const { return numRunning(_counters) > 0; }
|
||||||
|
@ -83,6 +76,50 @@ class Scheduler {
|
||||||
bool isStopping() { return (_counters & (1ULL << 63)) != 0; }
|
bool isStopping() { return (_counters & (1ULL << 63)) != 0; }
|
||||||
void shutdown();
|
void shutdown();
|
||||||
|
|
||||||
|
template<typename T>
|
||||||
|
asio_ns::deadline_timer* newDeadlineTimer(T timeout) {
|
||||||
|
return new asio_ns::deadline_timer(*_ioContext, timeout);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::steady_timer* newSteadyTimer() {
|
||||||
|
return new asio_ns::steady_timer(*_ioContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::io_context::strand* newStrand() {
|
||||||
|
return new asio_ns::io_context::strand(*_ioContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::ip::tcp::acceptor*
|
||||||
|
newAcceptor() {
|
||||||
|
return new asio_ns::ip::tcp::acceptor(*_ioContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::local::stream_protocol::acceptor*
|
||||||
|
newDomainAcceptor() {
|
||||||
|
return new asio_ns::local::stream_protocol::acceptor(*_ioContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::ip::tcp::socket*
|
||||||
|
newSocket() {
|
||||||
|
return new asio_ns::ip::tcp::socket(*_ioContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::local::stream_protocol::socket*
|
||||||
|
newDomainSocket() {
|
||||||
|
return new asio_ns::local::stream_protocol::socket(*_ioContext);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::ssl::stream<asio_ns::ip::tcp::socket>*
|
||||||
|
newSslSocket(asio_ns::ssl::context& context) {
|
||||||
|
return new asio_ns::ssl::stream<asio_ns::ip::tcp::socket>(
|
||||||
|
*_ioContext, context);
|
||||||
|
}
|
||||||
|
|
||||||
|
asio_ns::ip::tcp::resolver*
|
||||||
|
newResolver() {
|
||||||
|
return new asio_ns::ip::tcp::resolver(*_ioContext);
|
||||||
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
// decrements the nrRunning counter for the thread
|
// decrements the nrRunning counter for the thread
|
||||||
void stopThread();
|
void stopThread();
|
||||||
|
@ -102,21 +139,21 @@ class Scheduler {
|
||||||
|
|
||||||
uint64_t minimum() const { return _nrMinimum; }
|
uint64_t minimum() const { return _nrMinimum; }
|
||||||
|
|
||||||
// number of queued handlers
|
// number of jobs that are currently been posted to the io_context,
|
||||||
|
// but where the handler has not yet been called. The number of
|
||||||
|
// handler in total is numQueued() + numRunning(_counters)
|
||||||
inline uint64_t numQueued() const noexcept { return _nrQueued; };
|
inline uint64_t numQueued() const noexcept { return _nrQueued; };
|
||||||
|
|
||||||
inline uint64_t getCounters() const noexcept { return _counters; }
|
inline uint64_t getCounters() const noexcept { return _counters; }
|
||||||
|
|
||||||
// number of running threads
|
|
||||||
static uint64_t numRunning(uint64_t value) noexcept {
|
static uint64_t numRunning(uint64_t value) noexcept {
|
||||||
return value & 0xFFFFULL;
|
return value & 0xFFFFULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
// number of working threads
|
|
||||||
static uint64_t numWorking(uint64_t value) noexcept {
|
static uint64_t numWorking(uint64_t value) noexcept {
|
||||||
return (value >> 16) & 0xFFFFULL;
|
return (value >> 16) & 0xFFFFULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
// number of blocked threads
|
|
||||||
static uint64_t numBlocked(uint64_t value) noexcept {
|
static uint64_t numBlocked(uint64_t value) noexcept {
|
||||||
return (value >> 32) & 0xFFFFULL;
|
return (value >> 32) & 0xFFFFULL;
|
||||||
}
|
}
|
||||||
|
@ -142,13 +179,15 @@ class Scheduler {
|
||||||
// AA BB CC DD
|
// AA BB CC DD
|
||||||
//
|
//
|
||||||
// we use the lowest 2 bytes (DD) to store the number of running threads
|
// we use the lowest 2 bytes (DD) to store the number of running threads
|
||||||
|
//
|
||||||
// the next lowest bytes (CC) are used to store the number of currently
|
// the next lowest bytes (CC) are used to store the number of currently
|
||||||
// working threads
|
// working threads
|
||||||
|
//
|
||||||
// the next bytes (BB) are used to store the number of currently blocked
|
// the next bytes (BB) are used to store the number of currently blocked
|
||||||
// threads
|
// threads
|
||||||
|
//
|
||||||
// the highest bytes (AA) are used only to encode a stopping bit. when this
|
// the highest bytes (AA) are used only to encode a stopping bit. when this
|
||||||
// bit is
|
// bit is set, the scheduler is stopping (or already stopped)
|
||||||
// set, the scheduler is stopping (or already stopped)
|
|
||||||
inline void setStopping() noexcept { _counters |= (1ULL << 63); }
|
inline void setStopping() noexcept { _counters |= (1ULL << 63); }
|
||||||
|
|
||||||
inline void incRunning() noexcept { _counters += 1ULL << 0; }
|
inline void incRunning() noexcept { _counters += 1ULL << 0; }
|
||||||
|
@ -192,7 +231,6 @@ class Scheduler {
|
||||||
// meaning of its individual bits
|
// meaning of its individual bits
|
||||||
std::atomic<uint64_t> _counters;
|
std::atomic<uint64_t> _counters;
|
||||||
|
|
||||||
// number of jobs that are currently been queued, but not worked on
|
|
||||||
std::atomic<uint64_t> _nrQueued;
|
std::atomic<uint64_t> _nrQueued;
|
||||||
|
|
||||||
std::unique_ptr<JobQueue> _jobQueue;
|
std::unique_ptr<JobQueue> _jobQueue;
|
||||||
|
|
|
@ -28,19 +28,27 @@
|
||||||
#include "Basics/StringBuffer.h"
|
#include "Basics/StringBuffer.h"
|
||||||
#include "Basics/asio_ns.h"
|
#include "Basics/asio_ns.h"
|
||||||
#include "Logger/Logger.h"
|
#include "Logger/Logger.h"
|
||||||
|
#include "Scheduler/JobGuard.h"
|
||||||
|
|
||||||
namespace arangodb {
|
namespace arangodb {
|
||||||
|
namespace rest {
|
||||||
|
class Scheduler;
|
||||||
|
}
|
||||||
|
|
||||||
typedef std::function<void(const asio_ns::error_code& ec,
|
typedef std::function<void(const asio_ns::error_code& ec,
|
||||||
std::size_t transferred)>
|
std::size_t transferred)>
|
||||||
AsyncHandler;
|
AsyncHandler;
|
||||||
|
|
||||||
class Socket {
|
class Socket {
|
||||||
public:
|
public:
|
||||||
Socket(asio_ns::io_context& ioContext, bool encrypted)
|
Socket(rest::Scheduler* scheduler, bool encrypted)
|
||||||
: _encrypted(encrypted), strand(ioContext) {}
|
: _strand(scheduler->newStrand()),
|
||||||
|
_encrypted(encrypted),
|
||||||
|
_scheduler(scheduler) {
|
||||||
|
TRI_ASSERT(_scheduler != nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
Socket(Socket const& that) = delete;
|
Socket(Socket const& that) = delete;
|
||||||
|
|
||||||
Socket(Socket&& that) = delete;
|
Socket(Socket&& that) = delete;
|
||||||
|
|
||||||
virtual ~Socket() {}
|
virtual ~Socket() {}
|
||||||
|
@ -76,6 +84,12 @@ class Socket {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void post(std::function<void()> handler) {
|
||||||
|
_scheduler->post(*_strand, handler);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool runningInThisThread() { return _strand->running_in_this_thread(); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
virtual std::string peerAddress() const = 0;
|
virtual std::string peerAddress() const = 0;
|
||||||
virtual int peerPort() const = 0;
|
virtual int peerPort() const = 0;
|
||||||
|
@ -96,13 +110,14 @@ class Socket {
|
||||||
virtual void shutdownReceive(asio_ns::error_code& ec) = 0;
|
virtual void shutdownReceive(asio_ns::error_code& ec) = 0;
|
||||||
virtual void shutdownSend(asio_ns::error_code& ec) = 0;
|
virtual void shutdownSend(asio_ns::error_code& ec) = 0;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
// strand to ensure the connection's handlers are not called concurrently.
|
||||||
|
std::unique_ptr<asio_ns::io_context::strand> _strand;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
bool const _encrypted;
|
bool const _encrypted;
|
||||||
bool _handshakeDone = false;
|
bool _handshakeDone = false;
|
||||||
|
rest::Scheduler* _scheduler;
|
||||||
public:
|
|
||||||
// strand to ensure the connection's handlers are not called concurrently.
|
|
||||||
asio_ns::io_context::strand strand;
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -34,7 +34,7 @@ bool SocketSslTcp::sslHandshake() {
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
ec.clear();
|
ec.clear();
|
||||||
_sslSocket.handshake(asio_ns::ssl::stream_base::handshake_type::server, ec);
|
_sslSocket->handshake(asio_ns::ssl::stream_base::handshake_type::server, ec);
|
||||||
|
|
||||||
if (ec.value() != asio_ns::error::would_block) {
|
if (ec.value() != asio_ns::error::would_block) {
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -33,11 +33,11 @@ class SocketSslTcp final : public Socket {
|
||||||
friend class AcceptorTcp;
|
friend class AcceptorTcp;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
SocketSslTcp(asio_ns::io_context& ioService, asio_ns::ssl::context&& context)
|
SocketSslTcp(rest::Scheduler* scheduler, asio_ns::ssl::context&& context)
|
||||||
: Socket(ioService, /*encrypted*/ true),
|
: Socket(scheduler, /*encrypted*/ true),
|
||||||
_sslContext(std::move(context)),
|
_sslContext(std::move(context)),
|
||||||
_sslSocket(ioService, _sslContext),
|
_sslSocket(scheduler->newSslSocket(_sslContext)),
|
||||||
_socket(_sslSocket.next_layer()),
|
_socket(_sslSocket->next_layer()),
|
||||||
_peerEndpoint() {}
|
_peerEndpoint() {}
|
||||||
|
|
||||||
SocketSslTcp(SocketSslTcp const& that) = delete;
|
SocketSslTcp(SocketSslTcp const& that) = delete;
|
||||||
|
@ -55,23 +55,23 @@ class SocketSslTcp final : public Socket {
|
||||||
|
|
||||||
size_t writeSome(basics::StringBuffer* buffer,
|
size_t writeSome(basics::StringBuffer* buffer,
|
||||||
asio_ns::error_code& ec) override {
|
asio_ns::error_code& ec) override {
|
||||||
return _sslSocket.write_some(
|
return _sslSocket->write_some(
|
||||||
asio_ns::buffer(buffer->begin(), buffer->length()), ec);
|
asio_ns::buffer(buffer->begin(), buffer->length()), ec);
|
||||||
}
|
}
|
||||||
|
|
||||||
void asyncWrite(asio_ns::mutable_buffers_1 const& buffer,
|
void asyncWrite(asio_ns::mutable_buffers_1 const& buffer,
|
||||||
AsyncHandler const& handler) override {
|
AsyncHandler const& handler) override {
|
||||||
return asio_ns::async_write(_sslSocket, buffer, strand.wrap(handler));
|
return asio_ns::async_write(*_sslSocket, buffer, _strand->wrap(handler));
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t readSome(asio_ns::mutable_buffers_1 const& buffer,
|
size_t readSome(asio_ns::mutable_buffers_1 const& buffer,
|
||||||
asio_ns::error_code& ec) override {
|
asio_ns::error_code& ec) override {
|
||||||
return _sslSocket.read_some(buffer, ec);
|
return _sslSocket->read_some(buffer, ec);
|
||||||
}
|
}
|
||||||
|
|
||||||
void asyncRead(asio_ns::mutable_buffers_1 const& buffer,
|
void asyncRead(asio_ns::mutable_buffers_1 const& buffer,
|
||||||
AsyncHandler const& handler) override {
|
AsyncHandler const& handler) override {
|
||||||
return _sslSocket.async_read_some(buffer, strand.wrap(handler));
|
return _sslSocket->async_read_some(buffer, _strand->wrap(handler));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::size_t available(asio_ns::error_code& ec) override {
|
std::size_t available(asio_ns::error_code& ec) override {
|
||||||
|
@ -101,7 +101,7 @@ class SocketSslTcp final : public Socket {
|
||||||
|
|
||||||
private:
|
private:
|
||||||
asio_ns::ssl::context _sslContext;
|
asio_ns::ssl::context _sslContext;
|
||||||
asio_ns::ssl::stream<asio_ns::ip::tcp::socket> _sslSocket;
|
std::unique_ptr<asio_ns::ssl::stream<asio_ns::ip::tcp::socket>> _sslSocket;
|
||||||
asio_ns::ip::tcp::socket& _socket;
|
asio_ns::ip::tcp::socket& _socket;
|
||||||
asio_ns::ip::tcp::acceptor::endpoint_type _peerEndpoint;
|
asio_ns::ip::tcp::acceptor::endpoint_type _peerEndpoint;
|
||||||
};
|
};
|
||||||
|
|
|
@ -31,7 +31,6 @@
|
||||||
#include "Basics/socket-utils.h"
|
#include "Basics/socket-utils.h"
|
||||||
#include "Endpoint/ConnectionInfo.h"
|
#include "Endpoint/ConnectionInfo.h"
|
||||||
#include "Logger/Logger.h"
|
#include "Logger/Logger.h"
|
||||||
#include "Scheduler/EventLoop.h"
|
|
||||||
#include "Scheduler/JobGuard.h"
|
#include "Scheduler/JobGuard.h"
|
||||||
#include "Scheduler/Scheduler.h"
|
#include "Scheduler/Scheduler.h"
|
||||||
#include "Scheduler/SchedulerFeature.h"
|
#include "Scheduler/SchedulerFeature.h"
|
||||||
|
@ -45,11 +44,11 @@ using namespace arangodb::rest;
|
||||||
// --SECTION-- constructors and destructors
|
// --SECTION-- constructors and destructors
|
||||||
// -----------------------------------------------------------------------------
|
// -----------------------------------------------------------------------------
|
||||||
|
|
||||||
SocketTask::SocketTask(arangodb::EventLoop loop,
|
SocketTask::SocketTask(Scheduler* scheduler,
|
||||||
std::unique_ptr<arangodb::Socket> socket,
|
std::unique_ptr<arangodb::Socket> socket,
|
||||||
arangodb::ConnectionInfo&& connectionInfo,
|
arangodb::ConnectionInfo&& connectionInfo,
|
||||||
double keepAliveTimeout, bool skipInit = false)
|
double keepAliveTimeout, bool skipInit = false)
|
||||||
: Task(loop, "SocketTask"),
|
: Task(scheduler, "SocketTask"),
|
||||||
_peer(std::move(socket)),
|
_peer(std::move(socket)),
|
||||||
_connectionInfo(std::move(connectionInfo)),
|
_connectionInfo(std::move(connectionInfo)),
|
||||||
_connectionStatistics(nullptr),
|
_connectionStatistics(nullptr),
|
||||||
|
@ -57,7 +56,7 @@ SocketTask::SocketTask(arangodb::EventLoop loop,
|
||||||
_stringBuffers{_stringBuffersArena},
|
_stringBuffers{_stringBuffersArena},
|
||||||
_writeBuffer(nullptr, nullptr),
|
_writeBuffer(nullptr, nullptr),
|
||||||
_keepAliveTimeout(static_cast<long>(keepAliveTimeout * 1000)),
|
_keepAliveTimeout(static_cast<long>(keepAliveTimeout * 1000)),
|
||||||
_keepAliveTimer(*_loop.ioContext, _keepAliveTimeout),
|
_keepAliveTimer(scheduler->newDeadlineTimer(_keepAliveTimeout)),
|
||||||
_useKeepAliveTimer(keepAliveTimeout > 0.0),
|
_useKeepAliveTimer(keepAliveTimeout > 0.0),
|
||||||
_keepAliveTimerActive(false),
|
_keepAliveTimerActive(false),
|
||||||
_closeRequested(false),
|
_closeRequested(false),
|
||||||
|
@ -88,7 +87,7 @@ SocketTask::~SocketTask() {
|
||||||
|
|
||||||
asio_ns::error_code err;
|
asio_ns::error_code err;
|
||||||
if (_keepAliveTimerActive.load(std::memory_order_relaxed)) {
|
if (_keepAliveTimerActive.load(std::memory_order_relaxed)) {
|
||||||
_keepAliveTimer.cancel(err);
|
_keepAliveTimer->cancel(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -132,13 +131,11 @@ bool SocketTask::start() {
|
||||||
<< _connectionInfo.clientPort;
|
<< _connectionInfo.clientPort;
|
||||||
|
|
||||||
auto self = shared_from_this();
|
auto self = shared_from_this();
|
||||||
_loop.scheduler->_nrQueued++;
|
|
||||||
_peer->strand.post([self, this]() {
|
_peer->post([self, this]() {
|
||||||
_loop.scheduler->_nrQueued--;
|
|
||||||
JobGuard guard(_loop);
|
|
||||||
guard.work();
|
|
||||||
asyncReadSome();
|
asyncReadSome();
|
||||||
});
|
});
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -148,7 +145,7 @@ bool SocketTask::start() {
|
||||||
|
|
||||||
// caller must hold the _lock
|
// caller must hold the _lock
|
||||||
void SocketTask::addWriteBuffer(WriteBuffer&& buffer) {
|
void SocketTask::addWriteBuffer(WriteBuffer&& buffer) {
|
||||||
TRI_ASSERT(_peer->strand.running_in_this_thread());
|
TRI_ASSERT(_peer->runningInThisThread());
|
||||||
|
|
||||||
if (_closedSend.load(std::memory_order_acquire) ||
|
if (_closedSend.load(std::memory_order_acquire) ||
|
||||||
_abandoned.load(std::memory_order_acquire)) {
|
_abandoned.load(std::memory_order_acquire)) {
|
||||||
|

@@ -172,7 +169,7 @@ void SocketTask::addWriteBuffer(WriteBuffer&& buffer) {
 // caller must hold the _lock
 bool SocketTask::completedWriteBuffer() {
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 RequestStatistics::SET_WRITE_END(_writeBuffer._statistics);
 _writeBuffer.release(this); // try to recycle the string buffer

@@ -197,11 +194,8 @@ void SocketTask::closeStream() {
 // strand::dispatch may execute this immediately if this
 // is called on a thread inside the same strand
 auto self = shared_from_this();
-_loop.scheduler->_nrQueued++;
-_peer->strand.post([self, this] {
-_loop.scheduler->_nrQueued--;
-JobGuard guard(_loop);
-guard.work();
+_peer->post([self, this] {
 closeStreamNoLock();
 });
 }

@@ -209,7 +203,7 @@ void SocketTask::closeStream() {
 // caller must hold the _lock
 void SocketTask::closeStreamNoLock() {
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 bool mustCloseSend = !_closedSend.load(std::memory_order_acquire);
 bool mustCloseReceive = !_closedReceive.load(std::memory_order_acquire);

@@ -223,7 +217,7 @@ void SocketTask::closeStreamNoLock() {
 _closedSend.store(true, std::memory_order_release);
 _closedReceive.store(true, std::memory_order_release);
 _closeRequested.store(false, std::memory_order_release);
-_keepAliveTimer.cancel();
+_keepAliveTimer->cancel();
 _keepAliveTimerActive.store(false, std::memory_order_relaxed);
 }
 

@@ -234,7 +228,8 @@ void SocketTask::closeStreamNoLock() {
 // will acquire the _lock
 void SocketTask::addToReadBuffer(char const* data, std::size_t len) {
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 _readBuffer.appendText(data, len);
 }
 

@@ -242,7 +237,7 @@ void SocketTask::addToReadBuffer(char const* data, std::size_t len) {
 void SocketTask::resetKeepAlive() {
 if (_useKeepAliveTimer) {
 asio_ns::error_code err;
-_keepAliveTimer.expires_from_now(_keepAliveTimeout, err);
+_keepAliveTimer->expires_from_now(_keepAliveTimeout, err);
 if (err) {
 closeStream();
 return;

@@ -250,7 +245,7 @@ void SocketTask::resetKeepAlive() {
 
 _keepAliveTimerActive.store(true, std::memory_order_relaxed);
 auto self = shared_from_this();
-_keepAliveTimer.async_wait([self, this](const asio_ns::error_code& error) {
+_keepAliveTimer->async_wait([self, this](const asio_ns::error_code& error) {
 if (!error) { // error will be true if timer was canceled
 LOG_TOPIC(ERR, Logger::COMMUNICATION)
 << "keep alive timout - closing stream!";

@@ -265,7 +260,7 @@ void SocketTask::cancelKeepAlive() {
 if (_useKeepAliveTimer &&
 _keepAliveTimerActive.load(std::memory_order_relaxed)) {
 asio_ns::error_code err;
-_keepAliveTimer.cancel(err);
+_keepAliveTimer->cancel(err);
 _keepAliveTimerActive.store(false, std::memory_order_relaxed);
 }
 }

@@ -273,7 +268,8 @@ void SocketTask::cancelKeepAlive() {
 // caller must hold the _lock
 bool SocketTask::reserveMemory() {
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 if (_readBuffer.reserve(READ_BLOCK_SIZE + 1) == TRI_ERROR_OUT_OF_MEMORY) {
 LOG_TOPIC(WARN, arangodb::Logger::COMMUNICATION)
 << "out of memory while reading from client";

@@ -291,7 +287,7 @@ bool SocketTask::trySyncRead() {
 }
 
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 asio_ns::error_code err;
 TRI_ASSERT(_peer != nullptr);

@@ -339,7 +335,7 @@ bool SocketTask::trySyncRead() {
 // (new read)
 bool SocketTask::processAll() {
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 double startTime = StatisticsFeature::time();
 Result res;

@@ -381,7 +377,7 @@ bool SocketTask::processAll() {
 // must be invoked on strand
 void SocketTask::asyncReadSome() {
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 try {
 size_t const MAX_DIRECT_TRIES = 2;

@@ -434,7 +430,7 @@ void SocketTask::asyncReadSome() {
 _peer->asyncRead(
 asio_ns::buffer(_readBuffer.end(), READ_BLOCK_SIZE),
 [self, this](const asio_ns::error_code& ec, std::size_t transferred) {
-JobGuard guard(_loop);
+JobGuard guard(_scheduler);
 guard.work();
 
 if (_abandoned.load(std::memory_order_acquire)) {
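Inside the asio completion handlers, JobGuard is now constructed from the scheduler pointer instead of the old EventLoop value. A small RAII sketch of a guard over a Scheduler*; only the constructor argument and the work() call are taken from the commit, the counter it maintains here is an assumption:

    #include <atomic>
    #include <cstdint>

    namespace sketch {
    // Hypothetical slice of scheduler state; the real Scheduler tracks more.
    struct Scheduler {
      std::atomic<std::uint64_t> nrWorking{0};
    };

    // Minimal RAII guard over a Scheduler*: work() marks the current thread
    // as busy, the destructor undoes it exactly once.
    class JobGuard {
     public:
      explicit JobGuard(Scheduler* scheduler) : _scheduler(scheduler) {}

      ~JobGuard() {
        if (_entered) {
          _scheduler->nrWorking.fetch_sub(1, std::memory_order_relaxed);
        }
      }

      void work() {
        if (!_entered) {
          _entered = true;
          _scheduler->nrWorking.fetch_add(1, std::memory_order_relaxed);
        }
      }

     private:
      Scheduler* _scheduler;
      bool _entered = false;
    };
    }  // namespace sketch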
@@ -446,19 +442,11 @@ void SocketTask::asyncReadSome() {
 return;
 }
 
-_loop.scheduler->_nrQueued++;
-_peer->strand.post([self, this, transferred] {
-_loop.scheduler->_nrQueued--;
-JobGuard guard(_loop);
-guard.work();
+_peer->post([self, this, transferred] {
 
 _readBuffer.increaseLength(transferred);
 
 if (processAll()) {
-_loop.scheduler->_nrQueued++;
-_peer->strand.post([self, this]() {
-_loop.scheduler->_nrQueued--;
-JobGuard guard(_loop);
-guard.work();
+_peer->post([self, this]() {
 asyncReadSome();
 });
 }

@@ -469,7 +457,7 @@ void SocketTask::asyncReadSome() {
 
 void SocketTask::asyncWriteSome() {
 TRI_ASSERT(_peer != nullptr);
-TRI_ASSERT(_peer->strand.running_in_this_thread());
+TRI_ASSERT(_peer->runningInThisThread());
 
 if (_writeBuffer.empty()) {
 return;

@@ -525,7 +513,7 @@ void SocketTask::asyncWriteSome() {
 _peer->asyncWrite(
 asio_ns::buffer(_writeBuffer._buffer->begin() + written, total - written),
 [self, this](const asio_ns::error_code& ec, std::size_t transferred) {
-JobGuard guard(_loop);
+JobGuard guard(_scheduler);
 guard.work();
 
 if (_abandoned.load(std::memory_order_acquire)) {

@@ -537,12 +525,7 @@ void SocketTask::asyncWriteSome() {
 return;
 }
 
-_loop.scheduler->_nrQueued++;
-_peer->strand.post([self, this, transferred] {
-_loop.scheduler->_nrQueued--;
-JobGuard guard(_loop);
-guard.work();
+_peer->post([self, this, transferred] {
 
 if (_abandoned.load(std::memory_order_acquire)) {
 return;
 }

@@ -551,11 +534,7 @@ void SocketTask::asyncWriteSome() {
 transferred);
 
 if (completedWriteBuffer()) {
-_loop.scheduler->_nrQueued++;
-_peer->strand.post([self, this] {
-_loop.scheduler->_nrQueued--;
-JobGuard guard(_loop);
-guard.work();
+_peer->post([self, this] {
 if (!_abandoned.load(std::memory_order_acquire)) {
 asyncWriteSome();
 }

@@ -617,11 +596,8 @@ void SocketTask::returnStringBuffer(StringBuffer* buffer) {
 void SocketTask::triggerProcessAll() {
 // try to process remaining request data
 auto self = shared_from_this();
-_loop.scheduler->_nrQueued++;
-_peer->strand.post([self, this] {
-_loop.scheduler->_nrQueued--;
-JobGuard guard(_loop);
-guard.work();
+_peer->post([self, this] {
 processAll();
 });
 }

@@ -48,7 +48,7 @@ class SocketTask : virtual public Task {
 static size_t const READ_BLOCK_SIZE = 10000;
 
 public:
-SocketTask(EventLoop, std::unique_ptr<Socket>, ConnectionInfo&&,
+SocketTask(Scheduler*, std::unique_ptr<Socket>, ConnectionInfo&&,
 double keepAliveTimeout, bool skipInit);
 
 virtual ~SocketTask();

@@ -178,7 +178,7 @@ class SocketTask : virtual public Task {
 std::list<WriteBuffer> _writeBuffers;
 
 boost::posix_time::milliseconds _keepAliveTimeout;
-asio_ns::deadline_timer _keepAliveTimer;
+std::unique_ptr<asio_ns::deadline_timer> _keepAliveTimer;
 bool const _useKeepAliveTimer;
 
 std::atomic<bool> _keepAliveTimerActive;

@@ -33,9 +33,9 @@ class SocketTcp final : public Socket {
 friend class AcceptorTcp;
 
 public:
-SocketTcp(asio_ns::io_context& ioService)
-: Socket(ioService, /*encrypted*/ false),
-_socket(ioService),
+SocketTcp(rest::Scheduler* scheduler)
+: Socket(scheduler, /*encrypted*/ false),
+_socket(scheduler->newSocket()),
 _peerEndpoint() {}
 
 SocketTcp(SocketTcp const& that) = delete;

@@ -48,32 +48,32 @@ class SocketTcp final : public Socket {
 
 int peerPort() const override { return _peerEndpoint.port(); }
 
-void setNonBlocking(bool v) override { _socket.non_blocking(v); }
+void setNonBlocking(bool v) override { _socket->non_blocking(v); }
 
 size_t writeSome(basics::StringBuffer* buffer,
 asio_ns::error_code& ec) override {
-return _socket.write_some(
+return _socket->write_some(
 asio_ns::buffer(buffer->begin(), buffer->length()), ec);
 }
 
 void asyncWrite(asio_ns::mutable_buffers_1 const& buffer,
 AsyncHandler const& handler) override {
-return asio_ns::async_write(_socket, buffer, strand.wrap(handler));
+return asio_ns::async_write(*_socket, buffer, _strand->wrap(handler));
 }
 
 size_t readSome(asio_ns::mutable_buffers_1 const& buffer,
 asio_ns::error_code& ec) override {
-return _socket.read_some(buffer, ec);
+return _socket->read_some(buffer, ec);
 }
 
 void asyncRead(asio_ns::mutable_buffers_1 const& buffer,
 AsyncHandler const& handler) override {
-return _socket.async_read_some(buffer, strand.wrap(handler));
+return _socket->async_read_some(buffer, _strand->wrap(handler));
 }
 
 void close(asio_ns::error_code& ec) override {
-if (_socket.is_open()) {
-_socket.close(ec);
+if (_socket->is_open()) {
+_socket->close(ec);
 if (ec && ec != asio_ns::error::not_connected) {
 LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
 << "closing socket failed with: " << ec.message();

@@ -82,22 +82,22 @@ class SocketTcp final : public Socket {
 }
 
 std::size_t available(asio_ns::error_code& ec) override {
-return static_cast<size_t>(_socket.available(ec));
+return static_cast<size_t>(_socket->available(ec));
 }
 
 protected:
 bool sslHandshake() override { return false; }
 
 void shutdownReceive(asio_ns::error_code& ec) override {
-_socket.shutdown(asio_ns::ip::tcp::socket::shutdown_receive, ec);
+_socket->shutdown(asio_ns::ip::tcp::socket::shutdown_receive, ec);
 }
 
 void shutdownSend(asio_ns::error_code& ec) override {
-_socket.shutdown(asio_ns::ip::tcp::socket::shutdown_send, ec);
+_socket->shutdown(asio_ns::ip::tcp::socket::shutdown_send, ec);
 }
 
 private:
-asio_ns::ip::tcp::socket _socket;
+std::unique_ptr<asio_ns::ip::tcp::socket> _socket;
 asio_ns::ip::tcp::acceptor::endpoint_type _peerEndpoint;
 };
 }
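SocketTcp no longer receives an io_context&; it asks the scheduler for a ready-made asio socket and owns it through a std::unique_ptr, and the Unix-domain variant below does the same via newDomainSocket(). A sketch of the scheduler-side factories this implies, assuming the scheduler owns the io_context; the method names newSocket() and newDomainSocket() appear in this commit, their bodies are assumptions (the Unix-domain socket type is POSIX-only):

    #include <boost/asio.hpp>

    namespace sketch {
    namespace asio_ns = boost::asio;

    class Scheduler {
     public:
      // Raw pointers by design: the Socket subclasses own them via unique_ptr.
      asio_ns::ip::tcp::socket* newSocket() {
        return new asio_ns::ip::tcp::socket(_ioContext);
      }

      asio_ns::local::stream_protocol::socket* newDomainSocket() {
        return new asio_ns::local::stream_protocol::socket(_ioContext);
      }

     private:
      asio_ns::io_context _ioContext;
    };
    }  // namespace sketch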

@@ -28,41 +28,41 @@ using namespace arangodb;
 
 size_t SocketUnixDomain::writeSome(basics::StringBuffer* buffer,
 asio_ns::error_code& ec) {
-return _socket.write_some(asio_ns::buffer(buffer->begin(), buffer->length()),
+return _socket->write_some(asio_ns::buffer(buffer->begin(), buffer->length()),
 ec);
 }
 
 void SocketUnixDomain::asyncWrite(asio_ns::mutable_buffers_1 const& buffer,
 AsyncHandler const& handler) {
-return asio_ns::async_write(_socket, buffer, handler);
+return asio_ns::async_write(*_socket, buffer, handler);
 }
 
 size_t SocketUnixDomain::readSome(asio_ns::mutable_buffers_1 const& buffer,
 asio_ns::error_code& ec) {
-return _socket.read_some(buffer, ec);
+return _socket->read_some(buffer, ec);
 }
 
 std::size_t SocketUnixDomain::available(asio_ns::error_code& ec) {
-return _socket.available(ec);
+return _socket->available(ec);
 }
 
 void SocketUnixDomain::asyncRead(asio_ns::mutable_buffers_1 const& buffer,
 AsyncHandler const& handler) {
-return _socket.async_read_some(buffer, handler);
+return _socket->async_read_some(buffer, handler);
 }
 
 void SocketUnixDomain::shutdownReceive(asio_ns::error_code& ec) {
-_socket.shutdown(asio_ns::local::stream_protocol::socket::shutdown_receive,
+_socket->shutdown(asio_ns::local::stream_protocol::socket::shutdown_receive,
 ec);
 }
 
 void SocketUnixDomain::shutdownSend(asio_ns::error_code& ec) {
-_socket.shutdown(asio_ns::local::stream_protocol::socket::shutdown_send, ec);
+_socket->shutdown(asio_ns::local::stream_protocol::socket::shutdown_send, ec);
 }
 
 void SocketUnixDomain::close(asio_ns::error_code& ec) {
-if (_socket.is_open()) {
-_socket.close(ec);
+if (_socket->is_open()) {
+_socket->close(ec);
 if (ec && ec != asio_ns::error::not_connected) {
 LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "closing socket failed with: "
 << ec.message();

@@ -36,15 +36,16 @@ class SocketUnixDomain final : public Socket {
 friend class AcceptorUnixDomain;
 
 public:
-SocketUnixDomain(asio_ns::io_context& ioService)
-: Socket(ioService, false), _socket(ioService) {}
+explicit SocketUnixDomain(rest::Scheduler* scheduler)
+: Socket(scheduler, false),
+_socket(scheduler->newDomainSocket()) {}
 
 SocketUnixDomain(SocketUnixDomain&& that) = default;
 
 std::string peerAddress() const override { return "local"; }
 int peerPort() const override { return 0; }
 
-void setNonBlocking(bool v) override { _socket.non_blocking(v); }
+void setNonBlocking(bool v) override { _socket->non_blocking(v); }
 
 size_t writeSome(basics::StringBuffer* buffer,
 asio_ns::error_code& ec) override;

@@ -67,7 +68,7 @@ class SocketUnixDomain final : public Socket {
 void close(asio_ns::error_code& ec) override;
 
 private:
-asio_ns::local::stream_protocol::socket _socket;
+std::unique_ptr<asio_ns::local::stream_protocol::socket> _socket;
 asio_ns::local::stream_protocol::acceptor::endpoint_type _peerEndpoint;
 };
 }

@@ -27,8 +27,6 @@
 #include <velocypack/Builder.h>
 #include <velocypack/velocypack-aliases.h>
 
-#include "Scheduler/EventLoop.h"
-
 using namespace arangodb::rest;
 
 namespace {

@@ -36,7 +34,9 @@ std::atomic_uint_fast64_t NEXT_TASK_ID(static_cast<uint64_t>(TRI_microtime() *
 100000.0));
 }
 
-Task::Task(arangodb::EventLoop loop, std::string const& name)
-: _loop(loop),
+Task::Task(Scheduler* scheduler, std::string const& name)
+: _scheduler(scheduler),
 _taskId(NEXT_TASK_ID.fetch_add(1, std::memory_order_seq_cst)),
-_name(name) {}
+_name(name) {
+TRI_ASSERT(_scheduler != nullptr);
+}

@@ -27,8 +27,6 @@
 
 #include "Basics/Common.h"
 
-#include "Scheduler/EventLoop.h"
-
 namespace arangodb {
 namespace velocypack {
 class Builder;

@@ -37,17 +35,18 @@ class Builder;
 class TaskData;
 
 namespace rest {
+class Scheduler;
 
 class Task : public std::enable_shared_from_this<Task> {
 Task(Task const&) = delete;
 Task& operator=(Task const&) = delete;
 
 public:
-Task(EventLoop, std::string const& name);
+Task(Scheduler*, std::string const& name);
 virtual ~Task() = default;
 
 public:
 uint64_t taskId() const { return _taskId; }
-EventLoop eventLoop() const { return _loop; }
 std::string const& name() const { return _name; }
 
 // get a VelocyPack representation of the task for reporting

@@ -55,7 +54,7 @@ class Task : public std::enable_shared_from_this<Task> {
 void toVelocyPack(arangodb::velocypack::Builder&) const;
 
 protected:
-EventLoop _loop;
+Scheduler * const _scheduler;
 uint64_t const _taskId;
 
 private:
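The Task header can get by with the forward declaration class Scheduler; because the task only stores a pointer, and declaring the member as Scheduler* const fixes the pointer itself after construction while leaving the scheduler it points to mutable. A tiny standalone illustration of the same pattern, with generic names rather than ArangoDB's:

    namespace sketch {
    class Scheduler;  // forward declaration suffices for a pointer member

    class Task {
     public:
      explicit Task(Scheduler* scheduler) : _scheduler(scheduler) {}

     protected:
      // The pointer cannot be reseated; the Scheduler it refers to
      // is still mutable through it.
      Scheduler* const _scheduler;
    };
    }  // namespace sketch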

@@ -351,8 +351,8 @@ void V8Task::start() {
 ExecContext::CURRENT->isAdminUser() ||
 (!_user.empty() && ExecContext::CURRENT->user() == _user));
 
-auto ioService = SchedulerFeature::SCHEDULER->ioContext();
-_timer.reset(new asio::steady_timer(*ioService));
+_timer.reset(SchedulerFeature::SCHEDULER->newSteadyTimer());
 if (_offset.count() <= 0) {
 _offset = std::chrono::microseconds(1);
 }
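V8Task likewise stops pulling the raw io_context out of the SchedulerFeature and instead resets its timer from a scheduler factory. A short sketch of an assumed newSteadyTimer() helper and the arm-and-wait pattern around it; only the factory name comes from the commit, the free function and the expires_after call are illustrative:

    #include <boost/asio.hpp>
    #include <chrono>
    #include <memory>

    namespace sketch {
    namespace asio_ns = boost::asio;

    // Assumed factory, analogous to newDeadlineTimer() earlier.
    asio_ns::steady_timer* newSteadyTimer(asio_ns::io_context& ctx) {
      return new asio_ns::steady_timer(ctx);
    }

    void armOnce(asio_ns::io_context& ctx) {
      std::unique_ptr<asio_ns::steady_timer> timer(newSteadyTimer(ctx));
      timer->expires_after(std::chrono::microseconds(1));
      timer->async_wait([](asio_ns::error_code const& ec) {
        // ec is set when the timer is cancelled before it fires.
      });
      ctx.run();  // drive the single pending handler in this sketch
    }
    }  // namespace sketch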

@@ -22,6 +22,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 #include "LocalTaskQueue.h"
 
 #include "Basics/ConditionLocker.h"
 #include "Basics/Exceptions.h"
 #include "Basics/MutexLocker.h"

@@ -25,6 +25,8 @@
 
 #if ARANGODB_STANDALONE_ASIO
 
+#define ASIO_HAS_MOVE 1
+
 #include <asio/buffer.hpp>
 #include <asio/error.hpp>
 #include <asio/io_context.hpp>