
Feature/remove event loop (#5565)

Frank Celler 2018-06-11 11:46:17 +02:00 committed by Jan
parent 396d98a1cc
commit efc030ea87
37 changed files with 293 additions and 286 deletions
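
The change replaces the EventLoop value type (a pair of asio_ns::io_context* and rest::Scheduler* that every Task carried) with a plain Scheduler*. Tasks no longer reach the io_context directly; they ask the scheduler to construct asio objects for them. A minimal sketch of the shape of this refactoring, using standalone asio and simplified stand-in classes (the names below are illustrative, not ArangoDB's actual definitions):

// Simplified stand-ins illustrating the shape of the change; assumes
// standalone asio. Names are illustrative, not ArangoDB's own classes.
#include <asio.hpp>

namespace before {
struct Scheduler;

struct EventLoop {                 // the type this commit deletes
  asio::io_context* ioContext;     // raw context, reachable by every task
  Scheduler* scheduler;
};

struct Task {
  explicit Task(EventLoop loop) : _loop(loop) {}
  EventLoop _loop;                 // tasks dragged both pointers around
};
}  // namespace before

namespace after {
struct Scheduler {
  // tasks no longer see the io_context; they ask the scheduler to
  // construct asio objects on their behalf (see Scheduler.h below)
  asio::ip::tcp::acceptor* newAcceptor() {
    return new asio::ip::tcp::acceptor(_ioContext);
  }

 private:
  asio::io_context _ioContext;
};

struct Task {
  explicit Task(Scheduler* scheduler) : _scheduler(scheduler) {}
  Scheduler* const _scheduler;     // one narrower dependency
};
}  // namespace after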

View File

@ -66,12 +66,12 @@ static std::string const Open("/_open/");
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------
GeneralCommTask::GeneralCommTask(EventLoop loop, GeneralServer* server,
GeneralCommTask::GeneralCommTask(Scheduler* scheduler, GeneralServer* server,
std::unique_ptr<Socket> socket,
ConnectionInfo&& info, double keepAliveTimeout,
bool skipSocketInit)
: Task(loop, "GeneralCommTask"),
SocketTask(loop, std::move(socket), std::move(info), keepAliveTimeout,
: Task(scheduler, "GeneralCommTask"),
SocketTask(scheduler, std::move(socket), std::move(info), keepAliveTimeout,
skipSocketInit),
_server(server),
_auth(nullptr) {
@ -364,7 +364,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
} else if (handler->isDirect()) {
isDirect = true;
} else if (queuePrio != JobQueue::BACKGROUND_QUEUE &&
_loop.scheduler->shouldExecuteDirect()) {
_scheduler->shouldExecuteDirect()) {
isDirect = true;
} else if (ServerState::instance()->isDBServer()) {
isPrio = true;
@ -388,7 +388,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
auto self = shared_from_this();
if (isPrio) {
_loop.scheduler->post([self, this, handler]() {
_scheduler->post([self, this, handler]() {
handleRequestDirectly(basics::ConditionalLocking::DoLock,
std::move(handler));
});
@ -397,7 +397,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
// ok, we need to queue the request
LOG_TOPIC(TRACE, Logger::THREADS) << "too much work, queuing handler: "
<< _loop.scheduler->infoStatus();
<< _scheduler->infoStatus();
uint64_t messageId = handler->messageId();
auto job = std::make_unique<Job>(
_server, std::move(handler),
@ -419,7 +419,7 @@ bool GeneralCommTask::handleRequestSync(std::shared_ptr<RestHandler> handler) {
// Just run the handler, could have been called in a different thread
void GeneralCommTask::handleRequestDirectly(
bool doLock, std::shared_ptr<RestHandler> handler) {
TRI_ASSERT(doLock || _peer->strand.running_in_this_thread());
TRI_ASSERT(doLock || _peer->runningInThisThread());
handler->runHandler([this, doLock](rest::RestHandler* handler) {
RequestStatistics* stat = handler->stealStatistics();
@ -427,15 +427,12 @@ void GeneralCommTask::handleRequestDirectly(
if (doLock) {
auto self = shared_from_this();
auto h = handler->shared_from_this();
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this, stat, h]() {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this, stat, h]() {
addResponse(*(h->response()), stat);
});
} else {
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
addResponse(*handler->response(), stat);
}
});
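
Before this change, every call site that wanted to run work on the connection's strand had to increment the scheduler's queued counter, post, decrement, and open a JobGuard by hand; the commit folds that bookkeeping into Socket::post() and a new Scheduler::post(strand, callback) overload, both visible later in this diff. A minimal sketch of the wrapper, reusing the simplified Scheduler stand-in (illustrative names, not the exact ArangoDB API):

// Sketch of the bookkeeping that Socket::post()/Scheduler::post() now
// centralise; Scheduler here is a simplified stand-in, not the real class.
#include <asio.hpp>
#include <atomic>
#include <cstdint>
#include <functional>

struct Scheduler {
  std::atomic<uint64_t> _nrQueued{0};

  // every strand post now runs through one place, so the queued-jobs
  // counter can no longer drift out of sync with the posts themselves
  void post(asio::io_context::strand& strand, std::function<void()> cb) {
    ++_nrQueued;
    strand.post([this, cb] {
      --_nrQueued;
      cb();  // a JobGuard would mark the thread as working here
    });
  }
};

struct Socket {
  Socket(Scheduler* scheduler, asio::io_context& ctx)
      : _scheduler(scheduler), _strand(ctx) {}

  // call sites shrink from five lines of manual bookkeeping to one call
  void post(std::function<void()> handler) {
    _scheduler->post(_strand, handler);
  }
  bool runningInThisThread() { return _strand.running_in_this_thread(); }

  Scheduler* _scheduler;
  asio::io_context::strand _strand;
};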

View File

@ -85,7 +85,7 @@ class GeneralCommTask : public SocketTask {
GeneralCommTask const& operator=(GeneralCommTask const&) = delete;
public:
GeneralCommTask(EventLoop, GeneralServer*, std::unique_ptr<Socket>,
GeneralCommTask(Scheduler*, GeneralServer*, std::unique_ptr<Socket>,
ConnectionInfo&&, double keepAliveTimeout,
bool skipSocketInit = false);

View File

@ -38,11 +38,11 @@ using namespace arangodb::rest;
/// @brief listen to given port
////////////////////////////////////////////////////////////////////////////////
GeneralListenTask::GeneralListenTask(EventLoop loop, GeneralServer* server,
GeneralListenTask::GeneralListenTask(Scheduler* scheduler, GeneralServer* server,
Endpoint* endpoint,
ProtocolType connectionType)
: Task(loop, "GeneralListenTask"),
ListenTask(loop, endpoint),
: Task(scheduler, "GeneralListenTask"),
ListenTask(scheduler, endpoint),
_server(server),
_connectionType(connectionType) {
_keepAliveTimeout = GeneralServerFeature::keepAliveTimeout();
@ -52,7 +52,7 @@ GeneralListenTask::GeneralListenTask(EventLoop loop, GeneralServer* server,
void GeneralListenTask::handleConnected(std::unique_ptr<Socket> socket,
ConnectionInfo&& info) {
auto commTask = std::make_shared<HttpCommTask>(_loop, _server, std::move(socket),
auto commTask = std::make_shared<HttpCommTask>(_scheduler, _server, std::move(socket),
std::move(info), _keepAliveTimeout);
bool res = commTask->start();
LOG_TOPIC_IF(DEBUG, Logger::COMMUNICATION, res) << "Started comm task";

View File

@ -42,7 +42,7 @@ class GeneralListenTask final : public ListenTask {
GeneralListenTask& operator=(GeneralListenTask const&) = delete;
public:
GeneralListenTask(EventLoop, GeneralServer*, Endpoint*,
GeneralListenTask(Scheduler*, GeneralServer*, Endpoint*,
ProtocolType connectionType);
protected:

View File

@ -43,9 +43,7 @@ using namespace arangodb::rest;
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------
GeneralServer::~GeneralServer() {
_listenTasks.clear();
}
GeneralServer::~GeneralServer() { _listenTasks.clear(); }
// -----------------------------------------------------------------------------
// --SECTION-- public methods
@ -57,18 +55,20 @@ void GeneralServer::setEndpointList(EndpointList const* list) {
void GeneralServer::startListening() {
for (auto& it : _endpointList->allEndpoints()) {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "trying to bind to endpoint '" << it.first
<< "' for requests";
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "trying to bind to endpoint '"
<< it.first << "' for requests";
bool ok = openEndpoint(it.second);
if (ok) {
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "bound to endpoint '" << it.first << "'";
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "bound to endpoint '"
<< it.first << "'";
} else {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "failed to bind to endpoint '" << it.first
<< "'. Please check whether another instance is already "
"running using this endpoint and review your endpoints "
"configuration.";
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "failed to bind to endpoint '" << it.first
<< "'. Please check whether another instance is already "
"running using this endpoint and review your endpoints "
"configuration.";
FATAL_ERROR_EXIT_CODE(TRI_EXIT_COULD_NOT_BIND_PORT);
}
}
@ -94,8 +94,8 @@ bool GeneralServer::openEndpoint(Endpoint* endpoint) {
}
std::unique_ptr<ListenTask> task;
task.reset(new GeneralListenTask(SchedulerFeature::SCHEDULER->eventLoop(),
this, endpoint, protocolType));
task.reset(new GeneralListenTask(SchedulerFeature::SCHEDULER, this, endpoint,
protocolType));
if (!task->start()) {
return false;
}

View File

@ -46,11 +46,11 @@ size_t const HttpCommTask::MaximalBodySize = 1024 * 1024 * 1024; // 1024 MB
size_t const HttpCommTask::MaximalPipelineSize = 1024 * 1024 * 1024; // 1024 MB
size_t const HttpCommTask::RunCompactEvery = 500;
HttpCommTask::HttpCommTask(EventLoop loop, GeneralServer* server,
HttpCommTask::HttpCommTask(Scheduler* scheduler, GeneralServer* server,
std::unique_ptr<Socket> socket,
ConnectionInfo&& info, double timeout)
: Task(loop, "HttpCommTask"),
GeneralCommTask(loop, server, std::move(socket), std::move(info),
: Task(scheduler, "HttpCommTask"),
GeneralCommTask(scheduler, server, std::move(socket), std::move(info),
timeout),
_readPosition(0),
_startPosition(0),
@ -94,7 +94,8 @@ void HttpCommTask::addSimpleResponse(rest::ResponseCode code, rest::ContentType
void HttpCommTask::addResponse(GeneralResponse& baseResponse,
RequestStatistics* stat) {
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
HttpResponse& response = dynamic_cast<HttpResponse&>(baseResponse);
#else
@ -201,7 +202,7 @@ void HttpCommTask::addResponse(GeneralResponse& baseResponse,
// reads data from the socket
// caller must hold the _lock
bool HttpCommTask::processRead(double startTime) {
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
cancelKeepAlive();
TRI_ASSERT(_readBuffer.c_str() != nullptr);
@ -281,7 +282,7 @@ bool HttpCommTask::processRead(double startTime) {
}
std::shared_ptr<GeneralCommTask> commTask = std::make_shared<VstCommTask>(
_loop, _server, std::move(_peer), std::move(_connectionInfo),
_scheduler, _server, std::move(_peer), std::move(_connectionInfo),
GeneralServerFeature::keepAliveTimeout(),
protocolVersion, /*skipSocketInit*/ true);
commTask->addToReadBuffer(_readBuffer.c_str() + 11,
@ -594,7 +595,8 @@ bool HttpCommTask::processRead(double startTime) {
}
void HttpCommTask::processRequest(std::unique_ptr<HttpRequest> request) {
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
{
LOG_TOPIC(DEBUG, Logger::REQUESTS)
<< "\"http-request-begin\",\"" << (void*)this << "\",\""

View File

@ -17,7 +17,7 @@ class HttpCommTask final : public GeneralCommTask {
static size_t const RunCompactEvery;
public:
HttpCommTask(EventLoop, GeneralServer*, std::unique_ptr<Socket> socket,
HttpCommTask(Scheduler*, GeneralServer*, std::unique_ptr<Socket> socket,
ConnectionInfo&&, double timeout);
arangodb::Endpoint::TransportType transportType() override {

View File

@ -26,7 +26,6 @@
#include "Basics/Common.h"
#include "Rest/GeneralResponse.h"
#include "Scheduler/EventLoop.h"
#include "Scheduler/JobQueue.h"
namespace arangodb {

View File

@ -79,12 +79,12 @@ inline std::size_t validateAndCount(char const* vpStart,
}
VstCommTask::VstCommTask(EventLoop loop, GeneralServer* server,
VstCommTask::VstCommTask(Scheduler* scheduler, GeneralServer* server,
std::unique_ptr<Socket> socket, ConnectionInfo&& info,
double timeout, ProtocolVersion protocolVersion,
bool skipInit)
: Task(loop, "VstCommTask"),
GeneralCommTask(loop, server, std::move(socket), std::move(info), timeout,
: Task(scheduler, "VstCommTask"),
GeneralCommTask(scheduler, server, std::move(socket), std::move(info), timeout,
skipInit),
_authorized(false),
_authMethod(rest::AuthenticationMethod::NONE),
@ -119,8 +119,8 @@ void VstCommTask::addSimpleResponse(rest::ResponseCode code, rest::ContentType r
void VstCommTask::addResponse(GeneralResponse& baseResponse,
RequestStatistics* stat) {
TRI_ASSERT(_peer->strand.running_in_this_thread());
//_lock.assertLockedByCurrentThread();
TRI_ASSERT(_peer->runningInThisThread());
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
VstResponse& response = dynamic_cast<VstResponse&>(baseResponse);
#else
@ -316,8 +316,7 @@ void VstCommTask::handleAuthHeader(VPackSlice const& header,
// reads data from the socket
bool VstCommTask::processRead(double startTime) {
TRI_ASSERT(_peer->strand.running_in_this_thread());
//_lock.assertLockedByCurrentThread();
TRI_ASSERT(_peer->runningInThisThread());
auto& prv = _processReadVariables;
auto chunkBegin = _readBuffer.begin() + prv._readBufferOffset;

View File

@ -38,7 +38,7 @@ namespace rest {
class VstCommTask final : public GeneralCommTask {
public:
VstCommTask(EventLoop, GeneralServer*, std::unique_ptr<Socket> socket,
VstCommTask(Scheduler*, GeneralServer*, std::unique_ptr<Socket> socket,
ConnectionInfo&&, double timeout, ProtocolVersion protocolVersion,
bool skipSocketInit = false);

View File

@ -419,12 +419,10 @@ void Conductor::startRecovery() {
_statistics.reset();
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
asio::io_context* ioService = SchedulerFeature::SCHEDULER->ioContext();
TRI_ASSERT(ioService != nullptr);
// let's wait for a final state in the cluster
_boost_timer.reset(new asio::deadline_timer(
*ioService, boost::posix_time::seconds(2)));
_boost_timer.reset(SchedulerFeature::SCHEDULER->newDeadlineTimer(
boost::posix_time::seconds(2)));
_boost_timer->async_wait([this](const asio::error_code& error) {
_boost_timer.reset();

View File

@ -576,15 +576,13 @@ void Worker<V, E, M>::_continueAsync() {
}
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
asio::io_context* ioService = SchedulerFeature::SCHEDULER->ioContext();
TRI_ASSERT(ioService != nullptr);
// wait for new messages before beginning to process
int64_t milli =
_writeCache->containedMessageCount() < _messageBatchSize ? 50 : 5;
// start the next iteration in $milli milliseconds.
_boost_timer.reset(new asio::deadline_timer(
*ioService, boost::posix_time::millisec(milli)));
_boost_timer.reset(SchedulerFeature::SCHEDULER->newDeadlineTimer(
boost::posix_time::millisec(milli)));
_boost_timer->async_wait([this](const asio::error_code& error) {
if (error != asio::error::operation_aborted) {
{ // swap these pointers atomically
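
Both Pregel call sites now obtain their timers from the scheduler instead of first extracting the io_context. A sketch of the new flow, assuming asio is built with Boost.Date_Time support so asio::deadline_timer is available (which the diff's posix_time usage implies), with the simplified Scheduler stand-in as before:

// Sketch of the new timer flow; assumes asio::deadline_timer is available
// (asio built with Boost.Date_Time, as the diff's posix_time usage implies).
#include <asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <cstdint>
#include <memory>

struct Scheduler {
  template <typename T>
  asio::deadline_timer* newDeadlineTimer(T timeout) {
    return new asio::deadline_timer(_ioContext, timeout);
  }
  asio::io_context _ioContext;
};

// retry in `milli` milliseconds without ever touching the io_context
void continueLater(Scheduler* scheduler,
                   std::unique_ptr<asio::deadline_timer>& timer,
                   int64_t milli) {
  timer.reset(scheduler->newDeadlineTimer(boost::posix_time::millisec(milli)));
  timer->async_wait([](const asio::error_code& error) {
    if (error != asio::error::operation_aborted) {
      // the timer fired (was not cancelled): start the next iteration
    }
  });
}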

View File

@ -31,15 +31,15 @@
using namespace arangodb;
Acceptor::Acceptor(asio_ns::io_context& ioService, Endpoint* endpoint)
: _ioContext(ioService), _endpoint(endpoint) {}
Acceptor::Acceptor(rest::Scheduler* scheduler, Endpoint* endpoint)
: _scheduler(scheduler), _endpoint(endpoint) {}
std::unique_ptr<Acceptor> Acceptor::factory(asio_ns::io_context& ioService,
std::unique_ptr<Acceptor> Acceptor::factory(rest::Scheduler* scheduler,
Endpoint* endpoint) {
#ifdef ARANGODB_HAVE_DOMAIN_SOCKETS
if (endpoint->domainType() == Endpoint::DomainType::UNIX) {
return std::make_unique<AcceptorUnixDomain>(ioService, endpoint);
return std::make_unique<AcceptorUnixDomain>(scheduler, endpoint);
}
#endif
return std::make_unique<AcceptorTcp>(ioService, endpoint);
return std::make_unique<AcceptorTcp>(scheduler, endpoint);
}

View File

@ -36,7 +36,7 @@ class Acceptor {
typedef std::function<void(asio_ns::error_code const&)> AcceptHandler;
public:
Acceptor(asio_ns::io_context& ioService, Endpoint* endpoint);
Acceptor(rest::Scheduler*, Endpoint* endpoint);
virtual ~Acceptor() {}
public:
@ -46,11 +46,10 @@ class Acceptor {
std::unique_ptr<Socket> movePeer() { return std::move(_peer); };
public:
static std::unique_ptr<Acceptor> factory(asio_ns::io_context& _ioService,
Endpoint* endpoint);
static std::unique_ptr<Acceptor> factory(rest::Scheduler*, Endpoint*);
protected:
asio_ns::io_context& _ioContext;
rest::Scheduler* _scheduler;
Endpoint* _endpoint;
std::unique_ptr<Socket> _peer;
};

View File

@ -31,7 +31,7 @@
using namespace arangodb;
void AcceptorTcp::open() {
asio_ns::ip::tcp::resolver resolver(_ioContext);
std::unique_ptr<asio_ns::ip::tcp::resolver> resolver(_scheduler->newResolver());
std::string hostname = _endpoint->host();
int portNumber = _endpoint->port();
@ -53,7 +53,7 @@ void AcceptorTcp::open() {
THROW_ARANGO_EXCEPTION(TRI_ERROR_IP_ADDRESS_INVALID);
}
asio_ns::ip::tcp::resolver::iterator iter = resolver.resolve(*query, err);
asio_ns::ip::tcp::resolver::iterator iter = resolver->resolve(*query, err);
if (err) {
LOG_TOPIC(ERR, Logger::COMMUNICATION)
<< "unable to to resolve endpoint ' " << _endpoint->specification()
@ -68,7 +68,7 @@ void AcceptorTcp::open() {
asioEndpoint = iter->endpoint(); // function not documented in boost?!
}
_acceptor.open(asioEndpoint.protocol());
_acceptor->open(asioEndpoint.protocol());
#ifdef _WIN32
// on Windows everything is different of course:
@ -85,11 +85,11 @@ void AcceptorTcp::open() {
"unable to set acceptor socket option");
}
#else
_acceptor.set_option(asio_ns::ip::tcp::acceptor::reuse_address(
_acceptor->set_option(asio_ns::ip::tcp::acceptor::reuse_address(
((EndpointIp*)_endpoint)->reuseAddress()));
#endif
_acceptor.bind(asioEndpoint, err);
_acceptor->bind(asioEndpoint, err);
if (err) {
LOG_TOPIC(ERR, Logger::COMMUNICATION) << "unable to bind to endpoint '"
<< _endpoint->specification()
@ -98,7 +98,7 @@ void AcceptorTcp::open() {
}
TRI_ASSERT(_endpoint->listenBacklog() > 8);
_acceptor.listen(_endpoint->listenBacklog(), err);
_acceptor->listen(_endpoint->listenBacklog(), err);
if (err) {
LOG_TOPIC(ERR, Logger::COMMUNICATION) << "unable to listen to endpoint '"
<< _endpoint->specification() << ": "
@ -110,13 +110,13 @@ void AcceptorTcp::open() {
void AcceptorTcp::asyncAccept(AcceptHandler const& handler) {
TRI_ASSERT(!_peer);
if (_endpoint->encryption() == Endpoint::EncryptionType::SSL) {
_peer.reset(new SocketSslTcp(_ioContext,
_peer.reset(new SocketSslTcp(_scheduler,
SslServerFeature::SSL->createSslContext()));
SocketSslTcp* peer = static_cast<SocketSslTcp*>(_peer.get());
_acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
_acceptor->async_accept(peer->_socket, peer->_peerEndpoint, handler);
} else {
_peer.reset(new SocketTcp(_ioContext));
_peer.reset(new SocketTcp(_scheduler));
SocketTcp* peer = static_cast<SocketTcp*>(_peer.get());
_acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
_acceptor->async_accept(*peer->_socket, peer->_peerEndpoint, handler);
}
}

View File

@ -28,16 +28,16 @@
namespace arangodb {
class AcceptorTcp final : public Acceptor {
public:
AcceptorTcp(asio_ns::io_context& ioContext, Endpoint* endpoint)
: Acceptor(ioContext, endpoint), _acceptor(ioContext) {}
AcceptorTcp(rest::Scheduler* scheduler, Endpoint* endpoint)
: Acceptor(scheduler, endpoint), _acceptor(scheduler->newAcceptor()) {}
public:
void open() override;
void close() override { _acceptor.close(); };
void close() override { _acceptor->close(); };
void asyncAccept(Acceptor::AcceptHandler const& handler) override;
private:
asio_ns::ip::tcp::acceptor _acceptor;
std::unique_ptr<asio_ns::ip::tcp::acceptor> _acceptor;
};
}

View File

@ -45,23 +45,23 @@ void AcceptorUnixDomain::open() {
}
asio_ns::local::stream_protocol::stream_protocol::endpoint endpoint(path);
_acceptor.open(endpoint.protocol());
_acceptor.bind(endpoint);
_acceptor.listen();
_acceptor->open(endpoint.protocol());
_acceptor->bind(endpoint);
_acceptor->listen();
}
void AcceptorUnixDomain::asyncAccept(AcceptHandler const& handler) {
TRI_ASSERT(!_peer);
_peer.reset(new SocketUnixDomain(_ioContext));
_peer.reset(new SocketUnixDomain(_scheduler));
auto peer = dynamic_cast<SocketUnixDomain*>(_peer.get());
if (peer == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected socket type");
}
_acceptor.async_accept(peer->_socket, peer->_peerEndpoint, handler);
_acceptor->async_accept(*peer->_socket, peer->_peerEndpoint, handler);
}
void AcceptorUnixDomain::close() {
_acceptor.close();
_acceptor->close();
int error = 0;
std::string path = ((EndpointUnixDomain*) _endpoint)->path();
if (!basics::FileUtils::remove(path, &error)) {

View File

@ -28,8 +28,9 @@
namespace arangodb {
class AcceptorUnixDomain final : public Acceptor {
public:
AcceptorUnixDomain(asio_ns::io_context& ioService, Endpoint* endpoint)
: Acceptor(ioService, endpoint), _acceptor(ioService) {}
AcceptorUnixDomain(rest::Scheduler* scheduler, Endpoint* endpoint)
: Acceptor(scheduler, endpoint),
_acceptor(scheduler->newDomainAcceptor()) {}
public:
void open() override;
@ -37,7 +38,7 @@ class AcceptorUnixDomain final : public Acceptor {
void asyncAccept(AcceptHandler const& handler) override;
private:
asio_ns::local::stream_protocol::acceptor _acceptor;
std::unique_ptr<asio_ns::local::stream_protocol::acceptor> _acceptor;
};
}

View File

@ -1,48 +0,0 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2018 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Achim Brandt
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_SCHEDULER_EVENTS_H
#define ARANGOD_SCHEDULER_EVENTS_H 1
#include "Basics/Common.h"
#include "Scheduler/Socket.h"
namespace arangodb {
namespace rest {
class Scheduler;
}
struct EventLoop {
EventLoop(asio_ns::io_context* service, rest::Scheduler* schdlr)
: ioContext(service), scheduler(schdlr) {}
EventLoop() : EventLoop(nullptr, nullptr) {}
asio_ns::io_context* ioContext;
rest::Scheduler* scheduler;
};
}
#endif

View File

@ -26,7 +26,6 @@
#include "Basics/Common.h"
#include "Basics/SameThreadAsserter.h"
#include "Scheduler/EventLoop.h"
#include "Scheduler/Scheduler.h"
namespace arangodb {
@ -39,8 +38,8 @@ class JobGuard : public SameThreadAsserter {
JobGuard(JobGuard const&) = delete;
JobGuard& operator=(JobGuard const&) = delete;
explicit JobGuard(EventLoop const& loop) : SameThreadAsserter(), _scheduler(loop.scheduler) {}
explicit JobGuard(rest::Scheduler* scheduler) : SameThreadAsserter(), _scheduler(scheduler) {}
explicit JobGuard(rest::Scheduler* scheduler)
: SameThreadAsserter(), _scheduler(scheduler) {}
~JobGuard() { release(); }
public:
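
JobGuard itself only swaps its constructor argument from EventLoop to Scheduler*, but the pattern it implements is central to this diff: an RAII helper that marks the current thread as working in the scheduler's counters and releases that mark on destruction. A hedged sketch follows; work(), release() and the destructor are visible above, while the incWorking()/decWorking() counter calls are assumptions:

// Hedged sketch of the JobGuard pattern: an RAII helper that flags the
// current thread as "working" for the scheduler's thread counters.
// incWorking()/decWorking() are assumed counterparts of numWorking().
#include <atomic>
#include <cstdint>

struct Scheduler {
  std::atomic<uint64_t> working{0};  // stand-in for the packed _counters
  void incWorking() { ++working; }
  void decWorking() { --working; }
};

class JobGuard {
 public:
  explicit JobGuard(Scheduler* scheduler) : _scheduler(scheduler) {}
  ~JobGuard() { release(); }  // matches ~JobGuard() { release(); } above

  void work() {               // call once work actually starts
    if (!_isWorking) {
      _scheduler->incWorking();
      _isWorking = true;
    }
  }

  void release() {            // idempotent, so the destructor is safe
    if (_isWorking) {
      _scheduler->decWorking();
      _isWorking = false;
    }
  }

 private:
  Scheduler* const _scheduler;
  bool _isWorking = false;
};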

View File

@ -37,11 +37,11 @@ using namespace arangodb::rest;
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------
ListenTask::ListenTask(EventLoop loop, Endpoint* endpoint)
: Task(loop, "ListenTask"),
ListenTask::ListenTask(Scheduler* scheduler, Endpoint* endpoint)
: Task(scheduler, "ListenTask"),
_endpoint(endpoint),
_bound(false),
_acceptor(Acceptor::factory(*loop.ioContext, endpoint)) {}
_acceptor(Acceptor::factory(scheduler, endpoint)) {}
ListenTask::~ListenTask() {}
@ -69,7 +69,7 @@ bool ListenTask::start() {
_handler = [this](asio_ns::error_code const& ec) {
MUTEX_LOCKER(mutex, _shutdownMutex);
JobGuard guard(_loop);
JobGuard guard(_scheduler);
guard.work();
if (!_bound) {

View File

@ -39,7 +39,7 @@ class ListenTask : virtual public rest::Task {
static size_t const MAX_ACCEPT_ERRORS = 128;
public:
ListenTask(EventLoop, Endpoint*);
ListenTask(rest::Scheduler*, Endpoint*);
~ListenTask();
public:

View File

@ -36,6 +36,7 @@
#include "Logger/Logger.h"
#include "Random/RandomGenerator.h"
#include "Rest/GeneralResponse.h"
#include "Scheduler/Acceptor.h"
#include "Scheduler/JobGuard.h"
#include "Scheduler/JobQueue.h"
#include "Scheduler/Task.h"
@ -186,14 +187,42 @@ Scheduler::~Scheduler() {
void Scheduler::post(std::function<void()> callback) {
++_nrQueued;
_ioContext.get()->post([this, callback]() {
try {
// capture without self, ioContext will not live longer than scheduler
_ioContext.get()->post([this, callback]() {
--_nrQueued;
JobGuard guard(this);
guard.work();
callback();
});
} catch (...) {
--_nrQueued;
throw;
}
}
JobGuard guard(this);
guard.work();
void Scheduler::post(asio_ns::io_context::strand& strand,
std::function<void()> callback) {
++_nrQueued;
callback();
});
try {
// capture without self, ioContext will not live longer than scheduler
strand.post([this, callback]() {
--_nrQueued;
JobGuard guard(this);
guard.work();
callback();
});
} catch (...) {
--_nrQueued;
throw;
}
}
bool Scheduler::start() {
@ -431,6 +460,7 @@ void Scheduler::rebalanceThreads() {
// all threads are maxed out
_lastAllBusyStamp = now;
// increase nrRunning by one here already, while holding the lock
incRunning();
}
@ -512,3 +542,4 @@ void Scheduler::initializeSignalHandlers() {
}
#endif
}
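
The reworked post() orders its counter updates deliberately: _nrQueued is incremented before handing the callback to asio, so numQueued() never undercounts, and the new catch block rolls the increment back if the post itself throws, since the handler will then never run to decrement it. The pattern in isolation, on the simplified Scheduler stand-in:

// Isolated sketch of the exception-safety pattern in the new post();
// simplified stand-in, not the real Scheduler class.
#include <asio.hpp>
#include <atomic>
#include <cstdint>
#include <functional>

struct Scheduler {
  std::atomic<uint64_t> _nrQueued{0};
  asio::io_context _ioContext;

  void post(std::function<void()> callback) {
    ++_nrQueued;  // count first, so the counter never undercounts
    try {
      // capture `this` only: the io_context cannot outlive the scheduler
      _ioContext.post([this, callback] {
        --_nrQueued;
        callback();
      });
    } catch (...) {
      --_nrQueued;  // the handler will never run; roll the count back
      throw;
    }
  }
};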

View File

@ -28,23 +28,22 @@
#include "Basics/Common.h"
#include "Basics/Mutex.h"
#include "Basics/asio_ns.h"
#include "Basics/socket-utils.h"
#include "Scheduler/EventLoop.h"
#include "Endpoint/Endpoint.h"
#include "Scheduler/Job.h"
#include "Scheduler/Socket.h"
namespace arangodb {
class JobQueue;
class Acceptor;
class JobGuard;
class JobQueue;
class ListenTask;
namespace velocypack {
class Builder;
}
class ListenTask;
namespace rest {
class GeneralCommTask;
class SocketTask;
@ -63,17 +62,11 @@ class Scheduler {
virtual ~Scheduler();
public:
asio_ns::io_context* ioContext() const { return _ioContext.get(); }
// XXX-TODO remove, replace with signal handler
asio_ns::io_context* managerService() const { return _managerService.get(); }
EventLoop eventLoop() {
// cannot use
// return EventLoop{._ioService = *_ioService.get(), ._scheduler = this};
// because windows complains ...
return EventLoop{_ioContext.get(), this};
}
void post(std::function<void()> callback);
void post(asio_ns::io_context::strand&, std::function<void()> callback);
bool start();
bool isRunning() const { return numRunning(_counters) > 0; }
@ -83,6 +76,50 @@ class Scheduler {
bool isStopping() { return (_counters & (1ULL << 63)) != 0; }
void shutdown();
template<typename T>
asio_ns::deadline_timer* newDeadlineTimer(T timeout) {
return new asio_ns::deadline_timer(*_ioContext, timeout);
}
asio_ns::steady_timer* newSteadyTimer() {
return new asio_ns::steady_timer(*_ioContext);
}
asio_ns::io_context::strand* newStrand() {
return new asio_ns::io_context::strand(*_ioContext);
}
asio_ns::ip::tcp::acceptor*
newAcceptor() {
return new asio_ns::ip::tcp::acceptor(*_ioContext);
}
asio_ns::local::stream_protocol::acceptor*
newDomainAcceptor() {
return new asio_ns::local::stream_protocol::acceptor(*_ioContext);
}
asio_ns::ip::tcp::socket*
newSocket() {
return new asio_ns::ip::tcp::socket(*_ioContext);
}
asio_ns::local::stream_protocol::socket*
newDomainSocket() {
return new asio_ns::local::stream_protocol::socket(*_ioContext);
}
asio_ns::ssl::stream<asio_ns::ip::tcp::socket>*
newSslSocket(asio_ns::ssl::context& context) {
return new asio_ns::ssl::stream<asio_ns::ip::tcp::socket>(
*_ioContext, context);
}
asio_ns::ip::tcp::resolver*
newResolver() {
return new asio_ns::ip::tcp::resolver(*_ioContext);
}
public:
// decrements the nrRunning counter for the thread
void stopThread();
@ -102,21 +139,21 @@ class Scheduler {
uint64_t minimum() const { return _nrMinimum; }
// number of queued handlers
// number of jobs that have currently been posted to the io_context,
// but whose handler has not yet been called. The total number of
// handlers is numQueued() + numRunning(_counters)
inline uint64_t numQueued() const noexcept { return _nrQueued; };
inline uint64_t getCounters() const noexcept { return _counters; }
// number of running threads
static uint64_t numRunning(uint64_t value) noexcept {
return value & 0xFFFFULL;
}
// number of working threads
static uint64_t numWorking(uint64_t value) noexcept {
return (value >> 16) & 0xFFFFULL;
}
// number of blocked threads
static uint64_t numBlocked(uint64_t value) noexcept {
return (value >> 32) & 0xFFFFULL;
}
@ -142,13 +179,15 @@ class Scheduler {
// AA BB CC DD
//
// we use the lowest 2 bytes (DD) to store the number of running threads
//
// the next lowest bytes (CC) are used to store the number of currently
// working threads
//
// the next bytes (BB) are used to store the number of currently blocked
// threads
//
// the highest bytes (AA) are used only to encode a stopping bit. when this
// bit is
// set, the scheduler is stopping (or already stopped)
// bit is set, the scheduler is stopping (or already stopped)
inline void setStopping() noexcept { _counters |= (1ULL << 63); }
inline void incRunning() noexcept { _counters += 1ULL << 0; }
@ -192,7 +231,6 @@ class Scheduler {
// meaning of its individual bits
std::atomic<uint64_t> _counters;
// number of jobs that are currently queued, but not yet worked on
std::atomic<uint64_t> _nrQueued;
std::unique_ptr<JobQueue> _jobQueue;
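
The AA BB CC DD comment above describes one atomic 64-bit word packing four values, read back by the numRunning()/numWorking()/numBlocked() accessors. A worked example of the encoding and decoding:

// Worked example of the packed _counters word described above
// (AA BB CC DD): running in bits 0-15, working in 16-31, blocked in
// 32-47, and the stopping bit at bit 63.
#include <cassert>
#include <cstdint>

static uint64_t numRunning(uint64_t v) { return v & 0xFFFFULL; }
static uint64_t numWorking(uint64_t v) { return (v >> 16) & 0xFFFFULL; }
static uint64_t numBlocked(uint64_t v) { return (v >> 32) & 0xFFFFULL; }
static bool isStopping(uint64_t v) { return (v & (1ULL << 63)) != 0; }

int main() {
  uint64_t counters = 0;
  counters += 1ULL << 0;   // incRunning()
  counters += 1ULL << 0;   // incRunning(): two threads running
  counters += 1ULL << 16;  // incWorking(): one of them is working
  counters |= 1ULL << 63;  // setStopping()

  assert(numRunning(counters) == 2);
  assert(numWorking(counters) == 1);
  assert(numBlocked(counters) == 0);
  assert(isStopping(counters));
  return 0;
}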

View File

@ -28,19 +28,27 @@
#include "Basics/StringBuffer.h"
#include "Basics/asio_ns.h"
#include "Logger/Logger.h"
#include "Scheduler/JobGuard.h"
namespace arangodb {
namespace rest {
class Scheduler;
}
typedef std::function<void(const asio_ns::error_code& ec,
std::size_t transferred)>
AsyncHandler;
class Socket {
public:
Socket(asio_ns::io_context& ioContext, bool encrypted)
: _encrypted(encrypted), strand(ioContext) {}
Socket(rest::Scheduler* scheduler, bool encrypted)
: _strand(scheduler->newStrand()),
_encrypted(encrypted),
_scheduler(scheduler) {
TRI_ASSERT(_scheduler != nullptr);
}
Socket(Socket const& that) = delete;
Socket(Socket&& that) = delete;
virtual ~Socket() {}
@ -76,6 +84,12 @@ class Socket {
}
}
void post(std::function<void()> handler) {
_scheduler->post(*_strand, handler);
}
bool runningInThisThread() { return _strand->running_in_this_thread(); }
public:
virtual std::string peerAddress() const = 0;
virtual int peerPort() const = 0;
@ -96,13 +110,14 @@ class Socket {
virtual void shutdownReceive(asio_ns::error_code& ec) = 0;
virtual void shutdownSend(asio_ns::error_code& ec) = 0;
protected:
// strand to ensure the connection's handlers are not called concurrently.
std::unique_ptr<asio_ns::io_context::strand> _strand;
private:
bool const _encrypted;
bool _handshakeDone = false;
public:
// strand to ensure the connection's handlers are not called concurrently.
asio_ns::io_context::strand strand;
rest::Scheduler* _scheduler;
};
}
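
The strand moves behind the Socket interface (post() and runningInThisThread()), and the comment states its purpose: handlers belonging to one connection never run concurrently, even when several threads run the io_context. A small self-contained demonstration of that guarantee:

// Self-contained demonstration of the guarantee the comment describes:
// with handlers wrapped by one strand, two io_context threads never run
// them concurrently, so the unsynchronised counter stays exact.
#include <asio.hpp>
#include <iostream>
#include <thread>

int main() {
  asio::io_context ctx;
  asio::io_context::strand strand(ctx);

  int counter = 0;  // deliberately unsynchronised
  for (int i = 0; i < 10000; ++i) {
    strand.post([&counter] { ++counter; });  // serialised by the strand
  }

  std::thread t1([&ctx] { ctx.run(); });
  std::thread t2([&ctx] { ctx.run(); });
  t1.join();
  t2.join();

  std::cout << counter << std::endl;  // always 10000
  return 0;
}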

View File

@ -34,7 +34,7 @@ bool SocketSslTcp::sslHandshake() {
while (true) {
ec.clear();
_sslSocket.handshake(asio_ns::ssl::stream_base::handshake_type::server, ec);
_sslSocket->handshake(asio_ns::ssl::stream_base::handshake_type::server, ec);
if (ec.value() != asio_ns::error::would_block) {
break;

View File

@ -33,11 +33,11 @@ class SocketSslTcp final : public Socket {
friend class AcceptorTcp;
public:
SocketSslTcp(asio_ns::io_context& ioService, asio_ns::ssl::context&& context)
: Socket(ioService, /*encrypted*/ true),
SocketSslTcp(rest::Scheduler* scheduler, asio_ns::ssl::context&& context)
: Socket(scheduler, /*encrypted*/ true),
_sslContext(std::move(context)),
_sslSocket(ioService, _sslContext),
_socket(_sslSocket.next_layer()),
_sslSocket(scheduler->newSslSocket(_sslContext)),
_socket(_sslSocket->next_layer()),
_peerEndpoint() {}
SocketSslTcp(SocketSslTcp const& that) = delete;
@ -55,23 +55,23 @@ class SocketSslTcp final : public Socket {
size_t writeSome(basics::StringBuffer* buffer,
asio_ns::error_code& ec) override {
return _sslSocket.write_some(
return _sslSocket->write_some(
asio_ns::buffer(buffer->begin(), buffer->length()), ec);
}
void asyncWrite(asio_ns::mutable_buffers_1 const& buffer,
AsyncHandler const& handler) override {
return asio_ns::async_write(_sslSocket, buffer, strand.wrap(handler));
return asio_ns::async_write(*_sslSocket, buffer, _strand->wrap(handler));
}
size_t readSome(asio_ns::mutable_buffers_1 const& buffer,
asio_ns::error_code& ec) override {
return _sslSocket.read_some(buffer, ec);
return _sslSocket->read_some(buffer, ec);
}
void asyncRead(asio_ns::mutable_buffers_1 const& buffer,
AsyncHandler const& handler) override {
return _sslSocket.async_read_some(buffer, strand.wrap(handler));
return _sslSocket->async_read_some(buffer, _strand->wrap(handler));
}
std::size_t available(asio_ns::error_code& ec) override {
@ -101,7 +101,7 @@ class SocketSslTcp final : public Socket {
private:
asio_ns::ssl::context _sslContext;
asio_ns::ssl::stream<asio_ns::ip::tcp::socket> _sslSocket;
std::unique_ptr<asio_ns::ssl::stream<asio_ns::ip::tcp::socket>> _sslSocket;
asio_ns::ip::tcp::socket& _socket;
asio_ns::ip::tcp::acceptor::endpoint_type _peerEndpoint;
};

View File

@ -31,7 +31,6 @@
#include "Basics/socket-utils.h"
#include "Endpoint/ConnectionInfo.h"
#include "Logger/Logger.h"
#include "Scheduler/EventLoop.h"
#include "Scheduler/JobGuard.h"
#include "Scheduler/Scheduler.h"
#include "Scheduler/SchedulerFeature.h"
@ -45,11 +44,11 @@ using namespace arangodb::rest;
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------
SocketTask::SocketTask(arangodb::EventLoop loop,
SocketTask::SocketTask(Scheduler* scheduler,
std::unique_ptr<arangodb::Socket> socket,
arangodb::ConnectionInfo&& connectionInfo,
double keepAliveTimeout, bool skipInit = false)
: Task(loop, "SocketTask"),
: Task(scheduler, "SocketTask"),
_peer(std::move(socket)),
_connectionInfo(std::move(connectionInfo)),
_connectionStatistics(nullptr),
@ -57,7 +56,7 @@ SocketTask::SocketTask(arangodb::EventLoop loop,
_stringBuffers{_stringBuffersArena},
_writeBuffer(nullptr, nullptr),
_keepAliveTimeout(static_cast<long>(keepAliveTimeout * 1000)),
_keepAliveTimer(*_loop.ioContext, _keepAliveTimeout),
_keepAliveTimer(scheduler->newDeadlineTimer(_keepAliveTimeout)),
_useKeepAliveTimer(keepAliveTimeout > 0.0),
_keepAliveTimerActive(false),
_closeRequested(false),
@ -88,7 +87,7 @@ SocketTask::~SocketTask() {
asio_ns::error_code err;
if (_keepAliveTimerActive.load(std::memory_order_relaxed)) {
_keepAliveTimer.cancel(err);
_keepAliveTimer->cancel(err);
}
if (err) {
@ -132,13 +131,11 @@ bool SocketTask::start() {
<< _connectionInfo.clientPort;
auto self = shared_from_this();
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this]() {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this]() {
asyncReadSome();
});
return true;
}
@ -148,7 +145,7 @@ bool SocketTask::start() {
// caller must hold the _lock
void SocketTask::addWriteBuffer(WriteBuffer&& buffer) {
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
if (_closedSend.load(std::memory_order_acquire) ||
_abandoned.load(std::memory_order_acquire)) {
@ -172,7 +169,7 @@ void SocketTask::addWriteBuffer(WriteBuffer&& buffer) {
// caller must hold the _lock
bool SocketTask::completedWriteBuffer() {
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
RequestStatistics::SET_WRITE_END(_writeBuffer._statistics);
_writeBuffer.release(this); // try to recycle the string buffer
@ -197,11 +194,8 @@ void SocketTask::closeStream() {
// strand::dispatch may execute this immediately if this
// is called on a thread inside the same strand
auto self = shared_from_this();
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this] {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this] {
closeStreamNoLock();
});
}
@ -209,7 +203,7 @@ void SocketTask::closeStream() {
// caller must hold the _lock
void SocketTask::closeStreamNoLock() {
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
bool mustCloseSend = !_closedSend.load(std::memory_order_acquire);
bool mustCloseReceive = !_closedReceive.load(std::memory_order_acquire);
@ -223,7 +217,7 @@ void SocketTask::closeStreamNoLock() {
_closedSend.store(true, std::memory_order_release);
_closedReceive.store(true, std::memory_order_release);
_closeRequested.store(false, std::memory_order_release);
_keepAliveTimer.cancel();
_keepAliveTimer->cancel();
_keepAliveTimerActive.store(false, std::memory_order_relaxed);
}
@ -234,7 +228,8 @@ void SocketTask::closeStreamNoLock() {
// will acquire the _lock
void SocketTask::addToReadBuffer(char const* data, std::size_t len) {
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
_readBuffer.appendText(data, len);
}
@ -242,7 +237,7 @@ void SocketTask::addToReadBuffer(char const* data, std::size_t len) {
void SocketTask::resetKeepAlive() {
if (_useKeepAliveTimer) {
asio_ns::error_code err;
_keepAliveTimer.expires_from_now(_keepAliveTimeout, err);
_keepAliveTimer->expires_from_now(_keepAliveTimeout, err);
if (err) {
closeStream();
return;
@ -250,7 +245,7 @@ void SocketTask::resetKeepAlive() {
_keepAliveTimerActive.store(true, std::memory_order_relaxed);
auto self = shared_from_this();
_keepAliveTimer.async_wait([self, this](const asio_ns::error_code& error) {
_keepAliveTimer->async_wait([self, this](const asio_ns::error_code& error) {
if (!error) { // error will be true if timer was canceled
LOG_TOPIC(ERR, Logger::COMMUNICATION)
<< "keep alive timout - closing stream!";
@ -265,7 +260,7 @@ void SocketTask::cancelKeepAlive() {
if (_useKeepAliveTimer &&
_keepAliveTimerActive.load(std::memory_order_relaxed)) {
asio_ns::error_code err;
_keepAliveTimer.cancel(err);
_keepAliveTimer->cancel(err);
_keepAliveTimerActive.store(false, std::memory_order_relaxed);
}
}
@ -273,7 +268,8 @@ void SocketTask::cancelKeepAlive() {
// caller must hold the _lock
bool SocketTask::reserveMemory() {
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
if (_readBuffer.reserve(READ_BLOCK_SIZE + 1) == TRI_ERROR_OUT_OF_MEMORY) {
LOG_TOPIC(WARN, arangodb::Logger::COMMUNICATION)
<< "out of memory while reading from client";
@ -291,7 +287,7 @@ bool SocketTask::trySyncRead() {
}
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
asio_ns::error_code err;
TRI_ASSERT(_peer != nullptr);
@ -339,7 +335,7 @@ bool SocketTask::trySyncRead() {
// (new read)
bool SocketTask::processAll() {
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
double startTime = StatisticsFeature::time();
Result res;
@ -381,7 +377,7 @@ bool SocketTask::processAll() {
// must be invoked on strand
void SocketTask::asyncReadSome() {
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
try {
size_t const MAX_DIRECT_TRIES = 2;
@ -434,7 +430,7 @@ void SocketTask::asyncReadSome() {
_peer->asyncRead(
asio_ns::buffer(_readBuffer.end(), READ_BLOCK_SIZE),
[self, this](const asio_ns::error_code& ec, std::size_t transferred) {
JobGuard guard(_loop);
JobGuard guard(_scheduler);
guard.work();
if (_abandoned.load(std::memory_order_acquire)) {
@ -446,19 +442,11 @@ void SocketTask::asyncReadSome() {
return;
}
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this, transferred] {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this, transferred] {
_readBuffer.increaseLength(transferred);
if (processAll()) {
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this]() {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this]() {
asyncReadSome();
});
}
@ -469,7 +457,7 @@ void SocketTask::asyncReadSome() {
void SocketTask::asyncWriteSome() {
TRI_ASSERT(_peer != nullptr);
TRI_ASSERT(_peer->strand.running_in_this_thread());
TRI_ASSERT(_peer->runningInThisThread());
if (_writeBuffer.empty()) {
return;
@ -525,7 +513,7 @@ void SocketTask::asyncWriteSome() {
_peer->asyncWrite(
asio_ns::buffer(_writeBuffer._buffer->begin() + written, total - written),
[self, this](const asio_ns::error_code& ec, std::size_t transferred) {
JobGuard guard(_loop);
JobGuard guard(_scheduler);
guard.work();
if (_abandoned.load(std::memory_order_acquire)) {
@ -537,12 +525,7 @@ void SocketTask::asyncWriteSome() {
return;
}
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this, transferred] {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this, transferred] {
if (_abandoned.load(std::memory_order_acquire)) {
return;
}
@ -551,11 +534,7 @@ void SocketTask::asyncWriteSome() {
transferred);
if (completedWriteBuffer()) {
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this] {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this] {
if (!_abandoned.load(std::memory_order_acquire)) {
asyncWriteSome();
}
@ -617,11 +596,8 @@ void SocketTask::returnStringBuffer(StringBuffer* buffer) {
void SocketTask::triggerProcessAll() {
// try to process remaining request data
auto self = shared_from_this();
_loop.scheduler->_nrQueued++;
_peer->strand.post([self, this] {
_loop.scheduler->_nrQueued--;
JobGuard guard(_loop);
guard.work();
_peer->post([self, this] {
processAll();
});
}
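
resetKeepAlive() and cancelKeepAlive() above use the standard asio timer idiom, now on a scheduler-owned timer: re-arming with expires_from_now() aborts any pending wait, and an aborted wait reaches its handler with an error, so only a genuine expiry closes the stream. A compact sketch of the idiom, under the same Boost.Date_Time assumption as before:

// Compact sketch of the keep-alive idiom above: re-arming cancels any
// pending wait, and a cancelled wait reports an error, so only a real
// expiry closes the stream. Assumes asio built with Boost.Date_Time.
#include <asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <iostream>
#include <memory>

int main() {
  asio::io_context ctx;
  boost::posix_time::milliseconds timeout(100);
  auto timer = std::make_unique<asio::deadline_timer>(ctx, timeout);

  timer->async_wait([](const asio::error_code& error) {
    if (!error) {
      // real expiry: this is where SocketTask would close the stream
      std::cout << "keep-alive timeout" << std::endl;
    }
    // operation_aborted: the timer was re-armed or cancelled; do nothing
  });

  timer->expires_from_now(timeout);  // re-arm: aborts the pending wait
  timer->async_wait([](const asio::error_code& error) {
    if (!error) {
      std::cout << "keep-alive timeout (after re-arm)" << std::endl;
    }
  });

  ctx.run();
  return 0;
}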

View File

@ -48,7 +48,7 @@ class SocketTask : virtual public Task {
static size_t const READ_BLOCK_SIZE = 10000;
public:
SocketTask(EventLoop, std::unique_ptr<Socket>, ConnectionInfo&&,
SocketTask(Scheduler*, std::unique_ptr<Socket>, ConnectionInfo&&,
double keepAliveTimeout, bool skipInit);
virtual ~SocketTask();
@ -178,7 +178,7 @@ class SocketTask : virtual public Task {
std::list<WriteBuffer> _writeBuffers;
boost::posix_time::milliseconds _keepAliveTimeout;
asio_ns::deadline_timer _keepAliveTimer;
std::unique_ptr<asio_ns::deadline_timer> _keepAliveTimer;
bool const _useKeepAliveTimer;
std::atomic<bool> _keepAliveTimerActive;

View File

@ -33,9 +33,9 @@ class SocketTcp final : public Socket {
friend class AcceptorTcp;
public:
SocketTcp(asio_ns::io_context& ioService)
: Socket(ioService, /*encrypted*/ false),
_socket(ioService),
SocketTcp(rest::Scheduler* scheduler)
: Socket(scheduler, /*encrypted*/ false),
_socket(scheduler->newSocket()),
_peerEndpoint() {}
SocketTcp(SocketTcp const& that) = delete;
@ -48,32 +48,32 @@ class SocketTcp final : public Socket {
int peerPort() const override { return _peerEndpoint.port(); }
void setNonBlocking(bool v) override { _socket.non_blocking(v); }
void setNonBlocking(bool v) override { _socket->non_blocking(v); }
size_t writeSome(basics::StringBuffer* buffer,
asio_ns::error_code& ec) override {
return _socket.write_some(
return _socket->write_some(
asio_ns::buffer(buffer->begin(), buffer->length()), ec);
}
void asyncWrite(asio_ns::mutable_buffers_1 const& buffer,
AsyncHandler const& handler) override {
return asio_ns::async_write(_socket, buffer, strand.wrap(handler));
return asio_ns::async_write(*_socket, buffer, _strand->wrap(handler));
}
size_t readSome(asio_ns::mutable_buffers_1 const& buffer,
asio_ns::error_code& ec) override {
return _socket.read_some(buffer, ec);
return _socket->read_some(buffer, ec);
}
void asyncRead(asio_ns::mutable_buffers_1 const& buffer,
AsyncHandler const& handler) override {
return _socket.async_read_some(buffer, strand.wrap(handler));
return _socket->async_read_some(buffer, _strand->wrap(handler));
}
void close(asio_ns::error_code& ec) override {
if (_socket.is_open()) {
_socket.close(ec);
if (_socket->is_open()) {
_socket->close(ec);
if (ec && ec != asio_ns::error::not_connected) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "closing socket failed with: " << ec.message();
@ -82,22 +82,22 @@ class SocketTcp final : public Socket {
}
std::size_t available(asio_ns::error_code& ec) override {
return static_cast<size_t>(_socket.available(ec));
return static_cast<size_t>(_socket->available(ec));
}
protected:
bool sslHandshake() override { return false; }
void shutdownReceive(asio_ns::error_code& ec) override {
_socket.shutdown(asio_ns::ip::tcp::socket::shutdown_receive, ec);
_socket->shutdown(asio_ns::ip::tcp::socket::shutdown_receive, ec);
}
void shutdownSend(asio_ns::error_code& ec) override {
_socket.shutdown(asio_ns::ip::tcp::socket::shutdown_send, ec);
_socket->shutdown(asio_ns::ip::tcp::socket::shutdown_send, ec);
}
private:
asio_ns::ip::tcp::socket _socket;
std::unique_ptr<asio_ns::ip::tcp::socket> _socket;
asio_ns::ip::tcp::acceptor::endpoint_type _peerEndpoint;
};
}

View File

@ -28,41 +28,41 @@ using namespace arangodb;
size_t SocketUnixDomain::writeSome(basics::StringBuffer* buffer,
asio_ns::error_code& ec) {
return _socket.write_some(asio_ns::buffer(buffer->begin(), buffer->length()),
return _socket->write_some(asio_ns::buffer(buffer->begin(), buffer->length()),
ec);
}
void SocketUnixDomain::asyncWrite(asio_ns::mutable_buffers_1 const& buffer,
AsyncHandler const& handler) {
return asio_ns::async_write(_socket, buffer, handler);
return asio_ns::async_write(*_socket, buffer, handler);
}
size_t SocketUnixDomain::readSome(asio_ns::mutable_buffers_1 const& buffer,
asio_ns::error_code& ec) {
return _socket.read_some(buffer, ec);
return _socket->read_some(buffer, ec);
}
std::size_t SocketUnixDomain::available(asio_ns::error_code& ec) {
return _socket.available(ec);
return _socket->available(ec);
}
void SocketUnixDomain::asyncRead(asio_ns::mutable_buffers_1 const& buffer,
AsyncHandler const& handler) {
return _socket.async_read_some(buffer, handler);
return _socket->async_read_some(buffer, handler);
}
void SocketUnixDomain::shutdownReceive(asio_ns::error_code& ec) {
_socket.shutdown(asio_ns::local::stream_protocol::socket::shutdown_receive,
ec);
_socket->shutdown(asio_ns::local::stream_protocol::socket::shutdown_receive,
ec);
}
void SocketUnixDomain::shutdownSend(asio_ns::error_code& ec) {
_socket.shutdown(asio_ns::local::stream_protocol::socket::shutdown_send, ec);
_socket->shutdown(asio_ns::local::stream_protocol::socket::shutdown_send, ec);
}
void SocketUnixDomain::close(asio_ns::error_code& ec) {
if (_socket.is_open()) {
_socket.close(ec);
if (_socket->is_open()) {
_socket->close(ec);
if (ec && ec != asio_ns::error::not_connected) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "closing socket failed with: "
<< ec.message();

View File

@ -36,15 +36,16 @@ class SocketUnixDomain final : public Socket {
friend class AcceptorUnixDomain;
public:
SocketUnixDomain(asio_ns::io_context& ioService)
: Socket(ioService, false), _socket(ioService) {}
explicit SocketUnixDomain(rest::Scheduler* scheduler)
: Socket(scheduler, false),
_socket(scheduler->newDomainSocket()) {}
SocketUnixDomain(SocketUnixDomain&& that) = default;
std::string peerAddress() const override { return "local"; }
int peerPort() const override { return 0; }
void setNonBlocking(bool v) override { _socket.non_blocking(v); }
void setNonBlocking(bool v) override { _socket->non_blocking(v); }
size_t writeSome(basics::StringBuffer* buffer,
asio_ns::error_code& ec) override;
@ -67,7 +68,7 @@ class SocketUnixDomain final : public Socket {
void close(asio_ns::error_code& ec) override;
private:
asio_ns::local::stream_protocol::socket _socket;
std::unique_ptr<asio_ns::local::stream_protocol::socket> _socket;
asio_ns::local::stream_protocol::acceptor::endpoint_type _peerEndpoint;
};
}

View File

@ -27,8 +27,6 @@
#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>
#include "Scheduler/EventLoop.h"
using namespace arangodb::rest;
namespace {
@ -36,7 +34,9 @@ std::atomic_uint_fast64_t NEXT_TASK_ID(static_cast<uint64_t>(TRI_microtime() *
100000.0));
}
Task::Task(arangodb::EventLoop loop, std::string const& name)
: _loop(loop),
Task::Task(Scheduler* scheduler, std::string const& name)
: _scheduler(scheduler),
_taskId(NEXT_TASK_ID.fetch_add(1, std::memory_order_seq_cst)),
_name(name) {}
_name(name) {
TRI_ASSERT(_scheduler != nullptr);
}

View File

@ -27,8 +27,6 @@
#include "Basics/Common.h"
#include "Scheduler/EventLoop.h"
namespace arangodb {
namespace velocypack {
class Builder;
@ -37,17 +35,18 @@ class Builder;
class TaskData;
namespace rest {
class Scheduler;
class Task : public std::enable_shared_from_this<Task> {
Task(Task const&) = delete;
Task& operator=(Task const&) = delete;
public:
Task(EventLoop, std::string const& name);
Task(Scheduler*, std::string const& name);
virtual ~Task() = default;
public:
uint64_t taskId() const { return _taskId; }
EventLoop eventLoop() const { return _loop; }
std::string const& name() const { return _name; }
// get a VelocyPack representation of the task for reporting
@ -55,7 +54,7 @@ class Task : public std::enable_shared_from_this<Task> {
void toVelocyPack(arangodb::velocypack::Builder&) const;
protected:
EventLoop _loop;
Scheduler * const _scheduler;
uint64_t const _taskId;
private:

View File

@ -351,8 +351,8 @@ void V8Task::start() {
ExecContext::CURRENT->isAdminUser() ||
(!_user.empty() && ExecContext::CURRENT->user() == _user));
auto ioService = SchedulerFeature::SCHEDULER->ioContext();
_timer.reset(new asio::steady_timer(*ioService));
_timer.reset(SchedulerFeature::SCHEDULER->newSteadyTimer());
if (_offset.count() <= 0) {
_offset = std::chrono::microseconds(1);
}

View File

@ -22,6 +22,7 @@
////////////////////////////////////////////////////////////////////////////////
#include "LocalTaskQueue.h"
#include "Basics/ConditionLocker.h"
#include "Basics/Exceptions.h"
#include "Basics/MutexLocker.h"

View File

@ -25,6 +25,8 @@
#if ARANGODB_STANDALONE_ASIO
#define ASIO_HAS_MOVE 1
#include <asio/buffer.hpp>
#include <asio/error.hpp>
#include <asio/io_context.hpp>