1
0
Fork 0

Merge branch 'obi-velocystream-4' of https://github.com/arangodb/arangodb into engine-vs-velocystream

This commit is contained in:
jsteemann 2016-07-28 16:11:26 +02:00
commit ebaabdb076
40 changed files with 4354 additions and 787 deletions

2352
Documentation/arango.doxygen Normal file

File diff suppressed because it is too large Load Diff

View File

@ -41,7 +41,7 @@ ExecutionBlock::~ExecutionBlock() {
_buffer.clear();
}
/// @brief returns the register id for a variable id
/// will return ExecutionNode::MaxRegisterId for an unknown variable
RegisterId ExecutionBlock::getRegister(VariableId id) const {
@ -81,7 +81,7 @@ bool ExecutionBlock::removeDependency(ExecutionBlock* ep) {
}
int ExecutionBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
DEBUG_BEGIN_BLOCK();
DEBUG_BEGIN_BLOCK();
for (auto& d : _dependencies) {
int res = d->initializeCursor(items, pos);
@ -97,7 +97,7 @@ int ExecutionBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
_done = false;
return TRI_ERROR_NO_ERROR;
DEBUG_END_BLOCK();
DEBUG_END_BLOCK();
}
/// @brief whether or not the query was killed
@ -176,14 +176,14 @@ void ExecutionBlock::returnBlock(AqlItemBlock*& block) {
void ExecutionBlock::inheritRegisters(AqlItemBlock const* src,
AqlItemBlock* dst, size_t srcRow,
size_t dstRow) {
DEBUG_BEGIN_BLOCK();
DEBUG_BEGIN_BLOCK();
RegisterId const n = src->getNrRegs();
auto planNode = getPlanNode();
for (RegisterId i = 0; i < n; i++) {
if (planNode->_regsToClear.find(i) == planNode->_regsToClear.end()) {
auto const& value = src->getValueReference(srcRow, i);
if (!value.isEmpty()) {
AqlValue a = value.clone();
AqlValueGuard guard(a, true);
@ -193,14 +193,14 @@ void ExecutionBlock::inheritRegisters(AqlItemBlock const* src,
}
}
}
DEBUG_END_BLOCK();
DEBUG_END_BLOCK();
}
/// @brief copy register data from one block (src) into another (dst)
/// register values are cloned
void ExecutionBlock::inheritRegisters(AqlItemBlock const* src,
AqlItemBlock* dst, size_t row) {
DEBUG_BEGIN_BLOCK();
DEBUG_BEGIN_BLOCK();
RegisterId const n = src->getNrRegs();
auto planNode = getPlanNode();
@ -211,18 +211,18 @@ void ExecutionBlock::inheritRegisters(AqlItemBlock const* src,
if (!value.isEmpty()) {
AqlValue a = value.clone();
AqlValueGuard guard(a, true);
TRI_IF_FAILURE("ExecutionBlock::inheritRegisters") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
dst->setValue(0, i, a);
guard.steal();
}
}
}
}
DEBUG_END_BLOCK();
DEBUG_END_BLOCK();
}
/// @brief the following is internal to pull one more block and append it to
@ -247,7 +247,7 @@ bool ExecutionBlock::getBlock(size_t atLeast, size_t atMost) {
docs.release();
return true;
DEBUG_END_BLOCK();
DEBUG_END_BLOCK();
}
/// @brief getSomeWithoutRegisterClearout, same as above, however, this

View File

@ -32,13 +32,28 @@
#if 0
#define DEBUG_BEGIN_BLOCK() try { //
#define DEBUG_END_BLOCK() } catch (arangodb::basics::Exception const& ex) { LOG(WARN) << "arango exception caught in " << __FILE__ << ":" << __LINE__ << ":" << ex.what(); throw; } catch (std::exception const& ex) { LOG(WARN) << "std exception caught in " << __FILE__ << ":" << __LINE__ << ": " << ex.what(); throw; } catch (...) { LOG(WARN) << "exception caught in " << __FILE__ << ":" << __LINE__; throw; } //
#define DEBUG_BEGIN_BLOCK() try { //
#define DEBUG_END_BLOCK() \
} \
catch (arangodb::basics::Exception const& ex) { \
LOG(WARN) << "arango exception caught in " << __FILE__ << ":" << __LINE__ \
<< ":" << ex.what(); \
throw; \
} \
catch (std::exception const& ex) { \
LOG(WARN) << "std exception caught in " << __FILE__ << ":" << __LINE__ \
<< ": " << ex.what(); \
throw; \
} \
catch (...) { \
LOG(WARN) << "exception caught in " << __FILE__ << ":" << __LINE__; \
throw; \
} //
#else
#define DEBUG_BEGIN_BLOCK() //
#define DEBUG_END_BLOCK() //
#define DEBUG_BEGIN_BLOCK() //
#define DEBUG_END_BLOCK() //
#endif
@ -54,7 +69,7 @@ class ExecutionBlock {
ExecutionBlock(ExecutionEngine*, ExecutionNode const*);
virtual ~ExecutionBlock();
public:
/// @brief batch size value
static constexpr inline size_t DefaultBatchSize() { return 1000; }

View File

@ -144,8 +144,7 @@ void Parser::registerParseError(int errorCode, char const* data, int line,
<< (column + 1);
if (_query->verboseErrors()) {
errorMessage << std::endl
<< _query->queryString() << std::endl;
errorMessage << std::endl << _query->queryString() << std::endl;
// create a neat pointer to the location of the error.
size_t i;

View File

@ -138,7 +138,8 @@ bool Query::DoDisableQueryTracking = false;
/// @brief creates a query
Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
char const* queryString, size_t queryLength,
std::shared_ptr<VPackBuilder> bindParameters, std::shared_ptr<VPackBuilder> options, QueryPart part)
std::shared_ptr<VPackBuilder> bindParameters,
std::shared_ptr<VPackBuilder> options, QueryPart part)
: _id(0),
_vocbase(vocbase),
_executor(nullptr),
@ -167,20 +168,18 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_isModificationQuery(false) {
// std::cout << TRI_CurrentThreadId() << ", QUERY " << this << " CTOR: " <<
// queryString << "\n";
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::Query queryString: "
<< std::string(queryString, queryLength)
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::Query queryString: " << std::string(queryString, queryLength)
<< " this: " << (uintptr_t) this;
if (bindParameters != nullptr &&
!bindParameters->isEmpty() &&
if (bindParameters != nullptr && !bindParameters->isEmpty() &&
!bindParameters->slice().isNone()) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << "bindParameters: "
<< bindParameters->slice().toJson();
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< "bindParameters: " << bindParameters->slice().toJson();
}
if (options != nullptr &&
!options->isEmpty() &&
!options->slice().isNone()) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << "options: " << options->slice().toJson();
if (options != nullptr && !options->isEmpty() && !options->slice().isNone()) {
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< "options: " << options->slice().toJson();
}
TRI_ASSERT(_vocbase != nullptr);
}
@ -215,13 +214,13 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_contextOwnedByExterior(contextOwnedByExterior),
_killed(false),
_isModificationQuery(false) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::Query queryStruct: "
<< queryStruct->slice().toJson() << " this: " << (uintptr_t) this;
if (options != nullptr &&
!options->isEmpty() &&
!options->slice().isNone()) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << "options: " << options->slice().toJson();
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::Query queryStruct: " << queryStruct->slice().toJson()
<< " this: " << (uintptr_t) this;
if (options != nullptr && !options->isEmpty() && !options->slice().isNone()) {
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< "options: " << options->slice().toJson();
}
TRI_ASSERT(_vocbase != nullptr);
}
@ -267,7 +266,8 @@ Query::~Query() {
for (auto& it : _graphs) {
delete it.second;
}
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::~Query this: " << (uintptr_t) this;
}
@ -275,8 +275,9 @@ Query::~Query() {
/// note: as a side-effect, this will also create and start a transaction for
/// the query
Query* Query::clone(QueryPart part, bool withPlan) {
auto clone = std::make_unique<Query>(false, _vocbase, _queryString,
_queryLength, std::shared_ptr<VPackBuilder>(), _options, part);
auto clone =
std::make_unique<Query>(false, _vocbase, _queryString, _queryLength,
std::shared_ptr<VPackBuilder>(), _options, part);
if (_plan != nullptr) {
if (withPlan) {
@ -313,7 +314,7 @@ Query* Query::clone(QueryPart part, bool withPlan) {
/// @brief add a node to the list of nodes
void Query::addNode(AstNode* node) { _nodes.emplace_back(node); }
void Query::setExecutionTime() {
if (_engine != nullptr) {
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
@ -425,7 +426,8 @@ void Query::registerWarning(int code, char const* details) {
/// QueryRegistry.
QueryResult Query::prepare(QueryRegistry* registry) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::prepare" << " this: " << (uintptr_t) this;
<< "Query::prepare"
<< " this: " << (uintptr_t) this;
TRI_ASSERT(registry != nullptr);
try {
@ -503,7 +505,8 @@ QueryResult Query::prepare(QueryRegistry* registry) {
}
// we have an execution plan in VelocyPack format
plan.reset(ExecutionPlan::instantiateFromVelocyPack(parser->ast(), _queryBuilder->slice()));
plan.reset(ExecutionPlan::instantiateFromVelocyPack(
parser->ast(), _queryBuilder->slice()));
if (plan.get() == nullptr) {
// oops
return QueryResult(TRI_ERROR_INTERNAL);
@ -551,7 +554,8 @@ QueryResult Query::prepare(QueryRegistry* registry) {
/// @brief execute an AQL query
QueryResult Query::execute(QueryRegistry* registry) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::execute" << " this: " << (uintptr_t) this;
<< "Query::execute"
<< " this: " << (uintptr_t) this;
TRI_ASSERT(registry != nullptr);
std::unique_ptr<AqlWorkStack> work;
@ -612,7 +616,8 @@ QueryResult Query::execute(QueryRegistry* registry) {
TRI_ASSERT(_engine != nullptr);
auto resultBuilder = std::make_shared<VPackBuilder>(&options);
resultBuilder->buffer()->reserve(16 * 1024); // reserve some space in Builder to avoid frequent reallocs
resultBuilder->buffer()->reserve(
16 * 1024); // reserve some space in Builder to avoid frequent reallocs
try {
resultBuilder->openArray();
@ -635,16 +640,17 @@ QueryResult Query::execute(QueryRegistry* registry) {
delete value;
value = nullptr;
}
// must close result array here because it must be passed as a closed array
// must close result array here because it must be passed as a closed
// array
// to the query cache
resultBuilder->close();
if (_warnings.empty()) {
// finally store the generated result in the query cache
auto result = QueryCache::instance()->store(
_vocbase, queryStringHash, _queryString, _queryLength, resultBuilder,
_trx->collectionNames());
_vocbase, queryStringHash, _queryString, _queryLength,
resultBuilder, _trx->collectionNames());
if (result == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
@ -654,7 +660,6 @@ QueryResult Query::execute(QueryRegistry* registry) {
// iterate over result and return it
while (nullptr != (value = _engine->getSome(
1, ExecutionBlock::DefaultBatchSize()))) {
size_t const n = value->size();
for (size_t i = 0; i < n; ++i) {
AqlValue const& val = value->getValueReference(i, resultRegister);
@ -666,7 +671,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
delete value;
value = nullptr;
}
// must close result array
resultBuilder->close();
}
@ -696,7 +701,8 @@ QueryResult Query::execute(QueryRegistry* registry) {
}
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::execute:returning" << " this: " << (uintptr_t) this;
<< "Query::execute:returning"
<< " this: " << (uintptr_t) this;
return result;
} catch (arangodb::basics::Exception const& ex) {
setExecutionTime();
@ -724,7 +730,8 @@ QueryResult Query::execute(QueryRegistry* registry) {
/// may only be called with an active V8 handle scope
QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::executeV8" << " this: " << (uintptr_t) this;
<< "Query::executeV8"
<< " this: " << (uintptr_t) this;
std::unique_ptr<AqlWorkStack> work;
try {
@ -747,9 +754,11 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
// a mimimal context to build the result
res.context = std::make_shared<StandaloneTransactionContext>(_vocbase);
v8::Handle<v8::Value> values = TRI_VPackToV8(isolate, cacheEntry->_queryResult->slice(), res.context->getVPackOptions());
v8::Handle<v8::Value> values =
TRI_VPackToV8(isolate, cacheEntry->_queryResult->slice(),
res.context->getVPackOptions());
TRI_ASSERT(values->IsArray());
res.result = v8::Handle<v8::Array>::Cast(values);
res.result = v8::Handle<v8::Array>::Cast(values);
res.cached = true;
return res;
}
@ -810,7 +819,8 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
if (_warnings.empty()) {
// finally store the generated result in the query cache
QueryCache::instance()->store(_vocbase, queryStringHash, _queryString,
_queryLength, builder, _trx->collectionNames());
_queryLength, builder,
_trx->collectionNames());
}
} else {
// iterate over result and return it
@ -840,7 +850,8 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
}
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::executeV8: before _trx->commit" << " this: " << (uintptr_t) this;
<< "Query::executeV8: before _trx->commit"
<< " this: " << (uintptr_t) this;
_trx->commit();
@ -850,7 +861,8 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
result.context = _trx->transactionContext();
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::executeV8: before cleanupPlanAndEngine"
<< " this: " << (uintptr_t) this;
@ -1070,8 +1082,8 @@ char* Query::registerEscapedString(char const* p, size_t length,
return const_cast<char*>(EmptyString);
}
char* copy =
TRI_UnescapeUtf8String(TRI_UNKNOWN_MEM_ZONE, p, length, &outLength, false);
char* copy = TRI_UnescapeUtf8String(TRI_UNKNOWN_MEM_ZONE, p, length,
&outLength, false);
if (copy == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
@ -1231,8 +1243,7 @@ void Query::log() {
LOG_TOPIC(TRACE, Logger::QUERIES)
<< "executing query " << _id << ": '"
<< std::string(_queryString, (std::min)(_queryLength, MaxLength))
.append(_queryLength > MaxLength ? "..." : "")
<< "'";
.append(_queryLength > MaxLength ? "..." : "") << "'";
}
}
@ -1348,7 +1359,7 @@ bool Query::inspectSimplePlans() const {
/// @brief read the "optimizer.rules" section from the options
std::vector<std::string> Query::getRulesFromOptions() const {
std::vector<std::string> rules;
if (_options == nullptr) {
return rules;
}
@ -1383,7 +1394,8 @@ std::vector<std::string> Query::getRulesFromOptions() const {
/// @brief enter a new state
void Query::enterState(ExecutionState state) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::enterState: " << state << " this: " << (uintptr_t) this;
<< "Query::enterState: " << state
<< " this: " << (uintptr_t) this;
if (_profile != nullptr) {
// record timing for previous state
_profile->setDone(_state);
@ -1437,7 +1449,8 @@ void Query::setPlan(ExecutionPlan* plan) {
}
/// @brief create a TransactionContext
std::shared_ptr<arangodb::TransactionContext> Query::createTransactionContext() {
std::shared_ptr<arangodb::TransactionContext>
Query::createTransactionContext() {
if (_contextOwnedByExterior) {
// we can use v8
return arangodb::V8TransactionContext::Create(_vocbase, true);

View File

@ -190,12 +190,12 @@ add_executable(${BIN_ARANGOD}
GeneralServer/GeneralServer.cpp
GeneralServer/GeneralServerFeature.cpp
GeneralServer/HttpCommTask.cpp
GeneralServer/HttpCommTask.cpp
GeneralServer/HttpServerJob.cpp
GeneralServer/HttpsCommTask.cpp
GeneralServer/PathHandler.cpp
GeneralServer/RestHandler.cpp
GeneralServer/RestHandlerFactory.cpp
GeneralServer/VppCommTask.cpp
GeoIndex/GeoIndex.cpp
Indexes/EdgeIndex.cpp
Indexes/FulltextIndex.cpp

View File

@ -69,11 +69,11 @@ void ClusterCommResult::setDestination(std::string const& dest,
serverID = "";
status = CL_COMM_BACKEND_UNAVAILABLE;
if (logConnectionErrors) {
LOG(ERR) << "cannot find responsible server for shard '"
<< shardID << "'";
LOG(ERR) << "cannot find responsible server for shard '" << shardID
<< "'";
} else {
LOG(INFO) << "cannot find responsible server for shard '"
<< shardID << "'";
LOG(INFO) << "cannot find responsible server for shard '" << shardID
<< "'";
}
return;
}
@ -107,11 +107,9 @@ void ClusterCommResult::setDestination(std::string const& dest,
status = CL_COMM_BACKEND_UNAVAILABLE;
errorMessage = "did not find endpoint of server '" + serverID + "'";
if (logConnectionErrors) {
LOG(ERR) << "did not find endpoint of server '" << serverID
<< "'";
LOG(ERR) << "did not find endpoint of server '" << serverID << "'";
} else {
LOG(INFO) << "did not find endpoint of server '" << serverID
<< "'";
LOG(INFO) << "did not find endpoint of server '" << serverID << "'";
}
}
}
@ -237,12 +235,11 @@ OperationID ClusterComm::getOperationID() { return TRI_NewTickServer(); }
OperationID ClusterComm::asyncRequest(
ClientTransactionID const clientTransactionID,
CoordTransactionID const coordTransactionID, std::string const& destination,
arangodb::GeneralRequest::RequestType reqtype,
std::string const& path, std::shared_ptr<std::string const> body,
arangodb::GeneralRequest::RequestType reqtype, std::string const& path,
std::shared_ptr<std::string const> body,
std::unique_ptr<std::unordered_map<std::string, std::string>>& headerFields,
std::shared_ptr<ClusterCommCallback> callback, ClusterCommTimeout timeout,
bool singleRequest, ClusterCommTimeout initTimeout) {
TRI_ASSERT(headerFields.get() != nullptr);
auto op = std::make_unique<ClusterCommOperation>();
@ -281,7 +278,7 @@ OperationID ClusterComm::asyncRequest(
receivedByOpID[opId] = --q;
if (nullptr != callback) {
op.reset(*q);
if ( (*callback.get())(&(op->result)) ) {
if ((*callback.get())(&(op->result))) {
auto i = receivedByOpID.find(opId);
receivedByOpID.erase(i);
received.erase(q);
@ -319,8 +316,8 @@ OperationID ClusterComm::asyncRequest(
ServerState::instance()->getAuthentication();
}
TRI_voc_tick_t timeStamp = TRI_HybridLogicalClock();
(*op->headerFields)[StaticStrings::HLCHeader]
= arangodb::basics::HybridLogicalClock::encodeTimeStamp(timeStamp);
(*op->headerFields)[StaticStrings::HLCHeader] =
arangodb::basics::HybridLogicalClock::encodeTimeStamp(timeStamp);
#ifdef DEBUG_CLUSTER_COMM
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
@ -379,8 +376,8 @@ OperationID ClusterComm::asyncRequest(
std::unique_ptr<ClusterCommResult> ClusterComm::syncRequest(
ClientTransactionID const& clientTransactionID,
CoordTransactionID const coordTransactionID, std::string const& destination,
arangodb::GeneralRequest::RequestType reqtype,
std::string const& path, std::string const& body,
arangodb::GeneralRequest::RequestType reqtype, std::string const& path,
std::string const& body,
std::unordered_map<std::string, std::string> const& headerFields,
ClusterCommTimeout timeout) {
std::unordered_map<std::string, std::string> headersCopy(headerFields);
@ -425,17 +422,16 @@ std::unique_ptr<ClusterCommResult> ClusterComm::syncRequest(
res->errorMessage =
"cannot create connection to server '" + res->serverID + "'";
if (logConnectionErrors()) {
LOG(ERR) << "cannot create connection to server '"
<< res->serverID << "' at endpoint '" << res->endpoint << "'";
LOG(ERR) << "cannot create connection to server '" << res->serverID
<< "' at endpoint '" << res->endpoint << "'";
} else {
LOG(INFO) << "cannot create connection to server '"
<< res->serverID << "' at endpoint '" << res->endpoint << "'";
LOG(INFO) << "cannot create connection to server '" << res->serverID
<< "' at endpoint '" << res->endpoint << "'";
}
} else {
LOG(DEBUG) << "sending "
<< arangodb::HttpRequest::translateMethod(reqtype)
<< " request to DB server '" << res->serverID << "' at endpoint '" << res->endpoint
<< "': " << body;
LOG(DEBUG) << "sending " << arangodb::HttpRequest::translateMethod(reqtype)
<< " request to DB server '" << res->serverID
<< "' at endpoint '" << res->endpoint << "': " << body;
// LOCKING-DEBUG
// std::cout << "syncRequest: sending " <<
// arangodb::rest::HttpRequest::translateMethod(reqtype) << " request to
@ -450,8 +446,8 @@ std::unique_ptr<ClusterCommResult> ClusterComm::syncRequest(
headersCopy["Authorization"] = ServerState::instance()->getAuthentication();
TRI_voc_tick_t timeStamp = TRI_HybridLogicalClock();
headersCopy[StaticStrings::HLCHeader]
= arangodb::basics::HybridLogicalClock::encodeTimeStamp(timeStamp);
headersCopy[StaticStrings::HLCHeader] =
arangodb::basics::HybridLogicalClock::encodeTimeStamp(timeStamp);
#ifdef DEBUG_CLUSTER_COMM
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
#if ARANGODB_ENABLE_BACKTRACE
@ -801,8 +797,9 @@ void ClusterComm::drop(ClientTransactionID const& clientTransactionID,
////////////////////////////////////////////////////////////////////////////////
void ClusterComm::asyncAnswer(std::string& coordinatorHeader,
GeneralResponse* responseToSend) {
GeneralResponse* response) {
// FIXME - generalize for VPP
HttpResponse* responseToSend = dynamic_cast<HttpResponse*>(response);
if (responseToSend == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
@ -847,14 +844,15 @@ void ClusterComm::asyncAnswer(std::string& coordinatorHeader,
return;
}
std::unordered_map<std::string, std::string> headers = responseToSend->headers();
std::unordered_map<std::string, std::string> headers =
responseToSend->headers();
headers["X-Arango-Coordinator"] = coordinatorHeader;
headers["X-Arango-Response-Code"] =
responseToSend->responseString(responseToSend->responseCode());
headers["Authorization"] = ServerState::instance()->getAuthentication();
TRI_voc_tick_t timeStamp = TRI_HybridLogicalClock();
headers[StaticStrings::HLCHeader]
= arangodb::basics::HybridLogicalClock::encodeTimeStamp(timeStamp);
headers[StaticStrings::HLCHeader] =
arangodb::basics::HybridLogicalClock::encodeTimeStamp(timeStamp);
char const* body = responseToSend->body().c_str();
size_t len = responseToSend->body().length();
@ -989,7 +987,7 @@ bool ClusterComm::moveFromSendToReceived(OperationID operationID) {
IndexIterator i = toSendByOpID.find(operationID); // cannot fail
// TRI_ASSERT(i != toSendByOpID.end());
//KV: Except the operation has been dropped in the meantime
// KV: Except the operation has been dropped in the meantime
QueueIterator q = i->second;
ClusterCommOperation* op = *q;
@ -1076,10 +1074,8 @@ void ClusterCommThread::beginShutdown() {
////////////////////////////////////////////////////////////////////////////////
size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
ClusterCommTimeout timeout,
size_t& nrDone,
ClusterCommTimeout timeout, size_t& nrDone,
arangodb::LogTopic const& logTopic) {
if (requests.size() == 0) {
nrDone = 0;
return 0;
@ -1116,28 +1112,25 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
for (size_t i = 0; i < requests.size(); i++) {
if (!requests[i].done && now >= dueTime[i]) {
if (requests[i].headerFields.get() == nullptr) {
requests[i].headerFields
= std::make_unique<std::unordered_map<std::string, std::string>>();
requests[i].headerFields = std::make_unique<
std::unordered_map<std::string, std::string>>();
}
LOG_TOPIC(TRACE, logTopic)
<< "ClusterComm::performRequests: sending request to "
<< requests[i].destination << ":" << requests[i].path
<< "body:" << requests[i].body;
double localInitTimeout
= (std::min)((std::max)(1.0, now - startTime), 10.0);
double localInitTimeout =
(std::min)((std::max)(1.0, now - startTime), 10.0);
double localTimeout = endTime - now;
dueTime[i] = endTime + 10; // no retry unless ordered elsewhere
if (localInitTimeout > localTimeout) {
localInitTimeout = localTimeout;
}
OperationID opId = asyncRequest("", coordinatorTransactionID,
requests[i].destination,
requests[i].requestType,
requests[i].path,
requests[i].body,
requests[i].headerFields,
nullptr, localTimeout,
false, localInitTimeout);
OperationID opId = asyncRequest(
"", coordinatorTransactionID, requests[i].destination,
requests[i].requestType, requests[i].path, requests[i].body,
requests[i].headerFields, nullptr, localTimeout, false,
localInitTimeout);
opIDtoIndex.insert(std::make_pair(opId, i));
// It is possible that an error occurs right away, we will notice
// below after wait(), though, and retry in due course.
@ -1158,7 +1151,8 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
if (now >= actionNeeded) {
break;
}
auto res = wait("", coordinatorTransactionID, 0, "", actionNeeded - now);
auto res =
wait("", coordinatorTransactionID, 0, "", actionNeeded - now);
if (res.status == CL_COMM_TIMEOUT && res.operationID == 0) {
// Did not receive any result until the timeout (of wait) was hit.
break;
@ -1169,14 +1163,15 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
if (now >= actionNeeded) {
break;
}
usleep( (std::min)(500000,
static_cast<int>((actionNeeded - now) * 1000000)) );
usleep((std::min)(500000,
static_cast<int>((actionNeeded - now) * 1000000)));
continue;
}
auto it = opIDtoIndex.find(res.operationID);
if (it == opIDtoIndex.end()) {
// Ooops, we got a response to which we did not send the request
LOG(ERR) << "Received ClusterComm response for a request we did not send!";
LOG(ERR)
<< "Received ClusterComm response for a request we did not send!";
continue;
}
size_t index = it->second;
@ -1189,17 +1184,18 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
res.answer_code == GeneralResponse::ResponseCode::ACCEPTED) {
nrGood++;
}
LOG_TOPIC(TRACE, logTopic) << "ClusterComm::performRequests: "
LOG_TOPIC(TRACE, logTopic)
<< "ClusterComm::performRequests: "
<< "got answer from " << requests[index].destination << ":"
<< requests[index].path << " with return code "
<< (int) res.answer_code;
<< (int)res.answer_code;
} else if (res.status == CL_COMM_BACKEND_UNAVAILABLE ||
(res.status == CL_COMM_TIMEOUT && !res.sendWasComplete)) {
requests[index].result = res;
// In this case we will retry:
dueTime[index] = (std::min)(10.0,
(std::max)(0.2, 2 * (now - startTime))) +
startTime;
dueTime[index] =
(std::min)(10.0, (std::max)(0.2, 2 * (now - startTime))) +
startTime;
if (dueTime[index] >= endTime) {
requests[index].done = true;
nrDone++;
@ -1207,15 +1203,16 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
if (dueTime[index] < actionNeeded) {
actionNeeded = dueTime[index];
}
LOG_TOPIC(TRACE, logTopic) << "ClusterComm::performRequests: "
LOG_TOPIC(TRACE, logTopic)
<< "ClusterComm::performRequests: "
<< "got BACKEND_UNAVAILABLE or TIMEOUT from "
<< requests[index].destination << ":"
<< requests[index].path;
} else { // a "proper error"
<< requests[index].destination << ":" << requests[index].path;
} else { // a "proper error"
requests[index].result = res;
requests[index].done = true;
nrDone++;
LOG_TOPIC(TRACE, logTopic) << "ClusterComm::performRequests: "
LOG_TOPIC(TRACE, logTopic)
<< "ClusterComm::performRequests: "
<< "got no answer from " << requests[index].destination << ":"
<< requests[index].path << " with error " << res.status;
}
@ -1227,14 +1224,14 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
}
} catch (...) {
LOG_TOPIC(ERR, logTopic) << "ClusterComm::performRequests: "
<< "caught exception, ignoring...";
<< "caught exception, ignoring...";
}
// We only get here if the global timeout was triggered, not all
// requests are marked by done!
LOG_TOPIC(DEBUG, logTopic) << "ClusterComm::performRequests: "
<< "got timeout, this will be reported...";
<< "got timeout, this will be reported...";
// Forget about
drop("", coordinatorTransactionID, 0, "");
@ -1250,15 +1247,13 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
//////////////////////////////////////////////////////////////////////////////
size_t ClusterComm::performSingleRequest(
std::vector<ClusterCommRequest>& requests,
ClusterCommTimeout timeout,
size_t& nrDone,
arangodb::LogTopic const& logTopic) {
std::vector<ClusterCommRequest>& requests, ClusterCommTimeout timeout,
size_t& nrDone, arangodb::LogTopic const& logTopic) {
CoordTransactionID coordinatorTransactionID = TRI_NewTickServer();
ClusterCommRequest& req(requests[0]);
if (req.headerFields.get() == nullptr) {
req.headerFields
= std::make_unique<std::unordered_map<std::string, std::string>>();
req.headerFields =
std::make_unique<std::unordered_map<std::string, std::string>>();
}
if (req.body == nullptr) {
req.result = *syncRequest("", coordinatorTransactionID, req.destination,
@ -1289,7 +1284,8 @@ size_t ClusterComm::performSingleRequest(
return (req.result.answer_code == GeneralResponse::ResponseCode::OK ||
req.result.answer_code == GeneralResponse::ResponseCode::CREATED ||
req.result.answer_code == GeneralResponse::ResponseCode::ACCEPTED)
? 1 : 0;
? 1
: 0;
}
////////////////////////////////////////////////////////////////////////////////
@ -1346,27 +1342,26 @@ void ClusterCommThread::run() {
op->result.errorMessage += op->result.serverID;
if (cc->logConnectionErrors()) {
LOG(ERR) << "cannot create connection to server '"
<< op->result.serverID << "' at endpoint '" << op->result.endpoint << "'";
<< op->result.serverID << "' at endpoint '"
<< op->result.endpoint << "'";
} else {
LOG(INFO) << "cannot create connection to server '"
<< op->result.serverID << "' at endpoint '" << op->result.endpoint << "'";
<< op->result.serverID << "' at endpoint '"
<< op->result.endpoint << "'";
}
} else {
if (nullptr != op->body.get()) {
LOG(DEBUG) << "sending "
<< arangodb::HttpRequest::translateMethod(
op->reqtype)
.c_str()
<< " request to DB server '"
<< op->result.serverID << "' at endpoint '" << op->result.endpoint
<< "': " << op->body->c_str();
<< arangodb::HttpRequest::translateMethod(op->reqtype)
.c_str() << " request to DB server '"
<< op->result.serverID << "' at endpoint '"
<< op->result.endpoint << "': " << op->body->c_str();
} else {
LOG(DEBUG) << "sending "
<< arangodb::HttpRequest::translateMethod(
op->reqtype)
.c_str()
<< " request to DB server '"
<< op->result.serverID << "' at endpoint '" << op->result.endpoint << "'";
<< arangodb::HttpRequest::translateMethod(op->reqtype)
.c_str() << " request to DB server '"
<< op->result.serverID << "' at endpoint '"
<< op->result.endpoint << "'";
}
auto client =
@ -1432,7 +1427,7 @@ void ClusterCommThread::run() {
cc->toSendByOpID.erase(i);
cc->toSend.erase(q);
delete op;
continue; // do not move to the received queue but forget it
continue; // do not move to the received queue but forget it
}
}
}
@ -1449,14 +1444,14 @@ void ClusterCommThread::run() {
CONDITION_LOCKER(locker, cc->somethingReceived);
ClusterComm::QueueIterator q;
for (q = cc->received.begin(); q != cc->received.end(); ) {
for (q = cc->received.begin(); q != cc->received.end();) {
bool deleted = false;
op = *q;
if (op->result.status == CL_COMM_SENT) {
if (op->endTime < currentTime) {
op->result.status = CL_COMM_TIMEOUT;
if (nullptr != op->callback.get()) {
if ( (*op->callback.get())(&op->result) ) {
if ((*op->callback.get())(&op->result)) {
// This is fully processed, so let's remove it from the queue:
auto i = cc->receivedByOpID.find(op->result.operationID);
TRI_ASSERT(i != cc->receivedByOpID.end());

View File

@ -40,24 +40,12 @@ using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
////////////////////////////////////////////////////////////////////////////////
/// @brief static initializers
////////////////////////////////////////////////////////////////////////////////
GeneralCommTask::GeneralCommTask(GeneralServer* server, TRI_socket_t socket,
ConnectionInfo&& info, double keepAliveTimeout)
: Task("GeneralCommTask"),
SocketTask(socket, keepAliveTimeout),
_server(server),
_request(nullptr),
_connectionInfo(std::move(info)),
_protocol("unknown"),
_protocolVersion(GeneralRequest::ProtocolVersion::UNKNOWN),
_startThread(false),
_writeBuffers(),
_writeBuffersStats(),
_isChunked(false),
_requestPending(false) {
_connectionInfo(std::move(info)) {
LOG(TRACE) << "connection established, client "
<< TRI_get_fd_or_handle_of_socket(socket) << ", server ip "
<< _connectionInfo.serverAddress << ", server port "
@ -70,30 +58,172 @@ GeneralCommTask::~GeneralCommTask() {
LOG(TRACE) << "connection closed, client "
<< TRI_get_fd_or_handle_of_socket(_commSocket);
// free write buffers and statistics
for (auto& i : _writeBuffers) delete i;
for (auto& i : _writeBuffersStats) TRI_ReleaseRequestStatistics(i);
for (auto& i : _writeBuffers) {
delete i;
}
// free request
delete _request;
for (auto& i : _writeBuffersStats) {
TRI_ReleaseRequestStatistics(i);
}
clearRequest();
}
void GeneralCommTask::handleResponse(GeneralResponse* response) {
_requestPending = false;
_isChunked = false;
_startThread = false;
void GeneralCommTask::signalTask(TaskData* data) {
// data response
if (data->_type == TaskData::TASK_DATA_RESPONSE) {
data->RequestStatisticsAgent::transferTo(this);
GeneralResponse* response = data->_response.get();
if (response != nullptr) {
processResponse(response);
processRead();
} else {
handleSimpleError(GeneralResponse::ResponseCode::SERVER_ERROR);
}
}
// buffer response
else if (data->_type == TaskData::TASK_DATA_BUFFER) {
data->RequestStatisticsAgent::transferTo(this);
HttpResponse response(GeneralResponse::ResponseCode::OK);
velocypack::Slice slice(data->_buffer->data());
response.setPayload(_request, slice, true, VPackOptions::Defaults);
processResponse(&response);
processRead();
}
// data chunk
else if (data->_type == TaskData::TASK_DATA_CHUNK) {
handleChunk(data->_data.c_str(), data->_data.size());
}
// do not know, what to do - give up
else {
_scheduler->destroyTask(this);
}
}
bool GeneralCommTask::handleRead() {
bool res = true;
if (!_closeRequested) {
res = fillReadBuffer();
// process as much data as we got; there might be more than one
// request in the buffer
while (processRead()) {
if (_closeRequested) {
break;
}
}
} else {
// if we don't close here, the scheduler thread may fall into a
// busy wait state, consuming 100% CPU!
_clientClosed = true;
}
if (_clientClosed) {
res = false;
} else if (!res) {
_clientClosed = true;
}
return res;
}
void GeneralCommTask::fillWriteBuffer() {
if (!hasWriteBuffer() && !_writeBuffers.empty()) {
StringBuffer* buffer = _writeBuffers.front();
_writeBuffers.pop_front();
TRI_ASSERT(buffer != nullptr);
TRI_request_statistics_t* statistics = _writeBuffersStats.front();
_writeBuffersStats.pop_front();
setWriteBuffer(buffer, statistics);
}
}
void GeneralCommTask::executeRequest(GeneralRequest* request,
GeneralResponse* response) {
// create a handler for this request
WorkItem::uptr<RestHandler> handler(
GeneralServerFeature::HANDLER_FACTORY->createHandler(request, response));
if (handler == nullptr) {
LOG(TRACE) << "no handler is known, giving up";
clearRequest();
delete response;
handleSimpleError(GeneralResponse::ResponseCode::NOT_FOUND);
return;
}
handler->setTaskId(_taskId, _loop);
// check for an async request
bool found = false;
std::string const& asyncExecution =
request->header(StaticStrings::Async, found);
// the responsibility for the request has been moved to the handler
// TODO(fc) _request should
_request = nullptr;
// async execution
bool ok = false;
if (found && (asyncExecution == "true" || asyncExecution == "store")) {
requestStatisticsAgentSetAsync();
uint64_t jobId = 0;
if (asyncExecution == "store") {
// persist the responses
ok = _server->handleRequestAsync(this, handler, &jobId);
} else {
// don't persist the responses
ok = _server->handleRequestAsync(this, handler, nullptr);
}
if (ok) {
HttpResponse response(GeneralResponse::ResponseCode::ACCEPTED);
if (jobId > 0) {
// return the job id we just created
response.setHeaderNC(StaticStrings::AsyncId, StringUtils::itoa(jobId));
}
processResponse(&response);
return;
}
}
// synchronous request
else {
ok = _server->handleRequest(this, handler);
}
if (!ok) {
handleSimpleError(GeneralResponse::ResponseCode::SERVER_ERROR);
}
}
void GeneralCommTask::processResponse(GeneralResponse* response) {
if (response == nullptr) {
handleSimpleError(GeneralResponse::ResponseCode::SERVER_ERROR);
} else {
addResponse(response);
addResponse(response, false);
}
}
void GeneralCommTask::handleSimpleError(GeneralResponse::ResponseCode code) {
HttpResponse response(code);
resetState(true);
addResponse(&response);
addResponse(&response, true);
}
void GeneralCommTask::handleSimpleError(
@ -113,17 +243,18 @@ void GeneralCommTask::handleSimpleError(
response.setPayload(_request, builder.slice(), true,
VPackOptions::Defaults);
clearRequest();
handleResponse(&response);
processResponse(&response);
} catch (...) {
resetState(true);
addResponse(&response);
addResponse(&response, true);
}
}
// TODO(fc) MOVE TO SOCKET TASK
bool GeneralCommTask::handleEvent(EventToken token, EventType events) {
// destroy this task if client is closed
bool result = SocketTask::handleEvent(token, events);
if (_clientClosed) _scheduler->destroyTask(this);
if (_clientClosed) {
_scheduler->destroyTask(this);
}
return result;
}
void GeneralCommTask::handleTimeout() { _clientClosed = true; }

View File

@ -33,7 +33,6 @@
#include "Basics/StringBuffer.h"
#include "Basics/WorkItem.h"
#include <deque>
namespace arangodb {
class GeneralRequest;
class GeneralResponse;
@ -41,6 +40,87 @@ class GeneralResponse;
namespace rest {
class GeneralServer;
//
// The flow of events is as follows:
//
// (1) As soon as new data is available from the client, then `handleRead()` is
// called. This will read new data from the client using
// `SocketTask::fillReadBuffer()`.
//
// (2) After reading data from the client, `processRead()` is called. Each
// sub-class of `GeneralCommTask` must implement this method.
//
// (3) As soon as `processRead()` detects that it has read a complete request,
// it must create an instance of a sub-class of `GeneralRequest` and
// `GeneralResponse`. Then it must call `executeRequest(...)` to start the
// execution of the request.
//
// (4) `executeRequest(...)` will create a handler. A handler is responsible for
// executing the request. It will take the `request` instance and executes a
// plan to generate a response. It is possible, that one request generates a
// response and still does some work afterwards. It is even possible, that a
// request generates a push stream.
//
// As soon as a response is available, `handleResponse()` will be called.
// This in turn calls `addResponse()` which must be implemented in the
// sub-class. It will be called with an response object and an indicator
// if an error has occurred.
//
// It is the responsibility of the sub-class to govern what is support. For
// example, HTTP will only support one active request executing at a time
// until the final response has been send out.
//
// VelocyPack on the other hand, allows multiple active requests. Partial
// responses are identified by a request id.
//
// (5) Error handling: In case of an error `handleSimpleError()` will be
// called. This will call `addResponse()` with an error indicator, which in
// turn will end the responding request.
//
// External Interface (called from Scheduler):
//
// (1) handleRead
//
// Will be called when new data can be read from the client. It will
// use `SocketTask::fillReadBuffer()` to actually read the data into the
// read buffer (defined in `SocketTask`).
//
// (2) signalTask
//
// `signalTask` will be called when data becomes available from an
// asynchronous execution.
//
// (3) addResponse
//
// see below.
//
// Internal Interface (must be implemented by sub-classes):
//
// (1) processRead
//
// Will be called as soon as new data has been read from the client. The
// method must split the read buffer into requests and call `executeRequest`
// for each request found. It is the responsiblity of the `processRead` to
// cleanup the read buffer periodically.
//
// (2) addResponse
//
// Will be called when a new response is available.
//
// Ownership and life-time:
//
// (1) The Task will live as long as there is at least one active handler.
// TODO(fc)
//
// (2) The Task owns the handlers and is responsible for destroying them.
// TODO(fc)
//
// handleEvent (defined in SocketTask and arumented in this class) is called
// when new data is available. handleEvent calls in turn handleWrite and
// handleRead (virtual function required by SocketTask) that calls processRead
// (which has to be implemented in derived) as long as new input is available.
class GeneralCommTask : public SocketTask, public RequestStatisticsAgent {
GeneralCommTask(GeneralCommTask const&) = delete;
GeneralCommTask const& operator=(GeneralCommTask const&) = delete;
@ -49,48 +129,79 @@ class GeneralCommTask : public SocketTask, public RequestStatisticsAgent {
GeneralCommTask(GeneralServer*, TRI_socket_t, ConnectionInfo&&,
double keepAliveTimeout);
// return whether or not the task desires to start a dispatcher thread
bool startThread() const { return _startThread; } // called by server
void handleResponse(GeneralResponse*); // called by server
protected:
virtual ~GeneralCommTask();
public:
virtual void addResponse(GeneralResponse*, bool error) = 0;
// void handleResponse(GeneralResponse*); // resets vars and calls
// addResponse
protected:
void signalTask(TaskData*) override;
protected:
virtual bool processRead() = 0;
virtual void handleChunk(char const*, size_t) = 0;
protected:
bool handleRead() override final;
void executeRequest(GeneralRequest*, GeneralResponse*);
// TODO(fc) move to SocketTask
// main callback of this class - called by base SocketTask - this version
// calls the SocketTask's handleEvent
virtual bool handleEvent(EventToken token, EventType events) override;
void processResponse(GeneralResponse*);
void handleSimpleError(GeneralResponse::ResponseCode);
void handleSimpleError(GeneralResponse::ResponseCode, int code,
std::string const& errorMessage);
protected:
virtual ~GeneralCommTask();
virtual void addResponse(GeneralResponse*) = 0;
virtual bool processRead() = 0;
virtual void processRequest() = 0;
virtual void resetState(bool) = 0;
virtual bool handleEvent(EventToken token,
EventType events) override; // called by TODO
void cleanup() override final { SocketTask::cleanup(); }
// clears the request object
// clears the request object, TODO(fc) see below
void clearRequest() {
delete _request;
if (_request != nullptr) {
delete _request;
}
_request = nullptr;
}
private:
void handleTimeout() override final;
//
// internal members
//
protected:
void handleTimeout() override final { _clientClosed = true; }
protected:
void fillWriteBuffer(); // fills SocketTasks _writeBuffer
// _writeBufferStatistics from
// _writeBuffers/_writeBuffersStats
// for asynchronous requests
GeneralServer* const _server;
GeneralRequest* _request; // the request with possible incomplete body
// the request with possible incomplete body
// TODO(fc) needs to be removed, depends on the underlying protocol
GeneralRequest* _request = nullptr;
// information about the client
ConnectionInfo _connectionInfo;
char const* _protocol; // protocal to use http, vpp
GeneralRequest::ProtocolVersion _protocolVersion;
bool _startThread;
// protocol to use http, vpp
char const* _protocol = "unknown";
GeneralRequest::ProtocolVersion _protocolVersion =
GeneralRequest::ProtocolVersion::UNKNOWN;
// true if a close has been requested by the client
bool _closeRequested = false;
std::deque<basics::StringBuffer*> _writeBuffers;
std::deque<TRI_request_statistics_t*>
_writeBuffersStats; // statistics buffers
bool _isChunked; // true if within a chunked response
bool _requestPending; // true if request is complete but not handled
std::deque<TRI_request_statistics_t*> _writeBuffersStats;
};
}
}

View File

@ -61,18 +61,6 @@ int GeneralServer::sendChunk(uint64_t taskId, std::string const& data) {
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief constructs a new general server with dispatcher and job manager
////////////////////////////////////////////////////////////////////////////////
GeneralServer::GeneralServer(
bool allowMethodOverride,
std::vector<std::string> const& accessControlAllowOrigins)
: _listenTasks(),
_endpointList(nullptr),
_allowMethodOverride(allowMethodOverride),
_accessControlAllowOrigins(accessControlAllowOrigins) {}
////////////////////////////////////////////////////////////////////////////////
/// @brief destructs a general server
////////////////////////////////////////////////////////////////////////////////
@ -129,7 +117,7 @@ void GeneralServer::stopListening() {
bool GeneralServer::handleRequestAsync(GeneralCommTask* task,
WorkItem::uptr<RestHandler>& handler,
uint64_t* jobId) {
bool startThread = task->startThread();
bool startThread = handler->needsOwnThread();
// extract the coordinator flag
bool found;
@ -182,7 +170,7 @@ bool GeneralServer::handleRequest(GeneralCommTask* task,
return true;
}
bool startThread = task->startThread();
bool startThread = handler->needsOwnThread();
// use a dispatcher queue, handler belongs to the job
std::unique_ptr<Job> job = std::make_unique<HttpServerJob>(this, handler);
@ -255,7 +243,7 @@ void GeneralServer::handleRequestDirectly(RestHandler* handler,
case RestHandler::status::FAILED:
case RestHandler::status::DONE: {
auto response = dynamic_cast<HttpResponse*>(handler->response());
task->handleResponse(response);
task->addResponse(response, false);
break;
}

View File

@ -53,27 +53,13 @@ class GeneralServer : protected TaskManager {
GeneralServer const& operator=(GeneralServer const&) = delete;
public:
// destroys an endpoint server
static int sendChunk(uint64_t, std::string const&);
public:
GeneralServer(bool allowMethodOverride,
std::vector<std::string> const& accessControlAllowOrigins);
GeneralServer() = default;
virtual ~GeneralServer();
public:
// returns the protocol
// virtual char const* protocol() const { return "http"; }
// check, if we allow a method override
bool allowMethodOverride() { return _allowMethodOverride; }
public:
// list of trusted origin urls for CORS
std::vector<std::string> const& trustedOrigins() const {
return _accessControlAllowOrigins;
}
// adds the endpoint list
void setEndpointList(const EndpointList* list);
@ -91,19 +77,11 @@ class GeneralServer : protected TaskManager {
// executes the handler directly or add it to the queue
bool handleRequest(GeneralCommTask*, arangodb::WorkItem::uptr<RestHandler>&);
protected:
// Handler, Job, and Task tuple
struct handler_task_job_t {
RestHandler* _handler;
GeneralCommTask* _task;
HttpServerJob* _job;
};
protected:
// opens a listen port
bool openEndpoint(Endpoint* endpoint);
// handle request directly
// handles request directly
void handleRequestDirectly(RestHandler* handler, GeneralCommTask* task);
// registers a task
@ -114,15 +92,7 @@ class GeneralServer : protected TaskManager {
std::vector<ListenTask*> _listenTasks;
// defined ports and addresses
const EndpointList* _endpointList;
// allow to override the method
bool _allowMethodOverride;
// list of trusted origin urls for CORS
std::vector<std::string> const _accessControlAllowOrigins;
private:
EndpointList const* _endpointList = nullptr;
};
}
}

View File

@ -203,8 +203,8 @@ void GeneralServerFeature::validateOptions(std::shared_ptr<ProgramOptions>) {
std::remove_if(_accessControlAllowOrigins.begin(),
_accessControlAllowOrigins.end(),
[](std::string const& value) {
return basics::StringUtils::trim(value).empty();
}),
return basics::StringUtils::trim(value).empty();
}),
_accessControlAllowOrigins.end());
}
@ -354,9 +354,7 @@ void GeneralServerFeature::buildServers() {
}
}
GeneralServer* server =
new GeneralServer(_allowMethodOverride,
_accessControlAllowOrigins);
GeneralServer* server = new GeneralServer();
server->setEndpointList(&endpointList);
_servers.push_back(server);

View File

@ -66,7 +66,7 @@ class GeneralServerFeature final
};
static bool authenticationEnabled() {
return GENERAL_SERVER != nullptr && GENERAL_SERVER->authentication();
return GENERAL_SERVER != nullptr && GENERAL_SERVER->_authentication;
}
static bool hasProxyCheck() {
@ -89,6 +89,24 @@ class GeneralServerFeature final
return GENERAL_SERVER->jwtSecret();
}
static bool allowMethodOverride() {
if (GENERAL_SERVER == nullptr) {
return false;
}
return GENERAL_SERVER->_allowMethodOverride;
}
static std::vector<std::string> const& accessControlAllowOrigins() {
static std::vector<std::string> empty;
if (GENERAL_SERVER == nullptr) {
return empty;
}
return GENERAL_SERVER->_accessControlAllowOrigins;
}
private:
static GeneralServerFeature* GENERAL_SERVER;
static const size_t _maxSecretLength = 64;

View File

@ -33,20 +33,10 @@
#include "Scheduler/SchedulerFeature.h"
#include "VocBase/ticks.h" //clock
////////////////////////////////////////////////////////////////////////////////
/// @brief reads data from the socket
////////////////////////////////////////////////////////////////////////////////
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
namespace arangodb {
class HttpRequest;
class HttpResponse;
namespace rest {
class GeneralServer;
size_t const HttpCommTask::MaximalHeaderSize = 1 * 1024 * 1024; // 1 MB
size_t const HttpCommTask::MaximalBodySize = 512 * 1024 * 1024; // 512 MB
size_t const HttpCommTask::MaximalPipelineSize = 512 * 1024 * 1024; // 512 MB
@ -60,9 +50,8 @@ HttpCommTask::HttpCommTask(GeneralServer* server, TRI_socket_t sock,
_startPosition(0),
_bodyPosition(0),
_bodyLength(0),
_closeRequested(false),
_readRequestBody(false),
_allowMethodOverride(server->allowMethodOverride()),
_allowMethodOverride(GeneralServerFeature::allowMethodOverride()),
_denyCredentials(true),
_acceptDeflate(false),
_newRequest(true),
@ -70,14 +59,19 @@ HttpCommTask::HttpCommTask(GeneralServer* server, TRI_socket_t sock,
_fullUrl(),
_origin(),
_sinceCompactification(0),
_originalBodyLength(0)
{
_originalBodyLength(0) {
_protocol = "http";
connectionStatisticsAgentSetHttp();
}
void HttpCommTask::addResponse(HttpResponse* response) {
void HttpCommTask::addResponse(HttpResponse* response, bool isError) {
_requestPending = false;
_isChunked = false;
if (isError) {
resetState(true);
}
// CORS response handling
if (!_origin.empty()) {
// the request contained an Origin header. We have to send back the
@ -109,15 +103,6 @@ void HttpCommTask::addResponse(HttpResponse* response) {
// HEAD must not return a body
response->headResponse(responseBodyLength);
}
// else {
// // to enable automatic deflating of responses, activate this.
// // deflate takes a lot of CPU time so it should only be enabled for
// // dedicated purposes and not generally
// if (responseBodyLength > 16384 && _acceptDeflate) {
// response->deflate();
// responseBodyLength = response->bodySize();
// }
// }
// reserve a buffer with some spare capacity
auto buffer = std::make_unique<StringBuffer>(TRI_UNKNOWN_MEM_ZONE,
@ -170,12 +155,9 @@ void HttpCommTask::addResponse(HttpResponse* response) {
// start output
fillWriteBuffer();
} // addResponse
////////////////////////////////////////////////////////////////////////////////
/// @brief reads data from the socket
////////////////////////////////////////////////////////////////////////////////
}
// reads data from the socket
bool HttpCommTask::processRead() {
if (_requestPending || _readBuffer->c_str() == nullptr) {
return false;
@ -303,7 +285,7 @@ bool HttpCommTask::processRead() {
// if the request asks to allow credentials, we'll check against the
// configured whitelist of origins
std::vector<std::string> const& accessControlAllowOrigins =
_server->trustedOrigins();
GeneralServerFeature::accessControlAllowOrigins();
if (!accessControlAllowOrigins.empty()) {
if (accessControlAllowOrigins[0] == "*") {
@ -314,13 +296,12 @@ bool HttpCommTask::processRead() {
if (_origin[_origin.size() - 1] == '/') {
// strip trailing slash
auto result = std::find(accessControlAllowOrigins.begin(),
accessControlAllowOrigins.end(),
_origin.substr(0, _origin.size() - 1));
accessControlAllowOrigins.end(),
_origin.substr(0, _origin.size() - 1));
_denyCredentials = (result == accessControlAllowOrigins.end());
} else {
auto result =
std::find(accessControlAllowOrigins.begin(),
accessControlAllowOrigins.end(), _origin);
auto result = std::find(accessControlAllowOrigins.begin(),
accessControlAllowOrigins.end(), _origin);
_denyCredentials = (result == accessControlAllowOrigins.end());
}
} else {
@ -440,8 +421,7 @@ bool HttpCommTask::processRead() {
}
// read "bodyLength" from read buffer and add this body to "httpRequest"
_requestAsHttp()->setBody(_readBuffer->c_str() + _bodyPosition,
_bodyLength);
requestAsHttp()->setBody(_readBuffer->c_str() + _bodyPosition, _bodyLength);
LOG(TRACE) << "" << std::string(_readBuffer->c_str() + _bodyPosition,
_bodyLength);
@ -483,7 +463,7 @@ bool HttpCommTask::processRead() {
// the connection
LOG(DEBUG) << "connection close requested by client";
_closeRequested = true;
} else if (_requestAsHttp()->isHttp10() && connectionType != "keep-alive") {
} else if (requestAsHttp()->isHttp10() && connectionType != "keep-alive") {
// HTTP 1.0 request, and no "Connection: Keep-Alive" header sent
// we should close the connection
LOG(DEBUG) << "no keep-alive, connection close requested by client";
@ -532,11 +512,11 @@ bool HttpCommTask::processRead() {
response.setHeaderNC(StaticStrings::WwwAuthenticate, std::move(realm));
clearRequest();
handleResponse(&response);
processResponse(&response);
}
return true;
} // processsRead
}
////////////////////////////////////////////////////////////////////////////////
/// @brief processes a request
@ -546,7 +526,7 @@ void HttpCommTask::processRequest() {
// check for deflate
bool found;
auto httpRequest = _requestAsHttp();
auto httpRequest = requestAsHttp();
std::string const& acceptEncoding =
httpRequest->header(StaticStrings::AcceptEncoding, found);
@ -585,84 +565,10 @@ void HttpCommTask::processRequest() {
}
}
// check for an async request
std::string const& asyncExecution =
_request->header(StaticStrings::Async, found);
// create handler, this will take over the request and the response
std::unique_ptr<HttpResponse> response(
new HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR));
// execute response
WorkItem::uptr<RestHandler> handler(
GeneralServerFeature::HANDLER_FACTORY->createHandler(_request,
response.get()));
// ab hier generell
if (handler == nullptr) {
LOG(TRACE) << "no handler is known, giving up";
clearRequest();
handleSimpleError(GeneralResponse::ResponseCode::NOT_FOUND);
return;
}
response.release();
if (_request != nullptr) {
bool found;
std::string const& startThread =
_request->header(StaticStrings::StartThread, found);
if (found) {
_startThread = StringUtils::boolean(startThread);
}
}
handler->setTaskId(_taskId, _loop);
// clear request object
_request = nullptr;
// async execution
bool ok = false;
if (found && (asyncExecution == "true" || asyncExecution == "store")) {
requestStatisticsAgentSetAsync();
uint64_t jobId = 0;
if (asyncExecution == "store") {
// persist the responses
ok = _server->handleRequestAsync(this, handler, &jobId);
} else {
// don't persist the responses
ok = _server->handleRequestAsync(this, handler, nullptr);
}
if (ok) {
HttpResponse response(GeneralResponse::ResponseCode::ACCEPTED);
if (jobId > 0) {
// return the job id we just created
response.setHeaderNC(StaticStrings::AsyncId, StringUtils::itoa(jobId));
}
handleResponse(&response);
return;
}
}
// synchronous request
else {
ok = _server->handleRequest(this, handler);
}
if (!ok) {
handleSimpleError(GeneralResponse::ResponseCode::SERVER_ERROR);
}
} // processRequest
// create a handler and execute
executeRequest(_request,
new HttpResponse(GeneralResponse::ResponseCode::SERVER_ERROR));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief chunking is finished
@ -678,7 +584,6 @@ void HttpCommTask::finishedChunked() {
_writeBuffersStats.push_back(nullptr);
_isChunked = false;
_startThread = false;
_requestPending = false;
fillWriteBuffer();
@ -728,20 +633,6 @@ bool HttpCommTask::checkContentLength(bool expectContentLength) {
return true;
}
void HttpCommTask::fillWriteBuffer() {
if (!hasWriteBuffer() && !_writeBuffers.empty()) {
StringBuffer* buffer = _writeBuffers.front();
_writeBuffers.pop_front();
TRI_ASSERT(buffer != nullptr);
TRI_request_statistics_t* statistics = _writeBuffersStats.front();
_writeBuffersStats.pop_front();
setWriteBuffer(buffer, statistics);
}
}
void HttpCommTask::processCorsOptions() {
HttpResponse response(GeneralResponse::ResponseCode::OK);
@ -774,82 +665,22 @@ void HttpCommTask::processCorsOptions() {
StaticStrings::N1800);
}
clearRequest();
handleResponse(&response);
processResponse(&response);
}
void HttpCommTask::signalTask(TaskData* data) {
// data response
if (data->_type == TaskData::TASK_DATA_RESPONSE) {
data->RequestStatisticsAgent::transferTo(this);
HttpResponse* response = dynamic_cast<HttpResponse*>(data->_response.get());
if (response != nullptr) {
handleResponse(response);
processRead();
} else {
handleSimpleError(GeneralResponse::ResponseCode::SERVER_ERROR);
}
}
// data response
else if (data->_type == TaskData::TASK_DATA_BUFFER) {
data->RequestStatisticsAgent::transferTo(this);
HttpResponse response(GeneralResponse::ResponseCode::OK);
velocypack::Slice slice(data->_buffer->data());
response.setPayload(_request, slice, true, VPackOptions::Defaults);
handleResponse(&response);
processRead();
}
// data chunk
else if (data->_type == TaskData::TASK_DATA_CHUNK) {
size_t len = data->_data.size();
if (0 == len) {
finishedChunked();
} else {
StringBuffer* buffer = new StringBuffer(TRI_UNKNOWN_MEM_ZONE, len);
buffer->appendHex(len);
buffer->appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
buffer->appendText(data->_data.c_str(), len);
buffer->appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
sendChunk(buffer);
}
}
// do not know, what to do - give up
else {
_scheduler->destroyTask(this);
}
}
bool HttpCommTask::handleRead() {
bool res = true;
if (!_closeRequested) {
res = fillReadBuffer();
// process as much data as we got
while (processRead()) {
if (_closeRequested) {
break;
}
}
void HttpCommTask::handleChunk(char const* data, size_t len) {
if (0 == len) {
finishedChunked();
} else {
// if we don't close here, the scheduler thread may fall into a
// busy wait state, consuming 100% CPU!
_clientClosed = true;
}
StringBuffer* buffer = new StringBuffer(TRI_UNKNOWN_MEM_ZONE, len);
if (_clientClosed) {
res = false;
} else if (!res) {
_clientClosed = true;
}
buffer->appendHex(len);
buffer->appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
buffer->appendText(data, len);
buffer->appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
return res;
sendChunk(buffer);
}
}
void HttpCommTask::completedWriteBuffer() {
@ -913,14 +744,14 @@ void HttpCommTask::resetState(bool close) {
_newRequest = true;
_readRequestBody = false;
_startThread = false;
}
GeneralResponse::ResponseCode HttpCommTask::authenticateRequest() {
auto context = (_request == nullptr) ? nullptr : _request->requestContext();
if (context == nullptr && _request != nullptr) {
bool res = GeneralServerFeature::HANDLER_FACTORY->setRequestContext(_request);
bool res =
GeneralServerFeature::HANDLER_FACTORY->setRequestContext(_request);
if (!res) {
return GeneralResponse::ResponseCode::NOT_FOUND;
@ -948,12 +779,10 @@ void HttpCommTask::sendChunk(StringBuffer* buffer) {
}
// convert internal GeneralRequest to HttpRequest
HttpRequest* HttpCommTask::_requestAsHttp() {
HttpRequest* HttpCommTask::requestAsHttp() {
HttpRequest* request = dynamic_cast<HttpRequest*>(_request);
if (request == nullptr) {
// everything is borken FIXME
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
return request;
};
} // rest
} // arangodb

View File

@ -16,48 +16,44 @@ class HttpCommTask : public GeneralCommTask {
HttpCommTask(GeneralServer*, TRI_socket_t, ConnectionInfo&&, double timeout);
bool processRead() override;
virtual void processRequest() override;
void addResponse(GeneralResponse* response) override {
// convert from GeneralResponse to httpResponse ad dispatch request to class
// internal addResponse
// convert from GeneralResponse to httpResponse ad dispatch request to class
// internal addResponse
void addResponse(GeneralResponse* response, bool isError) override {
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(response);
if (httpResponse == nullptr) {
// everything is borken
if (httpResponse != nullptr) {
addResponse(httpResponse, isError);
}
addResponse(httpResponse);
};
protected:
void handleChunk(char const*, size_t) override final;
void completedWriteBuffer() override final;
private:
void signalTask(TaskData*) override final;
// resets the internal state
// this method can be called to clean up when the request handling aborts
// prematurely
virtual void resetState(bool close) override final;
void processRequest();
// resets the internal state this method can be called to clean up when the
// request handling aborts prematurely
void resetState(bool close);
HttpRequest* _requestAsHttp();
void addResponse(HttpResponse*);
void addResponse(HttpResponse*, bool isError);
HttpRequest* requestAsHttp();
void finishedChunked();
// check the content-length header of a request and fail it is broken
bool checkContentLength(bool expectContentLength);
void fillWriteBuffer(); // fills the write buffer
void processCorsOptions(); // handles CORS options
std::string authenticationRealm() const; // returns the authentication realm
GeneralResponse::ResponseCode
authenticateRequest(); // checks the authentication
void sendChunk(basics::StringBuffer*); // sends more chunked data
bool handleRead() override final;
private:
size_t _readPosition; // current read position
size_t _startPosition; // start position of current request
size_t _bodyPosition; // start of the body position
size_t _bodyLength; // body length
bool _closeRequested; // true if a close has been requested by the client
bool _readRequestBody; // true if reading the request body
size_t _readPosition; // current read position
size_t _startPosition; // start position of current request
size_t _bodyPosition; // start of the body position
size_t _bodyLength; // body length
bool _readRequestBody; // true if reading the request body
bool _allowMethodOverride; // allow method override
bool _denyCredentials; // whether or not to allow credentialed requests (only
// CORS)
@ -73,6 +69,12 @@ class HttpCommTask : public GeneralCommTask {
// authentication real
std::string const _authenticationRealm;
// true if within a chunked response
bool _isChunked = false;
// true if request is complete but not handled
bool _requestPending = false;
};
} // rest
} // arangodb

View File

@ -39,9 +39,16 @@ std::atomic_uint_fast64_t NEXT_HANDLER_ID(
RestHandler::RestHandler(GeneralRequest* request, GeneralResponse* response)
: _handlerId(NEXT_HANDLER_ID.fetch_add(1, std::memory_order_seq_cst)),
_taskId(0),
_request(request),
_response(response) {}
_response(response) {
bool found;
std::string const& startThread =
_request->header(StaticStrings::StartThread, found);
if (found) {
_needsOwnThread = StringUtils::boolean(startThread);
}
}
RestHandler::~RestHandler() {
delete _request;

View File

@ -57,6 +57,9 @@ class RestHandler : public RequestStatisticsAgent, public arangodb::WorkItem {
// returns true if a handler is executed directly
virtual bool isDirect() const = 0;
// returns true if a handler desires to start a new dispatcher thread
virtual bool needsOwnThread() const { return _needsOwnThread; }
// returns the queue name
virtual size_t queue() const { return Dispatcher::STANDARD_QUEUE; }
@ -111,8 +114,8 @@ class RestHandler : public RequestStatisticsAgent, public arangodb::WorkItem {
// handler id
uint64_t const _handlerId;
// task id or 0
uint64_t _taskId;
// task id or (initially) 0
uint64_t _taskId = 0;
// event loop
EventLoop _loop;
@ -123,6 +126,9 @@ class RestHandler : public RequestStatisticsAgent, public arangodb::WorkItem {
// OBI-TODO make private
// the response
GeneralResponse* _response;
private:
bool _needsOwnThread = false;
};
}
}

View File

@ -0,0 +1,779 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Achim Brandt
////////////////////////////////////////////////////////////////////////////////
#include "VppCommTask.h"
#include "Basics/HybridLogicalClock.h"
#include "GeneralServer/GeneralServer.h"
#include "GeneralServer/GeneralServerFeature.h"
#include "GeneralServer/RestHandler.h"
#include "GeneralServer/RestHandlerFactory.h"
#include "Scheduler/Scheduler.h"
#include "Scheduler/SchedulerFeature.h"
#include "VocBase/ticks.h"
#include <velocypack/Validator.h>
#include <velocypack/velocypack-aliases.h>
#include <stdexcept>
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
// Constructs a VPP comm task for one accepted client connection.
// Takes ownership of the socket via the GeneralCommTask base and tags the
// connection with the "vpp" protocol name.
VppCommTask::VppCommTask(GeneralServer* server, TRI_socket_t sock,
                         ConnectionInfo&& info, double timeout)
    : Task("VppCommTask"),
      GeneralCommTask(server, sock, std::move(info), timeout),
      _requestType(GeneralRequest::RequestType::ILLEGAL),
      _fullUrl() {
  _protocol = "vpp";  // protocol tag inspected by the server infrastructure
  // statistics hook still disabled while the VPP implementation is WIP
  // connectionStatisticsAgentSetVpp();
}
namespace {
// Size in bytes of a VPP chunk header. Every chunk carries two uint32_t
// fields (chunk length and the chunkX flags/number word) plus a 64-bit
// message id; the first chunk of a multi-chunk message carries one extra
// 64-bit field, hence the larger size when oneChunk is false.
constexpr size_t getChunkHeaderLength(bool oneChunk) {
  return 2 * sizeof(uint32_t) + (oneChunk ? 1 : 2) * sizeof(uint64_t);
}
}
// Serializes a response and queues it on the task's write buffers.
// NOTE(review): currently a no-op stub -- the disabled block below is the
// HTTP implementation kept verbatim for reference while the VPP variant is
// being written. Nothing is sent to the client yet.
void VppCommTask::addResponse(VppResponse* response, bool isError) {
  /*
  _requestPending = false;
  _isChunked = false;
  if (isError) {
    resetState(true);
  }
  // CORS response handling
  if (!_origin.empty()) {
    // the request contained an Origin header. We have to send back the
    // access-control-allow-origin header now
    LOG(TRACE) << "handling CORS response";
    response->setHeaderNC(StaticStrings::AccessControlExposeHeaders,
                          StaticStrings::ExposedCorsHeaders);
    // send back original value of "Origin" header
    response->setHeaderNC(StaticStrings::AccessControlAllowOrigin, _origin);
    // send back "Access-Control-Allow-Credentials" header
    response->setHeaderNC(StaticStrings::AccessControlAllowCredentials,
                          (_denyCredentials ? "false" : "true"));
  }
  // CORS request handling EOF
  // set "connection" header
  // keep-alive is the default
  response->setConnectionType(_closeRequested
                                  ? VppResponse::CONNECTION_CLOSE
                                  : VppResponse::CONNECTION_KEEP_ALIVE);
  size_t const responseBodyLength = response->bodySize();
  if (_requestType == GeneralRequest::RequestType::HEAD) {
    // clear body if this is an vpp HEAD request
    // HEAD must not return a body
    response->headResponse(responseBodyLength);
  }
  // reserve a buffer with some spare capacity
  auto buffer = std::make_unique<StringBuffer>(TRI_UNKNOWN_MEM_ZONE,
                                               responseBodyLength + 128, false);
  // write header
  response->writeHeader(buffer.get());
  // write body
  if (_requestType != GeneralRequest::RequestType::HEAD) {
    if (_isChunked) {
      if (0 != responseBodyLength) {
        buffer->appendHex(response->body().length());
        buffer->appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
        buffer->appendText(response->body());
        buffer->appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
      }
    } else {
      buffer->appendText(response->body());
    }
  }
  buffer->ensureNullTerminated();
  _writeBuffers.push_back(buffer.get());
  auto b = buffer.release();
  if (!b->empty()) {
    LOG_TOPIC(TRACE, Logger::REQUESTS)
        << "\"vpp-request-response\",\"" << (void*)this << "\",\""
        << StringUtils::escapeUnicode(std::string(b->c_str(), b->length()))
        << "\"";
  }
  // clear body
  response->body().clear();
  double const totalTime = RequestStatisticsAgent::elapsedSinceReadStart();
  _writeBuffersStats.push_back(RequestStatisticsAgent::steal());
  LOG_TOPIC(INFO, Logger::REQUESTS)
      << "\"vpp-request-end\",\"" << (void*)this << "\",\""
      << _connectionInfo.clientAddress << "\",\""
      << VppRequest::translateMethod(_requestType) << "\",\""
      << VppRequest::translateVersion(_protocolVersion) << "\","
      << static_cast<int>(response->responseCode()) << ","
      << _originalBodyLength << "," << responseBodyLength << ",\"" << _fullUrl
      << "\"," << Logger::FIXED(totalTime, 6);
  // start output
  fillWriteBuffer();
  */
}
// Decodes the fixed-size chunk header at the start of _readBuffer.
// Wire layout: uint32 chunk length | uint32 chunkX | uint64 message id,
// where bit 0 of chunkX flags the first chunk of a message and the
// remaining 31 bits carry the chunk number (or the chunk count on a
// first chunk). Uses memcpy to avoid unaligned reads.
// Precondition: chunkComplete() returned true, so the header bytes are
// fully buffered.
VppCommTask::ChunkHeader VppCommTask::readChunkHeader() {
  VppCommTask::ChunkHeader header;

  auto cursor = _readBuffer->begin();

  std::memcpy(&header._length, cursor, sizeof(header._length));
  cursor += sizeof(header._length);

  uint32_t chunkX;
  std::memcpy(&chunkX, cursor, sizeof(chunkX));
  cursor += sizeof(chunkX);

  header._isFirst = chunkX & 0x1;  // lowest bit: first chunk of a message
  header._chunk = chunkX >> 1;     // remaining bits: chunk number / count

  std::memcpy(&header._messageId, cursor, sizeof(header._messageId));
  return header;
  // FIX: dropped the unused local `uint32_t vpChunkLength;` that was never
  // read or written
}
// Returns true once the read buffer holds at least one complete chunk.
// The expected chunk size is cached in _processReadVariables so partial
// reads do not re-parse the 4-byte length prefix.
bool VppCommTask::chunkComplete() {
  auto& state = _processReadVariables;
  auto begin = _readBuffer->begin();
  std::size_t const available = std::distance(begin, _readBuffer->end());

  if (!state._currentChunkLength) {
    // still waiting for the 4-byte chunk-length prefix
    if (available < sizeof(uint32_t)) {
      return false;
    }
    // remember the advertised chunk length for subsequent calls
    std::memcpy(&state._currentChunkLength, begin, sizeof(uint32_t));
  }

  // complete once the whole advertised chunk has been buffered
  return available >= state._currentChunkLength;
}
// Validates the chunk at the start of _readBuffer: after the chunk header
// it must contain two well-formed velocypack slices (the vpp header and
// the payload). Throws (from VPackValidator) on malformed data.
// Returns {offset of the payload slice, offset of the end of the message},
// both relative to the start of the velocypack data.
std::pair<std::size_t, std::size_t> VppCommTask::validateChunkOnBuffer(
    std::size_t chunkLength) {
  auto readBufferStart = _readBuffer->begin();
  auto chunkHeaderLength = getChunkHeaderLength(true);
  auto sliceStart = readBufferStart + chunkHeaderLength;
  auto sliceLength = chunkLength - chunkHeaderLength;

  VPackValidator validator;
  // check for slice start to the end of Chunk
  // isSubPart allows the slice to be shorter than the checked buffer.
  validator.validate(sliceStart, sliceLength, /*isSubPart =*/true);

  VPackSlice vppHeader(sliceStart);
  auto vppHeaderLen = vppHeader.byteSize();
  sliceStart += vppHeaderLen;
  // BUGFIX: the remaining validatable length shrinks by the header slice's
  // size; it was previously grown (`+=`), letting the validator inspect
  // bytes past the end of the chunk
  sliceLength -= vppHeaderLen;
  validator.validate(sliceStart, sliceLength, /*isSubPart =*/true);

  VPackSlice vppPayload(sliceStart);
  auto vppPayloadLen = vppPayload.byteSize();

  return std::pair<std::size_t, std::size_t>(vppHeaderLen,
                                             vppHeaderLen + vppPayloadLen);
}
// Reads data from the socket: called whenever new bytes arrived on the
// connection. Returns true when the caller may keep reading (including the
// case where the buffered data is still an incomplete chunk).
//
// NOTE(review): work in progress -- single- and multi-chunk messages are
// detected and validated, but message assembly and request execution are
// still stubs (see the inline "execute" / "append" comments). The large
// block comment before the final return is the not-yet-ported HTTP
// implementation, kept verbatim for reference.
bool VppCommTask::processRead() {
  auto readBufferStart = _readBuffer->begin();
  if (readBufferStart == nullptr || !chunkComplete()) {
    return true;  // no data or incomplete
  }
  auto header = readChunkHeader();
  if (header._isFirst && header._chunk == 1) {  // one chunk one message
    // whole message in one chunk: validate it in place
    validateChunkOnBuffer(header._length);
    VPackMessage message;
    // execute
  }
  auto incompleteMessageItr = _incompleteMessages.find(header._messageId);
  if (header._isFirst) {  // first chunk of multi chunk message
    if (incompleteMessageItr != _incompleteMessages.end()) {
      throw std::logic_error(
          "Message should be first but is already in the Map of incomplete "
          "messages");
    }
    // append to VPackBuffer in incomplete Message
    auto numberOfChunks = header._chunk;
    auto chunkNumber = 1;
    // set message length
  } else {  // followup chunk of some message
    if (incompleteMessageItr == _incompleteMessages.end()) {
      throw std::logic_error("found message without previous part");
    }
    auto& im = incompleteMessageItr->second;  // incomplete Message
    auto chunkNumber = header._chunk;
    // append to VPackBuffer in incomplete Message
    if (chunkNumber == im._numberOfChunks) {
      // last expected chunk arrived: assemble and execute
      // VPackMessage message;
      // execute
    }
  }
  // clean buffer up to length of chunk
  return true;

  /*
  if (RequestStatisticsAgent::_statistics != nullptr) {
    RequestStatisticsAgent::_statistics->_id = (void*)this;
  }
#endif
  _newRequest = false;
  _startPosition = _readPosition;
  _protocolVersion = GeneralRequest::ProtocolVersion::UNKNOWN;
  _requestType = GeneralRequest::RequestType::ILLEGAL;
  _fullUrl = "";
  _denyCredentials = true;
  _acceptDeflate = false;
  _sinceCompactification++;
  }
  char const* end = etr - 3;
  // read buffer contents are way to small. we can exit here directly
  if (ptr >= end) {
    return false;
  }
  // request started
  requestStatisticsAgentSetReadStart();
  // check for the end of the request
  for (; ptr < end; ptr++) {
    if (ptr[0] == '\r' && ptr[1] == '\n' && ptr[2] == '\r' &&
        ptr[3] == '\n') {
      break;
    }
  }
  // check if header is too large
  size_t headerLength = ptr - (_readBuffer->c_str() + _startPosition);
  if (headerLength > MaximalHeaderSize) {
    LOG(WARN) << "maximal header size is " << MaximalHeaderSize
              << ", request header size is " << headerLength;
    // header is too large
    handleSimpleError(
        GeneralResponse::ResponseCode::REQUEST_HEADER_FIELDS_TOO_LARGE);
    return false;
  }
  // header is complete
  if (ptr < end) {
    _readPosition = ptr - _readBuffer->c_str() + 4;
    LOG(TRACE) << "vpp READ FOR " << (void*)this << ": "
               << std::string(_readBuffer->c_str() + _startPosition,
                              _readPosition - _startPosition);
    // check that we know, how to serve this request and update the
    connection
    // information, i. e. client and server addresses and ports and create a
    // request context for that request
    _request = new VppRequest(
        _connectionInfo, _readBuffer->c_str() + _startPosition,
        _readPosition - _startPosition, _allowMethodOverride);
    GeneralServerFeature::HANDLER_FACTORY->setRequestContext(_request);
    _request->setClientTaskId(_taskId);
    // check vpp protocol version
    _protocolVersion = _request->protocolVersion();
    if (_protocolVersion != GeneralRequest::ProtocolVersion::vpp_1_0 &&
        _protocolVersion != GeneralRequest::ProtocolVersion::vpp_1_1) {
      handleSimpleError(
          GeneralResponse::ResponseCode::vpp_VERSION_NOT_SUPPORTED);
      return false;
    }
    // check max URL length
    _fullUrl = _request->fullUrl();
    if (_fullUrl.size() > 16384) {
      handleSimpleError(GeneralResponse::ResponseCode::REQUEST_URI_TOO_LONG);
      return false;
    }
    // update the connection information, i. e. client and server addresses
    // and ports
    _request->setProtocol(_protocol);
    LOG(TRACE) << "server port " << _connectionInfo.serverPort
               << ", client port " << _connectionInfo.clientPort;
    // set body start to current position
    _bodyPosition = _readPosition;
    _bodyLength = 0;
    // keep track of the original value of the "origin" request header (if
    // any), we need this value to handle CORS requests
    _origin = _request->header(StaticStrings::Origin);
    if (!_origin.empty()) {
      // check for Access-Control-Allow-Credentials header
      bool found;
      std::string const& allowCredentials = _request->header(
          StaticStrings::AccessControlAllowCredentials, found);
      if (found) {
        // default is to allow nothing
        _denyCredentials = true;
        // if the request asks to allow credentials, we'll check against the
        // configured whitelist of origins
        std::vector<std::string> const& accessControlAllowOrigins =
            GeneralServerFeature::accessControlAllowOrigins();
        if (StringUtils::boolean(allowCredentials) &&
            !accessControlAllowOrigins.empty()) {
          if (accessControlAllowOrigins[0] == "*") {
            // special case: allow everything
            _denyCredentials = false;
          } else if (!_origin.empty()) {
            // copy origin string
            if (_origin[_origin.size() - 1] == '/') {
              // strip trailing slash
              auto result = std::find(accessControlAllowOrigins.begin(),
                                      accessControlAllowOrigins.end(),
                                      _origin.substr(0, _origin.size() -
                                      1));
              _denyCredentials = (result ==
              accessControlAllowOrigins.end());
            } else {
              auto result =
                  std::find(accessControlAllowOrigins.begin(),
                            accessControlAllowOrigins.end(), _origin);
              _denyCredentials = (result ==
              accessControlAllowOrigins.end());
            }
          } else {
            TRI_ASSERT(_denyCredentials);
          }
        }
      }
    }
    // store the original request's type. we need it later when responding
    // (original request object gets deleted before responding)
    _requestType = _request->requestType();
    requestStatisticsAgentSetRequestType(_requestType);
    // handle different vpp methods
    switch (_requestType) {
      case GeneralRequest::RequestType::GET:
      case GeneralRequest::RequestType::DELETE_REQ:
      case GeneralRequest::RequestType::HEAD:
      case GeneralRequest::RequestType::OPTIONS:
      case GeneralRequest::RequestType::POST:
      case GeneralRequest::RequestType::PUT:
      case GeneralRequest::RequestType::PATCH: {
        // technically, sending a body for an vpp DELETE request is not
        // forbidden, but it is not explicitly supported
        bool const expectContentLength =
            (_requestType == GeneralRequest::RequestType::POST ||
             _requestType == GeneralRequest::RequestType::PUT ||
             _requestType == GeneralRequest::RequestType::PATCH ||
             _requestType == GeneralRequest::RequestType::OPTIONS ||
             _requestType == GeneralRequest::RequestType::DELETE_REQ);
        if (!checkContentLength(expectContentLength)) {
          return false;
        }
        if (_bodyLength == 0) {
          handleRequest = true;
        }
        break;
      }
      default: {
        size_t l = _readPosition - _startPosition;
        if (6 < l) {
          l = 6;
        }
        LOG(WARN) << "got corrupted vpp request '"
                  << std::string(_readBuffer->c_str() + _startPosition, l)
                  << "'";
        // force a socket close, response will be ignored!
        TRI_CLOSE_SOCKET(_commSocket);
        TRI_invalidatesocket(&_commSocket);
        // bad request, method not allowed
        handleSimpleError(GeneralResponse::ResponseCode::METHOD_NOT_ALLOWED);
        return false;
      }
    }
    //
    .............................................................................
    // check if server is active
    //
    .............................................................................
    Scheduler const* scheduler = SchedulerFeature::SCHEDULER;
    if (scheduler != nullptr && !scheduler->isActive()) {
      // server is inactive and will intentionally respond with vpp 503
      LOG(TRACE) << "cannot serve request - server is inactive";
      handleSimpleError(GeneralResponse::ResponseCode::SERVICE_UNAVAILABLE);
      return false;
    }
    // check for a 100-continue
    if (_readRequestBody) {
      bool found;
      std::string const& expect =
          _request->header(StaticStrings::Expect, found);
      if (found && StringUtils::trim(expect) == "100-continue") {
        LOG(TRACE) << "received a 100-continue request";
        auto buffer = std::make_unique<StringBuffer>(TRI_UNKNOWN_MEM_ZONE);
        buffer->appendText(
            TRI_CHAR_LENGTH_PAIR("vpp/1.1 100 (Continue)\r\n\r\n"));
        buffer->ensureNullTerminated();
        _writeBuffers.push_back(buffer.get());
        buffer.release();
        _writeBuffersStats.push_back(nullptr);
        fillWriteBuffer();
      }
    }
  } else {
    size_t l = (_readBuffer->end() - _readBuffer->c_str());
    if (_startPosition + 4 <= l) {
      _readPosition = l - 4;
    }
  }
  }
  // readRequestBody might have changed, so cannot use else
  if (_readRequestBody) {
    if (_readBuffer->length() - _bodyPosition < _bodyLength) {
      setKeepAliveTimeout(_keepAliveTimeout);
      // let client send more
      return false;
    }
    // read "bodyLength" from read buffer and add this body to "vppRequest"
    requestAsVpp()->setBody(_readBuffer->c_str() + _bodyPosition,
                            _bodyLength);
    LOG(TRACE) << "" << std::string(_readBuffer->c_str() + _bodyPosition,
                                    _bodyLength);
    // remove body from read buffer and reset read position
    _readRequestBody = false;
    handleRequest = true;
  }
  //
  .............................................................................
  // request complete
  //
  // we have to delete request in here or pass it to a handler, which will
  // delete
  // it
  //
  .............................................................................
  if (!handleRequest) {
    return false;
  }
  requestStatisticsAgentSetReadEnd();
  requestStatisticsAgentAddReceivedBytes(_bodyPosition - _startPosition +
                                         _bodyLength);
  bool const isOptionsRequest =
      (_requestType == GeneralRequest::RequestType::OPTIONS);
  resetState(false);
  //
  .............................................................................
  // keep-alive handling
  //
  .............................................................................
  std::string connectionType =
      StringUtils::tolower(_request->header(StaticStrings::Connection));
  if (connectionType == "close") {
    // client has sent an explicit "Connection: Close" header. we should close
    // the connection
    LOG(DEBUG) << "connection close requested by client";
    _closeRequested = true;
  } else if (requestAsVpp()->isVpp10() && connectionType != "keep-alive") {
    // vpp 1.0 request, and no "Connection: Keep-Alive" header sent
    // we should close the connection
    LOG(DEBUG) << "no keep-alive, connection close requested by client";
    _closeRequested = true;
  } else if (_keepAliveTimeout <= 0.0) {
    // if keepAliveTimeout was set to 0.0, we'll close even keep-alive
    // connections immediately
    LOG(DEBUG) << "keep-alive disabled by admin";
    _closeRequested = true;
  }
  // we keep the connection open in all other cases (vpp 1.1 or Keep-Alive
  // header sent)
  //
  .............................................................................
  // authenticate
  //
  .............................................................................
  GeneralResponse::ResponseCode authResult = authenticateRequest();
  // authenticated or an OPTIONS request. OPTIONS requests currently go
  // unauthenticated
  if (authResult == GeneralResponse::ResponseCode::OK || isOptionsRequest) {
    // handle vpp OPTIONS requests directly
    if (isOptionsRequest) {
      processCorsOptions();
    } else {
      processRequest();
    }
  }
  // not found
  else if (authResult == GeneralResponse::ResponseCode::NOT_FOUND) {
    handleSimpleError(authResult, TRI_ERROR_ARANGO_DATABASE_NOT_FOUND,
                      TRI_errno_string(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND));
  }
  // forbidden
  else if (authResult == GeneralResponse::ResponseCode::FORBIDDEN) {
    handleSimpleError(authResult, TRI_ERROR_USER_CHANGE_PASSWORD,
                      "change password");
  }
  // not authenticated
  else {
    VppResponse response(GeneralResponse::ResponseCode::UNAUTHORIZED);
    std::string realm = "Bearer token_type=\"JWT\", realm=\"ArangoDB\"";
    response.setHeaderNC(StaticStrings::WwwAuthenticate, std::move(realm));
    clearRequest();
    processResponse(&response);
  }
  */
  return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief processes a request
////////////////////////////////////////////////////////////////////////////////

// NOTE(review): currently a no-op stub -- the disabled block below is the
// HTTP implementation kept for reference; no handler is created yet.
void VppCommTask::processRequest() {
  /*
  // check for deflate
  bool found;
  auto vppRequest = requestAsVpp();
  std::string const& acceptEncoding =
      vppRequest->header(StaticStrings::AcceptEncoding, found);
  if (found) {
    if (acceptEncoding.find("deflate") != std::string::npos) {
      _acceptDeflate = true;
    }
  }
  if (vppRequest != nullptr) {
    LOG_TOPIC(DEBUG, Logger::REQUESTS)
        << "\"vpp-request-begin\",\"" << (void*)this << "\",\""
        << _connectionInfo.clientAddress << "\",\""
        << VppRequest::translateMethod(_requestType) << "\",\""
        << VppRequest::translateVersion(_protocolVersion) << "\"," << _fullUrl
        << "\"";
    std::string const& body = vppRequest->body();
    if (!body.empty()) {
      LOG_TOPIC(DEBUG, Logger::REQUESTS)
          << "\"vpp-request-body\",\"" << (void*)this << "\",\""
          << (StringUtils::escapeUnicode(body)) << "\"";
    }
  }
  // check for an HLC time stamp
  std::string const& timeStamp =
      _request->header(StaticStrings::HLCHeader, found);
  if (found) {
    uint64_t timeStampInt =
        arangodb::basics::HybridLogicalClock::decodeTimeStampWithCheck(
            timeStamp);
    if (timeStampInt != 0) {
      TRI_HybridLogicalClock(timeStampInt);
    }
  }
  // create a handler and execute
  executeRequest(_request,
                 new
                 VppResponse(GeneralResponse::ResponseCode::SERVER_ERROR));
  */
}
// Called by the base class once a write buffer has been fully flushed to
// the socket. NOTE(review): currently a no-op -- the commented lines are
// the HTTP bookkeeping (statistics release, refill, close-on-drain) that
// still needs porting to VPP.
void VppCommTask::completedWriteBuffer() {
  // _writeBuffer = nullptr;
  // _writeLength = 0;
  //
  // if (_writeBufferStatistics != nullptr) {
  //   _writeBufferStatistics->_writeEnd = TRI_StatisticsTime();
  //   TRI_ReleaseRequestStatistics(_writeBufferStatistics);
  //   _writeBufferStatistics = nullptr;
  // }
  //
  // fillWriteBuffer();
  //
  // if (!_clientClosed && _closeRequested && !hasWriteBuffer() &&
  //     _writeBuffers.empty() && !_isChunked) {
  //   _clientClosed = true;
  // }
}
// Resets the task's per-request state; `close` selects whether the
// connection should also be shut down. NOTE(review): currently a no-op
// stub -- the disabled block below is the HTTP implementation (read-buffer
// compaction etc.) kept for reference.
void VppCommTask::resetState(bool close) {
  /*
  if (close) {
    clearRequest();
    _requestPending = false;
    _isChunked = false;
    _closeRequested = true;
    _readPosition = 0;
    _bodyPosition = 0;
    _bodyLength = 0;
  } else {
    _requestPending = true;
    bool compact = false;
    if (_sinceCompactification > RunCompactEvery) {
      compact = true;
    } else if (_readBuffer->length() > MaximalPipelineSize) {
      compact = true;
    }
    if (compact) {
      _readBuffer->erase_front(_bodyPosition + _bodyLength);
      _sinceCompactification = 0;
      _readPosition = 0;
    } else {
      _readPosition = _bodyPosition + _bodyLength;
      if (_readPosition == _readBuffer->length()) {
        _sinceCompactification = 0;
        _readPosition = 0;
        _readBuffer->reset();
      }
    }
    _bodyPosition = 0;
    _bodyLength = 0;
  }
  _newRequest = true;
  _readRequestBody = false;
  */
}
// GeneralResponse::ResponseCode VppCommTask::authenticateRequest() {
// auto context = (_request == nullptr) ? nullptr :
// _request->requestContext();
//
// if (context == nullptr && _request != nullptr) {
// bool res =
// GeneralServerFeature::HANDLER_FACTORY->setRequestContext(_request);
//
// if (!res) {
// return GeneralResponse::ResponseCode::NOT_FOUND;
// }
//
// context = _request->requestContext();
// }
//
// if (context == nullptr) {
// return GeneralResponse::ResponseCode::SERVER_ERROR;
// }
//
// return context->authenticate();
// }
// Converts the internal GeneralRequest to a VppRequest.
// Throws TRI_ERROR_INTERNAL if the current request is not actually a
// VppRequest -- this must never happen for this protocol-specific task.
VppRequest* VppCommTask::requestAsVpp() {
  VppRequest* request = dynamic_cast<VppRequest*>(_request);
  if (request == nullptr) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
  }
  return request;
}  // FIX: dropped the stray ';' after the function body (empty declaration
   // at namespace scope; -Wextra-semi)

View File

@ -0,0 +1,88 @@
#ifndef ARANGOD_GENERAL_SERVER_VPP_COMM_TASK_H
#define ARANGOD_GENERAL_SERVER_VPP_COMM_TASK_H 1
#include "GeneralServer/GeneralCommTask.h"
#include "lib/Rest/VppResponse.h"
#include "lib/Rest/VppRequest.h"
namespace arangodb {
namespace rest {
// A fully assembled velocypack message as received over the wire.
struct VPackMessage {
  uint32_t _length;  // length of the total message in bytes
  VPackBuffer<uint8_t> _buffer;  // raw message bytes
  VPackSlice _header;   // header slice -- presumably points into _buffer; confirm
  VPackSlice _payload;  // payload slice -- presumably points into _buffer; confirm
};
// Comm task implementing the VelocyStream ("vpp") protocol on one client
// connection: splits the read buffer into chunks, reassembles multi-chunk
// messages and dispatches complete requests.
class VppCommTask : public GeneralCommTask {
 public:
  VppCommTask(GeneralServer*, TRI_socket_t, ConnectionInfo&&, double timeout);

  // read data check if chunk and message are complete
  // if message is complete execute a request
  bool processRead() override;

  // convert from GeneralResponse to vppResponse and dispatch request to class
  // internal addResponse
  void addResponse(GeneralResponse* response, bool isError) override {
    VppResponse* vppResponse = dynamic_cast<VppResponse*>(response);
    if (vppResponse != nullptr) {
      addResponse(vppResponse, isError);
    }
    // NOTE(review): a non-VppResponse is silently dropped here -- decide
    // whether this should throw instead
  }

 protected:
  void completedWriteBuffer() override final;

 private:
  void processRequest();
  // resets the internal state this method can be called to clean up when the
  // request handling aborts prematurely
  void resetState(bool close);
  void addResponse(VppResponse*, bool isError);
  VppRequest* requestAsVpp();

 private:
  using MessageID = uint64_t;

  // buffer plus bookkeeping for a message that arrives in several chunks
  struct IncompleteVPackMessage {
    uint32_t _length;  // length of the total message in bytes
    std::size_t _numberOfChunks;
    VPackBuffer<uint8_t> _chunks;
    std::vector<std::pair<std::size_t, std::size_t>> _chunkOffesesAndLengths;
    std::vector<std::size_t> _vpackOffsets;  // offset to slice in buffer
  };
  std::unordered_map<MessageID, IncompleteVPackMessage> _incompleteMessages;

  struct ProcessReadVariables {
    // size of the chunk currently being processed, or 0 while expecting a
    // new chunk.
    // FIX: was an uninitialized `bool`, but chunkComplete() memcpys a full
    // uint32_t chunk length into it (out-of-bounds write / UB) and reads it
    // before the first assignment -- now a zero-initialized uint32_t.
    uint32_t _currentChunkLength = 0;
  };

  // decoded fixed-size header of a single chunk
  struct ChunkHeader {
    uint32_t _length;  // length of this chunk in bytes
    uint32_t _chunk;   // chunk number (or chunk count on a first chunk)
    uint64_t _messageId;
    bool _isFirst;     // true for the first chunk of a message
  };

  bool chunkComplete();           // subfunction of processRead
  ChunkHeader readChunkHeader();  // subfunction of processRead

  // validates chunk on read _readBuffer and returns
  // offsets to Payload VPack and The End of Message.
  std::pair<std::size_t, std::size_t> validateChunkOnBuffer(std::size_t);

  ProcessReadVariables _processReadVariables;
  GeneralRequest::RequestType _requestType;  // type of request (GET, POST, ...)
  std::string _fullUrl;                      // value of requested URL

  // user
  // authenticated or not
  // database aus url
};
} // rest
} // arangodb
#endif

View File

@ -93,8 +93,8 @@ static int CompareKeyElement(VPackSlice const* left,
TRI_ASSERT(nullptr != left);
TRI_ASSERT(nullptr != right);
auto rightSubobjects = right->subObjects();
return arangodb::basics::VelocyPackHelper::compare(*left,
rightSubobjects[rightPosition].slice(right->document()), true);
return arangodb::basics::VelocyPackHelper::compare(
*left, rightSubobjects[rightPosition].slice(right->document()), true);
}
////////////////////////////////////////////////////////////////////////////////
@ -115,30 +115,25 @@ static int CompareElementElement(TRI_index_element_t const* left,
return arangodb::basics::VelocyPackHelper::compare(l, r, true);
}
bool BaseSkiplistLookupBuilder::isEquality() const {
return _isEquality;
}
bool BaseSkiplistLookupBuilder::isEquality() const { return _isEquality; }
VPackSlice const* BaseSkiplistLookupBuilder::getLowerLookup() const {
return &_lowerSlice;
}
bool BaseSkiplistLookupBuilder::includeLower() const {
return _includeLower;
}
bool BaseSkiplistLookupBuilder::includeLower() const { return _includeLower; }
VPackSlice const* BaseSkiplistLookupBuilder::getUpperLookup() const {
return &_upperSlice;
}
bool BaseSkiplistLookupBuilder::includeUpper() const {
return _includeUpper;
}
bool BaseSkiplistLookupBuilder::includeUpper() const { return _includeUpper; }
SkiplistLookupBuilder::SkiplistLookupBuilder(
Transaction* trx,
std::vector<std::vector<arangodb::aql::AstNode const*>>& ops,
arangodb::aql::Variable const* var, bool reverse) : BaseSkiplistLookupBuilder(trx) {
arangodb::aql::Variable const* var, bool reverse)
: BaseSkiplistLookupBuilder(trx) {
_lowerBuilder->openArray();
if (ops.empty()) {
// We only use this skiplist to sort. use empty array for lookup
@ -167,7 +162,7 @@ SkiplistLookupBuilder::SkiplistLookupBuilder(
TRI_ASSERT(op->numMembers() == 2);
auto value = op->getMember(0);
if (value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var) {
paramPair.first == var) {
value = op->getMember(1);
TRI_ASSERT(!(value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var));
@ -191,7 +186,7 @@ SkiplistLookupBuilder::SkiplistLookupBuilder(
auto value = op->getMember(0);
if (value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var) {
paramPair.first == var) {
value = op->getMember(1);
TRI_ASSERT(!(value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var));
@ -199,28 +194,28 @@ SkiplistLookupBuilder::SkiplistLookupBuilder(
}
switch (op->type) {
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LT:
if (isReverseOrder) {
if (isReverseOrder) {
_includeLower = false;
} else {
_includeUpper = false;
}
// Fall through intentional
// Fall through intentional
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LE:
if (isReverseOrder) {
if (isReverseOrder) {
value->toVelocyPackValue(*(_lowerBuilder.get()));
} else {
value->toVelocyPackValue(*(_upperBuilder.get()));
}
break;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GT:
if (isReverseOrder) {
if (isReverseOrder) {
_includeUpper = false;
} else {
_includeLower = false;
}
// Fall through intentional
// Fall through intentional
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE:
if (isReverseOrder) {
if (isReverseOrder) {
value->toVelocyPackValue(*(_upperBuilder.get()));
} else {
value->toVelocyPackValue(*(_lowerBuilder.get()));
@ -244,7 +239,7 @@ SkiplistLookupBuilder::SkiplistLookupBuilder(
TRI_ASSERT(op->numMembers() == 2);
auto value = op->getMember(0);
if (value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var) {
paramPair.first == var) {
value = op->getMember(1);
TRI_ASSERT(!(value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var));
@ -268,14 +263,15 @@ SkiplistLookupBuilder::SkiplistLookupBuilder(
bool SkiplistLookupBuilder::next() {
// The first search value is created during creation.
// So next is always false.
return false;
return false;
}
SkiplistInLookupBuilder::SkiplistInLookupBuilder(
Transaction* trx,
std::vector<std::vector<arangodb::aql::AstNode const*>>& ops,
arangodb::aql::Variable const* var, bool reverse) : BaseSkiplistLookupBuilder(trx), _dataBuilder(trx), _done(false) {
TRI_ASSERT(!ops.empty()); // We certainly do not need IN here
arangodb::aql::Variable const* var, bool reverse)
: BaseSkiplistLookupBuilder(trx), _dataBuilder(trx), _done(false) {
TRI_ASSERT(!ops.empty()); // We certainly do not need IN here
TransactionBuilderLeaser tmp(trx);
std::set<VPackSlice, arangodb::basics::VelocyPackHelper::VPackSorted<true>>
unique_set(
@ -302,7 +298,6 @@ SkiplistInLookupBuilder::SkiplistInLookupBuilder(
paramPair.first == var));
}
if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN) {
if (valueLeft) {
// Case: value IN x.a
// This is identical to == for the index.
@ -343,7 +338,7 @@ SkiplistInLookupBuilder::SkiplistInLookupBuilder(
auto value = op->getMember(0);
if (value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var) {
paramPair.first == var) {
value = op->getMember(1);
TRI_ASSERT(!(value->isAttributeAccessForVariable(paramPair) &&
paramPair.first == var));
@ -352,14 +347,14 @@ SkiplistInLookupBuilder::SkiplistInLookupBuilder(
switch (op->type) {
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LT:
if (isReverseOrder) {
if (isReverseOrder) {
_includeLower = false;
} else {
_includeUpper = false;
}
// Fall through intentional
// Fall through intentional
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LE:
if (isReverseOrder) {
if (isReverseOrder) {
TRI_ASSERT(lower == nullptr);
lower = value;
} else {
@ -368,14 +363,14 @@ SkiplistInLookupBuilder::SkiplistInLookupBuilder(
}
break;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GT:
if (isReverseOrder) {
if (isReverseOrder) {
_includeUpper = false;
} else {
_includeLower = false;
}
// Fall through intentional
// Fall through intentional
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE:
if (isReverseOrder) {
if (isReverseOrder) {
TRI_ASSERT(upper == nullptr);
upper = value;
} else {
@ -680,7 +675,8 @@ void SkiplistIterator2::initNextInterval() {
leftBorder = leftBorder->nextNode();
if (_builder->includeUpper()) {
rightBorder = _skiplistIndex->rightKeyLookup(_builder->getUpperLookup());
rightBorder =
_skiplistIndex->rightKeyLookup(_builder->getUpperLookup());
} else {
rightBorder = _skiplistIndex->leftKeyLookup(_builder->getUpperLookup());
}
@ -812,7 +808,7 @@ int SkiplistIndex::insert(arangodb::Transaction*, TRI_doc_mptr_t const* doc,
_skiplistIndex->remove(elements[j]);
// No need to free elements[j] skiplist has taken over already
}
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && !_unique) {
// We ignore unique_constraint violated if we are not unique
res = TRI_ERROR_NO_ERROR;
@ -939,7 +935,7 @@ SkiplistIterator* SkiplistIndex::lookup(arangodb::Transaction* trx,
TransactionBuilderLeaser rightSearch(trx);
*(rightSearch.builder()) = *leftSearch.builder();
// Define Lower-Bound
// Define Lower-Bound
VPackSlice lastLeft = lastNonEq.get(StaticStrings::IndexGe);
if (!lastLeft.isNone()) {
TRI_ASSERT(!lastNonEq.hasKey(StaticStrings::IndexGt));
@ -949,7 +945,8 @@ SkiplistIterator* SkiplistIndex::lookup(arangodb::Transaction* trx,
leftBorder = _skiplistIndex->leftKeyLookup(&search);
// leftKeyLookup guarantees that we find the element before search. This
// should not be in the cursor, but the next one
// This is also save for the startNode, it should never be contained in the index.
// This is also save for the startNode, it should never be contained in
// the index.
leftBorder = leftBorder->nextNode();
} else {
lastLeft = lastNonEq.get(StaticStrings::IndexGt);
@ -1029,8 +1026,7 @@ SkiplistIterator* SkiplistIndex::lookup(arangodb::Transaction* trx,
////////////////////////////////////////////////////////////////////////////////
int SkiplistIndex::KeyElementComparator::operator()(
VPackSlice const* leftKey,
TRI_index_element_t const* rightElement) const {
VPackSlice const* leftKey, TRI_index_element_t const* rightElement) const {
TRI_ASSERT(nullptr != leftKey);
TRI_ASSERT(nullptr != rightElement);
@ -1041,8 +1037,7 @@ int SkiplistIndex::KeyElementComparator::operator()(
size_t numFields = leftKey->length();
for (size_t j = 0; j < numFields; j++) {
VPackSlice field = leftKey->at(j);
int compareResult =
CompareKeyElement(&field, rightElement, j);
int compareResult = CompareKeyElement(&field, rightElement, j);
if (compareResult != 0) {
return compareResult;
}
@ -1073,8 +1068,7 @@ int SkiplistIndex::ElementElementComparator::operator()(
}
for (size_t j = 0; j < _idx->numPaths(); j++) {
int compareResult =
CompareElementElement(leftElement, j, rightElement, j);
int compareResult = CompareElementElement(leftElement, j, rightElement, j);
if (compareResult != 0) {
return compareResult;
@ -1094,9 +1088,11 @@ int SkiplistIndex::ElementElementComparator::operator()(
}
// We break this tie in the key comparison by looking at the key:
VPackSlice leftKey = VPackSlice(leftElement->document()->vpack()).get(StaticStrings::KeyString);
VPackSlice rightKey = VPackSlice(rightElement->document()->vpack()).get(StaticStrings::KeyString);
VPackSlice leftKey = VPackSlice(leftElement->document()->vpack())
.get(StaticStrings::KeyString);
VPackSlice rightKey = VPackSlice(rightElement->document()->vpack())
.get(StaticStrings::KeyString);
int compareResult = leftKey.compareString(rightKey.copyString());
if (compareResult < 0) {
@ -1244,7 +1240,6 @@ void SkiplistIndex::matchAttributes(
}
}
bool SkiplistIndex::accessFitsIndex(
arangodb::aql::AstNode const* access, arangodb::aql::AstNode const* other,
arangodb::aql::AstNode const* op, arangodb::aql::Variable const* reference,
@ -1360,8 +1355,7 @@ bool SkiplistIndex::findMatchingConditions(
}
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN: {
auto m = op->getMember(1);
if (accessFitsIndex(op->getMember(0), m, op, reference,
mapping)) {
if (accessFitsIndex(op->getMember(0), m, op, reference, mapping)) {
if (m->numMembers() == 0) {
// We want to do an IN [].
// No results
@ -1395,7 +1389,7 @@ bool SkiplistIndex::findMatchingConditions(
if (first->getMember(1)->isArray()) {
usesIn = true;
}
//Fall through intentional
// Fall through intentional
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_EQ:
TRI_ASSERT(conditions.size() == 1);
break;
@ -1418,7 +1412,6 @@ bool SkiplistIndex::findMatchingConditions(
return true;
}
IndexIterator* SkiplistIndex::iteratorForCondition(
arangodb::Transaction* trx, IndexIteratorContext*,
arangodb::aql::AstNode const* node,
@ -1426,7 +1419,8 @@ IndexIterator* SkiplistIndex::iteratorForCondition(
std::vector<std::vector<arangodb::aql::AstNode const*>> mapping;
bool usesIn = false;
if (node != nullptr) {
mapping.resize(_fields.size()); // We use the default constructor. Mapping will have _fields many entries.
mapping.resize(_fields.size()); // We use the default constructor. Mapping
// will have _fields many entries.
TRI_ASSERT(mapping.size() == _fields.size());
if (!findMatchingConditions(node, reference, mapping, usesIn)) {
return new EmptyIndexIterator();
@ -1442,17 +1436,17 @@ IndexIterator* SkiplistIndex::iteratorForCondition(
}
if (usesIn) {
auto builder =
std::make_unique<SkiplistInLookupBuilder>(trx, mapping, reference, reverse);
return new SkiplistIterator2(_skiplistIndex, CmpElmElm, reverse, builder.release());
auto builder = std::make_unique<SkiplistInLookupBuilder>(
trx, mapping, reference, reverse);
return new SkiplistIterator2(_skiplistIndex, CmpElmElm, reverse,
builder.release());
}
auto builder =
std::make_unique<SkiplistLookupBuilder>(trx, mapping, reference, reverse);
return new SkiplistIterator2(_skiplistIndex, CmpElmElm, reverse, builder.release());
return new SkiplistIterator2(_skiplistIndex, CmpElmElm, reverse,
builder.release());
}
bool SkiplistIndex::supportsFilterCondition(
arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference, size_t itemsInIndex,

View File

@ -31,6 +31,8 @@
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include <iostream>
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
@ -47,11 +49,15 @@ RestBatchHandler::~RestBatchHandler() {}
RestHandler::status RestBatchHandler::execute() {
// TODO OBI - generalize function
if (_response == nullptr) {
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response);
if (httpResponse == nullptr) {
std::cout << "please fix this for vpack" << std::endl;
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(_request);
if (_request == nullptr) {
std::cout << "please fix this for vpack" << std::endl;
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
@ -161,14 +167,14 @@ RestHandler::status RestBatchHandler::execute() {
if (bodyLength > 0) {
LOG(TRACE) << "part body is '" << std::string(bodyStart, bodyLength)
<< "'";
request->setBody(bodyStart, bodyLength);
httpRequest->setBody(bodyStart, bodyLength);
}
if (!authorization.empty()) {
// inject Authorization header of multipart message into part message
request->setHeader(StaticStrings::Authorization.c_str(),
StaticStrings::Authorization.size(),
authorization.c_str(), authorization.size());
httpRequest->setHeader(StaticStrings::Authorization.c_str(),
StaticStrings::Authorization.size(),
authorization.c_str(), authorization.size());
}
RestHandler* handler = nullptr;
@ -203,7 +209,8 @@ RestHandler::status RestBatchHandler::execute() {
return status::FAILED;
}
GeneralResponse* partResponse = handler->response();
HttpResponse* partResponse =
dynamic_cast<HttpResponse*>(handler->response());
if (partResponse == nullptr) {
generateError(GeneralResponse::ResponseCode::BAD, TRI_ERROR_INTERNAL,
@ -220,28 +227,28 @@ RestHandler::status RestBatchHandler::execute() {
}
// append the boundary for this subpart
_response->body().appendText(boundary + "\r\nContent-Type: ");
_response->body().appendText(StaticStrings::BatchContentType);
httpResponse->body().appendText(boundary + "\r\nContent-Type: ");
httpResponse->body().appendText(StaticStrings::BatchContentType);
// append content-id if it is present
if (helper.contentId != 0) {
_response->body().appendText(
httpResponse->body().appendText(
"\r\nContent-Id: " +
std::string(helper.contentId, helper.contentIdLength));
}
_response->body().appendText(TRI_CHAR_LENGTH_PAIR("\r\n\r\n"));
httpResponse->body().appendText(TRI_CHAR_LENGTH_PAIR("\r\n\r\n"));
// remove some headers we don't need
partResponse->setConnectionType(HttpResponse::CONNECTION_NONE);
partResponse->setHeaderNC(StaticStrings::Server, "");
// append the part response header
partResponse->writeHeader(&_response->body());
partResponse->writeHeader(&httpResponse->body());
// append the part response body
_response->body().appendText(partResponse->body());
_response->body().appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
httpResponse->body().appendText(partResponse->body());
httpResponse->body().appendText(TRI_CHAR_LENGTH_PAIR("\r\n"));
}
// we've read the last part
@ -251,10 +258,10 @@ RestHandler::status RestBatchHandler::execute() {
}
// append final boundary + "--"
_response->body().appendText(boundary + "--");
httpResponse->body().appendText(boundary + "--");
if (errors > 0) {
_response->setHeaderNC(StaticStrings::Errors, StringUtils::itoa(errors));
httpResponse->setHeaderNC(StaticStrings::Errors, StringUtils::itoa(errors));
}
// success

View File

@ -852,17 +852,18 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
setResponseCode(static_cast<GeneralResponse::ResponseCode>(
res->result->getHttpReturnCode()));
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response);
if (_response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
_response->setContentType(
httpResponse->setContentType(
res->result->getHeaderField(StaticStrings::ContentTypeHeader, dummy));
_response->body().swap(&(res->result->getBody()));
httpResponse->body().swap(&(res->result->getBody()));
auto const& resultHeaders = res->result->getHeaderFields();
for (auto const& it : resultHeaders) {
_response->setHeader(it.first, it.second);
httpResponse->setHeader(it.first, it.second);
}
}
@ -1004,30 +1005,31 @@ void RestReplicationHandler::handleCommandLoggerFollow() {
setResponseCode(GeneralResponse::ResponseCode::OK);
}
if (_response == nullptr) {
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response);
if (httpResponse == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
_response->setContentType(GeneralResponse::ContentType::DUMP);
httpResponse->setContentType(GeneralResponse::ContentType::DUMP);
// set headers
_response->setHeaderNC(TRI_REPLICATION_HEADER_CHECKMORE,
checkMore ? "true" : "false");
httpResponse->setHeaderNC(TRI_REPLICATION_HEADER_CHECKMORE,
checkMore ? "true" : "false");
_response->setHeaderNC(TRI_REPLICATION_HEADER_LASTINCLUDED,
StringUtils::itoa(dump._lastFoundTick));
httpResponse->setHeaderNC(TRI_REPLICATION_HEADER_LASTINCLUDED,
StringUtils::itoa(dump._lastFoundTick));
_response->setHeaderNC(TRI_REPLICATION_HEADER_LASTTICK,
StringUtils::itoa(state.lastCommittedTick));
httpResponse->setHeaderNC(TRI_REPLICATION_HEADER_LASTTICK,
StringUtils::itoa(state.lastCommittedTick));
_response->setHeaderNC(TRI_REPLICATION_HEADER_ACTIVE, "true");
httpResponse->setHeaderNC(TRI_REPLICATION_HEADER_ACTIVE, "true");
_response->setHeaderNC(TRI_REPLICATION_HEADER_FROMPRESENT,
dump._fromTickIncluded ? "true" : "false");
httpResponse->setHeaderNC(TRI_REPLICATION_HEADER_FROMPRESENT,
dump._fromTickIncluded ? "true" : "false");
if (length > 0) {
// transfer ownership of the buffer contents
_response->body().set(dump._buffer);
httpResponse->body().set(dump._buffer);
// to avoid double freeing
TRI_StealStringBuffer(dump._buffer);
@ -1104,6 +1106,7 @@ void RestReplicationHandler::handleCommandDetermineOpenTransactions() {
setResponseCode(GeneralResponse::ResponseCode::OK);
}
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response);
if (_response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
@ -1118,7 +1121,7 @@ void RestReplicationHandler::handleCommandDetermineOpenTransactions() {
if (length > 0) {
// transfer ownership of the buffer contents
_response->body().set(dump._buffer);
httpResponse->body().set(dump._buffer);
// to avoid double freeing
TRI_StealStringBuffer(dump._buffer);

View File

@ -92,11 +92,10 @@ void SocketTask::setKeepAliveTimeout(double timeout) {
////////////////////////////////////////////////////////////////////////////////
bool SocketTask::fillReadBuffer() {
// reserve some memory for reading
if (_readBuffer->reserve(READ_BLOCK_SIZE + 1) == TRI_ERROR_OUT_OF_MEMORY) {
// out of memory
LOG(TRACE) << "out of memory";
return false;
}

View File

@ -57,27 +57,16 @@ class SocketTask : virtual public Task, public ConnectionStatisticsAgent {
private:
static size_t const READ_BLOCK_SIZE = 10000;
//////////////////////////////////////////////////////////////////////////////
/// @brief constructs a new task with a given socket
//////////////////////////////////////////////////////////////////////////////
public:
// @brief constructs a new task with a given socket
SocketTask(TRI_socket_t, double);
//////////////////////////////////////////////////////////////////////////////
/// @brief deletes a socket task
///
/// This method will close the underlying socket.
//////////////////////////////////////////////////////////////////////////////
protected:
// This method will close the underlying socket.
~SocketTask();
//////////////////////////////////////////////////////////////////////////////
/// set a request timeout
//////////////////////////////////////////////////////////////////////////////
public:
// set a request timeout
void setKeepAliveTimeout(double);
protected:
@ -91,17 +80,8 @@ class SocketTask : virtual public Task, public ConnectionStatisticsAgent {
virtual bool fillReadBuffer();
//////////////////////////////////////////////////////////////////////////////
/// @brief handles a read
//////////////////////////////////////////////////////////////////////////////
virtual bool handleRead() = 0;
//////////////////////////////////////////////////////////////////////////////
/// @brief handles a write
//////////////////////////////////////////////////////////////////////////////
virtual bool handleWrite();
virtual bool handleRead() = 0; // called by handleEvent
virtual bool handleWrite(); // called by handleEvent
//////////////////////////////////////////////////////////////////////////////
/// @brief called if write buffer has been sent
@ -112,10 +92,7 @@ class SocketTask : virtual public Task, public ConnectionStatisticsAgent {
virtual void completedWriteBuffer() = 0;
//////////////////////////////////////////////////////////////////////////////
/// @brief handles a keep-alive timeout
//////////////////////////////////////////////////////////////////////////////
// handles a keep-alive timeout
virtual void handleTimeout() = 0;
protected:
@ -134,75 +111,42 @@ class SocketTask : virtual public Task, public ConnectionStatisticsAgent {
protected:
bool setup(Scheduler*, EventLoop) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief cleans up the task by unregistering all watchers
//////////////////////////////////////////////////////////////////////////////
void cleanup() override;
// calls handleRead and handleWrite
bool handleEvent(EventToken token, EventType) override;
protected:
//////////////////////////////////////////////////////////////////////////////
/// @brief event for keep-alive timeout
//////////////////////////////////////////////////////////////////////////////
EventToken _keepAliveWatcher;
//////////////////////////////////////////////////////////////////////////////
/// @brief event for read
//////////////////////////////////////////////////////////////////////////////
EventToken _readWatcher;
//////////////////////////////////////////////////////////////////////////////
/// @brief event for write
//////////////////////////////////////////////////////////////////////////////
EventToken _writeWatcher;
//////////////////////////////////////////////////////////////////////////////
/// @brief communication socket
//////////////////////////////////////////////////////////////////////////////
TRI_socket_t _commSocket;
//////////////////////////////////////////////////////////////////////////////
/// @brief keep-alive timeout in seconds
//////////////////////////////////////////////////////////////////////////////
double _keepAliveTimeout;
//////////////////////////////////////////////////////////////////////////////
/// @brief the current write buffer
//////////////////////////////////////////////////////////////////////////////
basics::StringBuffer* _writeBuffer;
//////////////////////////////////////////////////////////////////////////////
/// @brief the current write buffer statistics
//////////////////////////////////////////////////////////////////////////////
TRI_request_statistics_t* _writeBufferStatistics;
//////////////////////////////////////////////////////////////////////////////
/// @brief number of bytes already written
//////////////////////////////////////////////////////////////////////////////
size_t _writeLength;
//////////////////////////////////////////////////////////////////////////////
/// @brief read buffer
///
/// The function fillReadBuffer stores the data in this buffer.
//////////////////////////////////////////////////////////////////////////////
basics::StringBuffer* _readBuffer;
//////////////////////////////////////////////////////////////////////////////
/// @brief client has closed the connection
//////////////////////////////////////////////////////////////////////////////
bool _clientClosed;
private:

View File

@ -498,11 +498,16 @@ static v8::Handle<v8::Object> RequestCppToV8(v8::Isolate* isolate,
// copy cookies
v8::Handle<v8::Object> cookiesObject = v8::Object::New(isolate);
for (auto& it : request->cookieValues()) {
cookiesObject->ForceSet(TRI_V8_STD_STRING(it.first),
TRI_V8_STD_STRING(it.second));
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(request);
if (httpRequest == nullptr) {
// maybe we can just continue
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
} else {
for (auto& it : httpRequest->cookieValues()) {
cookiesObject->ForceSet(TRI_V8_STD_STRING(it.first),
TRI_V8_STD_STRING(it.second));
}
}
TRI_GET_GLOBAL_STRING(CookiesKey);
req->ForceSet(CookiesKey, cookiesObject);

View File

@ -124,7 +124,8 @@ bool TRI_collection_t::IsAllowedName(bool allowSystem, std::string const& name)
/// @brief updates the parameter info block
int TRI_collection_t::updateCollectionInfo(TRI_vocbase_t* vocbase,
VPackSlice const& slice, bool doSync) {
VPackSlice const& slice,
bool doSync) {
WRITE_LOCKER(writeLocker, _infoLock);
if (!slice.isNone()) {
@ -139,8 +140,7 @@ int TRI_collection_t::updateCollectionInfo(TRI_vocbase_t* vocbase,
}
/// @brief seal a datafile
int TRI_collection_t::sealDatafile(TRI_datafile_t* datafile,
bool isCompactor) {
int TRI_collection_t::sealDatafile(TRI_datafile_t* datafile, bool isCompactor) {
int res = TRI_SealDatafile(datafile);
if (res != TRI_ERROR_NO_ERROR) {
@ -168,7 +168,7 @@ int TRI_collection_t::sealDatafile(TRI_datafile_t* datafile,
/// @brief rotate the active journal - will do nothing if there is no journal
int TRI_collection_t::rotateActiveJournal() {
WRITE_LOCKER(writeLocker, _filesLock);
// note: only journals need to be handled here as the journal is the
// only place that's ever written to. if a journal is full, it will have been
// sealed and synced already
@ -178,20 +178,20 @@ int TRI_collection_t::rotateActiveJournal() {
TRI_datafile_t* datafile = _journals[0];
TRI_ASSERT(datafile != nullptr);
if (_state != TRI_COL_STATE_WRITE) {
return TRI_ERROR_ARANGO_NO_JOURNAL;
}
// make sure we have enough room in the target vector before we go on
_datafiles.reserve(_datafiles.size() + 1);
int res = sealDatafile(datafile, false);
TRI_ASSERT(!_journals.empty());
_journals.erase(_journals.begin());
TRI_ASSERT(_journals.empty());
// shouldn't throw as we reserved enough space before
_datafiles.emplace_back(datafile);
@ -205,7 +205,7 @@ int TRI_collection_t::rotateActiveJournal() {
int TRI_collection_t::syncActiveJournal() {
WRITE_LOCKER(writeLocker, _filesLock);
// note: only journals need to be handled here as the journal is the
// only place that's ever written to. if a journal is full, it will have been
// sealed and synced already
@ -229,8 +229,9 @@ int TRI_collection_t::syncActiveJournal() {
bool ok = datafile->sync(datafile, synced, written);
if (ok) {
LOG_TOPIC(TRACE, Logger::COLLECTOR) << "msync succeeded " << (void*) synced << ", size "
<< (written - synced);
LOG_TOPIC(TRACE, Logger::COLLECTOR) << "msync succeeded "
<< (void*)synced << ", size "
<< (written - synced);
datafile->_synced = written;
} else {
res = TRI_errno();
@ -239,7 +240,8 @@ int TRI_collection_t::syncActiveJournal() {
res = TRI_ERROR_INTERNAL;
}
LOG_TOPIC(ERR, Logger::COLLECTOR) << "msync failed with: " << TRI_last_error();
LOG_TOPIC(ERR, Logger::COLLECTOR)
<< "msync failed with: " << TRI_last_error();
datafile->_state = TRI_DF_STATE_WRITE_ERROR;
}
}
@ -250,20 +252,20 @@ int TRI_collection_t::syncActiveJournal() {
////////////////////////////////////////////////////////////////////////////////
/// @brief reserve space in the current journal. if no create exists or the
/// current journal cannot provide enough space, close the old journal and
/// current journal cannot provide enough space, close the old journal and
/// create a new one
////////////////////////////////////////////////////////////////////////////////
int TRI_collection_t::reserveJournalSpace(TRI_voc_tick_t tick,
int TRI_collection_t::reserveJournalSpace(TRI_voc_tick_t tick,
TRI_voc_size_t size,
char*& resultPosition,
TRI_datafile_t*& resultDatafile) {
TRI_datafile_t*& resultDatafile) {
// reset results
resultPosition = nullptr;
resultDatafile = nullptr;
WRITE_LOCKER(writeLocker, _filesLock);
// start with configured journal size
TRI_voc_size_t targetSize = _info.maximalSize();
@ -318,20 +320,23 @@ int TRI_collection_t::reserveJournalSpace(TRI_voc_tick_t tick,
if (res != TRI_ERROR_ARANGO_DATAFILE_FULL) {
// some other error
LOG_TOPIC(ERR, Logger::COLLECTOR) << "cannot select journal: '" << TRI_last_error() << "'";
LOG_TOPIC(ERR, Logger::COLLECTOR) << "cannot select journal: '"
<< TRI_last_error() << "'";
return res;
}
// TRI_ERROR_ARANGO_DATAFILE_FULL...
// TRI_ERROR_ARANGO_DATAFILE_FULL...
// journal is full, close it and sync
LOG_TOPIC(DEBUG, Logger::COLLECTOR) << "closing full journal '" << datafile->getName(datafile) << "'";
LOG_TOPIC(DEBUG, Logger::COLLECTOR) << "closing full journal '"
<< datafile->getName(datafile) << "'";
// make sure we have enough room in the target vector before we go on
_datafiles.reserve(_datafiles.size() + 1);
res = sealDatafile(datafile, false);
// move journal into datafiles vector, regardless of whether an error occurred
// move journal into datafiles vector, regardless of whether an error
// occurred
TRI_ASSERT(!_journals.empty());
_journals.erase(_journals.begin());
TRI_ASSERT(_journals.empty());
@ -342,8 +347,8 @@ int TRI_collection_t::reserveJournalSpace(TRI_voc_tick_t tick,
// an error occurred, we must stop here
return res;
}
} // otherwise, next iteration!
} // otherwise, next iteration!
return TRI_ERROR_ARANGO_NO_JOURNAL;
}
@ -351,16 +356,17 @@ int TRI_collection_t::reserveJournalSpace(TRI_voc_tick_t tick,
/// @brief create compactor file
////////////////////////////////////////////////////////////////////////////////
TRI_datafile_t* TRI_collection_t::createCompactor(TRI_voc_fid_t fid,
TRI_datafile_t* TRI_collection_t::createCompactor(TRI_voc_fid_t fid,
TRI_voc_size_t maximalSize) {
try {
WRITE_LOCKER(writeLocker, _filesLock);
TRI_ASSERT(_compactors.empty());
// reserve enough space for the later addition
_compactors.reserve(_compactors.size() + 1);
TRI_datafile_t* compactor = createDatafile(fid, static_cast<TRI_voc_size_t>(maximalSize), true);
TRI_datafile_t* compactor =
createDatafile(fid, static_cast<TRI_voc_size_t>(maximalSize), true);
if (compactor != nullptr) {
// should not throw, as we've reserved enough space before
@ -392,12 +398,12 @@ int TRI_collection_t::closeCompactor(TRI_datafile_t* datafile) {
return sealDatafile(datafile, true);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief replace a datafile with a compactor
////////////////////////////////////////////////////////////////////////////////
int TRI_collection_t::replaceDatafileWithCompactor(TRI_datafile_t* datafile,
int TRI_collection_t::replaceDatafileWithCompactor(TRI_datafile_t* datafile,
TRI_datafile_t* compactor) {
TRI_ASSERT(datafile != nullptr);
TRI_ASSERT(compactor != nullptr);
@ -431,9 +437,11 @@ int TRI_collection_t::replaceDatafileWithCompactor(TRI_datafile_t* datafile,
////////////////////////////////////////////////////////////////////////////////
TRI_datafile_t* TRI_collection_t::createDatafile(TRI_voc_fid_t fid,
TRI_voc_size_t journalSize, bool isCompactor) {
TRI_voc_size_t journalSize,
bool isCompactor) {
TRI_ASSERT(fid > 0);
TRI_document_collection_t* document = static_cast<TRI_document_collection_t*>(this);
TRI_document_collection_t* document =
static_cast<TRI_document_collection_t*>(this);
TRI_ASSERT(document != nullptr);
// create an entry for the new datafile
@ -495,15 +503,16 @@ TRI_datafile_t* TRI_collection_t::createDatafile(TRI_voc_fid_t fid,
TRI_ASSERT(datafile != nullptr);
if (isCompactor) {
LOG(TRACE) << "created new compactor '" << datafile->getName(datafile) << "'";
LOG(TRACE) << "created new compactor '" << datafile->getName(datafile)
<< "'";
} else {
LOG(TRACE) << "created new journal '" << datafile->getName(datafile) << "'";
}
// create a collection header, still in the temporary file
TRI_df_marker_t* position;
int res = TRI_ReserveElementDatafile(datafile, sizeof(TRI_col_header_marker_t),
&position, journalSize);
int res = TRI_ReserveElementDatafile(
datafile, sizeof(TRI_col_header_marker_t), &position, journalSize);
TRI_IF_FAILURE("CreateJournalDocumentCollectionReserve1") {
res = TRI_ERROR_DEBUG;
@ -525,8 +534,9 @@ TRI_datafile_t* TRI_collection_t::createDatafile(TRI_voc_fid_t fid,
}
TRI_col_header_marker_t cm;
DatafileHelper::InitMarker(reinterpret_cast<TRI_df_marker_t*>(&cm), TRI_DF_MARKER_COL_HEADER,
sizeof(TRI_col_header_marker_t), static_cast<TRI_voc_tick_t>(fid));
DatafileHelper::InitMarker(
reinterpret_cast<TRI_df_marker_t*>(&cm), TRI_DF_MARKER_COL_HEADER,
sizeof(TRI_col_header_marker_t), static_cast<TRI_voc_tick_t>(fid));
cm._cid = document->_info.id();
res = TRI_WriteCrcElementDatafile(datafile, position, &cm.base, false);
@ -563,7 +573,7 @@ TRI_datafile_t* TRI_collection_t::createDatafile(TRI_voc_fid_t fid,
if (!ok) {
LOG(ERR) << "failed to rename journal '" << datafile->getName(datafile)
<< "' to '" << filename << "': " << TRI_last_error();
<< "' to '" << filename << "': " << TRI_last_error();
TRI_CloseDatafile(datafile);
TRI_UnlinkFile(datafile->getName(datafile));
@ -574,13 +584,13 @@ TRI_datafile_t* TRI_collection_t::createDatafile(TRI_voc_fid_t fid,
return nullptr;
} else {
LOG(TRACE) << "renamed journal from '" << datafile->getName(datafile)
<< "' to '" << filename << "'";
<< "' to '" << filename << "'";
}
}
return datafile;
}
//////////////////////////////////////////////////////////////////////////////
/// @brief remove a compactor file from the list of compactors
//////////////////////////////////////////////////////////////////////////////
@ -646,7 +656,9 @@ int TRI_collection_t::removeIndexFile(TRI_idx_iid_t id) {
/// @brief iterates over all index files of a collection
////////////////////////////////////////////////////////////////////////////////
void TRI_collection_t::iterateIndexes(std::function<bool(std::string const&, void*)> const& callback, void* data) {
void TRI_collection_t::iterateIndexes(
std::function<bool(std::string const&, void*)> const& callback,
void* data) {
// iterate over all index files
for (auto const& filename : _indexFiles) {
bool ok = callback(filename, data);
@ -681,7 +693,8 @@ static bool FilenameComparator(std::string const& lhs, std::string const& rhs) {
/// the filename
////////////////////////////////////////////////////////////////////////////////
static bool DatafileComparator(TRI_datafile_t const* lhs, TRI_datafile_t const* rhs) {
static bool DatafileComparator(TRI_datafile_t const* lhs,
TRI_datafile_t const* rhs) {
uint64_t const numLeft =
(lhs->_filename != nullptr ? GetNumericFilenamePart(lhs->_filename) : 0);
uint64_t const numRight =
@ -833,18 +846,22 @@ static TRI_col_file_structure_t ScanCollectionDirectory(char const* path) {
// ups, what kind of file is that
else {
LOG_TOPIC(ERR, Logger::DATAFILES) << "unknown datafile type '"
<< file << "'";
LOG_TOPIC(ERR, Logger::DATAFILES) << "unknown datafile type '" << file
<< "'";
}
}
}
// now sort the files in the structures that we created.
// the sorting allows us to iterate the files in the correct order
std::sort(structure.journals.begin(), structure.journals.end(), FilenameComparator);
std::sort(structure.compactors.begin(), structure.compactors.end(), FilenameComparator);
std::sort(structure.datafiles.begin(), structure.datafiles.end(), FilenameComparator);
std::sort(structure.indexes.begin(), structure.indexes.end(), FilenameComparator);
std::sort(structure.journals.begin(), structure.journals.end(),
FilenameComparator);
std::sort(structure.compactors.begin(), structure.compactors.end(),
FilenameComparator);
std::sort(structure.datafiles.begin(), structure.datafiles.end(),
FilenameComparator);
std::sort(structure.indexes.begin(), structure.indexes.end(),
FilenameComparator);
return structure;
}
@ -961,7 +978,8 @@ static bool CheckCollection(TRI_collection_t* collection, bool ignoreErrors) {
filename = newName;
}
TRI_datafile_t* datafile = TRI_OpenDatafile(filename.c_str(), ignoreErrors);
TRI_datafile_t* datafile =
TRI_OpenDatafile(filename.c_str(), ignoreErrors);
if (datafile == nullptr) {
collection->_lastError = TRI_errno();
@ -979,8 +997,10 @@ static bool CheckCollection(TRI_collection_t* collection, bool ignoreErrors) {
char const* ptr = datafile->_data;
// skip the datafile header
ptr += DatafileHelper::AlignedSize<size_t>(sizeof(TRI_df_header_marker_t));
TRI_col_header_marker_t const* cm = reinterpret_cast<TRI_col_header_marker_t const*>(ptr);
ptr +=
DatafileHelper::AlignedSize<size_t>(sizeof(TRI_df_header_marker_t));
TRI_col_header_marker_t const* cm =
reinterpret_cast<TRI_col_header_marker_t const*>(ptr);
if (cm->base.getType() != TRI_DF_MARKER_COL_HEADER) {
LOG(ERR) << "collection header mismatch in file '" << filename
@ -1085,7 +1105,6 @@ static bool CheckCollection(TRI_collection_t* collection, bool ignoreErrors) {
std::sort(journals.begin(), journals.end(), DatafileComparator);
std::sort(compactors.begin(), compactors.end(), DatafileComparator);
// add the datafiles and journals
collection->_datafiles = datafiles;
collection->_journals = journals;
@ -1211,8 +1230,7 @@ TRI_collection_t* TRI_CreateCollection(
TRI_set_errno(TRI_ERROR_ARANGO_COLLECTION_DIRECTORY_ALREADY_EXISTS);
LOG(ERR) << "cannot create collection '" << parameters.name()
<< "' in directory '" << dirname
<< "': directory already exists";
<< "' in directory '" << dirname << "': directory already exists";
return nullptr;
}
@ -1240,7 +1258,7 @@ TRI_collection_t* TRI_CreateCollection(
std::string const tmpfile(
arangodb::basics::FileUtils::buildFilename(tmpname.c_str(), ".tmp"));
res = TRI_WriteFile(tmpfile.c_str(), "", 0);
// this file will be renamed to this filename later...
std::string const tmpfile2(
arangodb::basics::FileUtils::buildFilename(dirname.c_str(), ".tmp"));
@ -1273,7 +1291,7 @@ TRI_collection_t* TRI_CreateCollection(
// .tmp file in it
InitCollection(vocbase, collection, dirname, parameters);
// delete .tmp file
TRI_UnlinkFile(tmpfile2.c_str());
@ -1331,7 +1349,7 @@ VocbaseCollectionInfo::VocbaseCollectionInfo(CollectionInfo const& other)
std::string const name = other.name();
memset(_name, 0, sizeof(_name));
memcpy(_name, name.c_str(), name.size());
VPackSlice keyOptionsSlice(other.keyOptions());
if (!keyOptionsSlice.isNone()) {
@ -1385,7 +1403,8 @@ VocbaseCollectionInfo::VocbaseCollectionInfo(TRI_vocbase_t* vocbase,
std::string const& name,
VPackSlice const& options,
bool forceIsSystem)
: VocbaseCollectionInfo(vocbase, name, TRI_COL_TYPE_DOCUMENT, options, forceIsSystem) {}
: VocbaseCollectionInfo(vocbase, name, TRI_COL_TYPE_DOCUMENT, options,
forceIsSystem) {}
VocbaseCollectionInfo::VocbaseCollectionInfo(TRI_vocbase_t* vocbase,
std::string const& name,
@ -1433,8 +1452,8 @@ VocbaseCollectionInfo::VocbaseCollectionInfo(TRI_vocbase_t* vocbase,
if (_maximalSize == 0 && maximalSize != 0) {
_maximalSize = static_cast<TRI_voc_size_t>(pageSize);
}
if (options.hasKey("count")) {
if (options.hasKey("count")) {
_initialCount =
arangodb::basics::VelocyPackHelper::getNumericValue<int64_t>(
options, "count", -1);
@ -1555,10 +1574,11 @@ VocbaseCollectionInfo VocbaseCollectionInfo::fromFile(
std::string const& path, TRI_vocbase_t* vocbase, std::string const& collectionName,
bool versionWarning) {
// find parameter file
std::string filename = arangodb::basics::FileUtils::buildFilename(path, TRI_VOC_PARAMETER_FILE);
std::string filename =
arangodb::basics::FileUtils::buildFilename(path, TRI_VOC_PARAMETER_FILE);
if (!TRI_ExistsFile(filename.c_str())) {
filename += ".tmp"; // try file with .tmp extension
filename += ".tmp"; // try file with .tmp extension
if (!TRI_ExistsFile(filename.c_str())) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE);
}
@ -1575,7 +1595,8 @@ VocbaseCollectionInfo VocbaseCollectionInfo::fromFile(
if (filename.substr(filename.size() - 4, 4) == ".tmp") {
// we got a tmp file. Now try saving the original file
arangodb::basics::VelocyPackHelper::velocyPackToFile(filename.c_str(), slice, true);
arangodb::basics::VelocyPackHelper::velocyPackToFile(filename.c_str(),
slice, true);
}
// fiddle "isSystem" value, which is not contained in the JSON file
@ -1685,15 +1706,18 @@ void VocbaseCollectionInfo::setDeleted(bool deleted) { _deleted = deleted; }
void VocbaseCollectionInfo::clearKeyOptions() { _keyOptions.reset(); }
int VocbaseCollectionInfo::saveToFile(std::string const& path, bool forceSync) const {
std::string filename = basics::FileUtils::buildFilename(path, TRI_VOC_PARAMETER_FILE);
int VocbaseCollectionInfo::saveToFile(std::string const& path,
bool forceSync) const {
std::string filename =
basics::FileUtils::buildFilename(path, TRI_VOC_PARAMETER_FILE);
VPackBuilder builder;
builder.openObject();
TRI_CreateVelocyPackCollectionInfo(*this, builder);
builder.close();
bool ok = VelocyPackHelper::velocyPackToFile(filename.c_str(), builder.slice(), forceSync);
bool ok = VelocyPackHelper::velocyPackToFile(filename.c_str(),
builder.slice(), forceSync);
if (!ok) {
int res = TRI_errno();
@ -1767,7 +1791,7 @@ void VocbaseCollectionInfo::update(VPackSlice const& slice, bool preferDefaults,
_indexBuckets =
arangodb::basics::VelocyPackHelper::getNumericValue<uint32_t>(
slice, "indexBuckets", _indexBuckets);
_initialCount =
arangodb::basics::VelocyPackHelper::getNumericValue<int64_t>(
slice, "count", _initialCount);
@ -1983,8 +2007,8 @@ bool TRI_IterateTicksCollection(char const* const path,
// no journal found for collection. should not happen normally, but if
// it does, we need to grab the ticks from the datafiles, too
return IterateFiles(structure.datafiles, iterator, data);
}
}
// compactor files don't need to be iterated... they just contain data
// copied
// from other files, so their tick values will never be any higher

View File

@ -147,16 +147,17 @@ typedef long suseconds_t;
#endif
#include <algorithm>
#include <atomic>
#include <cmath>
#include <deque>
#include <functional>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include <unordered_map>
#include <unordered_set>
#include <memory>
#include <atomic>
#include <vector>
#define TRI_WITHIN_COMMON 1
#include "Basics/voc-errors.h"

View File

@ -52,14 +52,6 @@ void* memmem(void const* haystack, size_t haystackLength, void const* needle, si
int gettimeofday(struct timeval* tv, void* tz);
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief gets a line
////////////////////////////////////////////////////////////////////////////////
#ifndef TRI_HAVE_GETLINE
ssize_t getline(char**, size_t*, FILE*);
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief safe localtime
////////////////////////////////////////////////////////////////////////////////

View File

@ -211,6 +211,8 @@ add_library(${LIB_ARANGO} STATIC
Rest/FakeRequest.cpp
Rest/GeneralRequest.cpp
Rest/GeneralResponse.cpp
Rest/VppRequest.cpp
Rest/VppResponse.cpp
Rest/HttpRequest.cpp
Rest/HttpResponse.cpp
Rest/InitializeRest.cpp

View File

@ -43,8 +43,6 @@ class EndpointList {
bool remove(std::string const&, Endpoint**);
std::vector<std::string> all() const;
std::vector<std::string> all(Endpoint::TransportType transport) const;
// std::map<std::string, Endpoint*> matching(Endpoint::TransportType,
// Endpoint::EncryptionType) const;
std::map<std::string, Endpoint*> allEndpoints() const { return _endpoints; }
bool hasSsl() const;
void dump() const;

View File

@ -45,12 +45,9 @@ class FakeRequest : public GeneralRequest {
// the content length
int64_t contentLength() const override { return _contentLength; }
std::unordered_map<std::string, std::string> cookieValues() const override {
return _cookies;
}
// Payload
velocypack::Slice payload(arangodb::velocypack::Options const*) override final;
velocypack::Slice payload(
arangodb::velocypack::Options const*) override final;
void setHeaders(std::unordered_map<std::string, std::string> const& headers) {
_headers = headers; // this is from the base class

View File

@ -34,7 +34,7 @@ using namespace arangodb::basics;
std::string GeneralRequest::translateVersion(ProtocolVersion version) {
switch (version) {
case ProtocolVersion::VSTREAM_1_0:
case ProtocolVersion::VPP_1_0:
return "VPP/1.0";
case ProtocolVersion::HTTP_1_1:
@ -44,12 +44,10 @@ std::string GeneralRequest::translateVersion(ProtocolVersion version) {
return "HTTP/1.0";
case ProtocolVersion::UNKNOWN:
default: {
return "HTTP/1.0";
}
default: { return "HTTP/1.0"; }
}
return "UNKNOWN"; // in order please MSVC
return "UNKNOWN"; // in order please MSVC
}
std::string GeneralRequest::translateMethod(RequestType method) {
@ -89,7 +87,7 @@ std::string GeneralRequest::translateMethod(RequestType method) {
return "UNKNOWN";
}
return "UNKNOWN"; // in order please MSVC
return "UNKNOWN"; // in order please MSVC
}
GeneralRequest::RequestType GeneralRequest::translateMethod(
@ -122,12 +120,13 @@ GeneralRequest::RequestType GeneralRequest::translateMethod(
}
/// @brief append the string form of a RequestType ("GET", "POST", ...)
/// plus a single trailing space to the given string buffer
void GeneralRequest::appendMethod(RequestType method, StringBuffer* buffer) {
  // append RequestType as string value to given String buffer
  buffer->appendText(translateMethod(method));
  buffer->appendChar(' ');
}
GeneralRequest::RequestType GeneralRequest::findRequestType(char const* ptr,
size_t const length) {
GeneralRequest::RequestType GeneralRequest::findRequestType(
char const* ptr, size_t const length) {
switch (length) {
case 3:
if (ptr[0] == 'g' && ptr[1] == 'e' && ptr[2] == 't') {
@ -202,6 +201,11 @@ void GeneralRequest::setFullUrl(char const* begin, char const* end) {
_fullUrl = std::string(begin, end - begin);
}
/// @brief set the full request URL (moved in); the url must not be empty
void GeneralRequest::setFullUrl(std::string url) {
  TRI_ASSERT(!url.empty());
  _fullUrl = std::move(url);
}
/// @brief url-decode the given path segment and append it to the suffix list
void GeneralRequest::addSuffix(std::string&& part) {
  _suffix.emplace_back(StringUtils::urlDecode(part));
}
@ -216,7 +220,8 @@ std::string const& GeneralRequest::header(std::string const& key) const {
return it->second;
}
std::string const& GeneralRequest::header(std::string const& key, bool& found) const {
std::string const& GeneralRequest::header(std::string const& key,
bool& found) const {
auto it = _headers.find(key);
if (it == _headers.end()) {
@ -240,7 +245,8 @@ std::string const& GeneralRequest::value(std::string const& key) const {
return StaticStrings::Empty;
}
std::string const& GeneralRequest::value(std::string const& key, bool& found) const {
std::string const& GeneralRequest::value(std::string const& key,
bool& found) const {
if (!_values.empty()) {
auto it = _values.find(key);
@ -254,16 +260,13 @@ std::string const& GeneralRequest::value(std::string const& key, bool& found) co
return StaticStrings::Empty;
}
void GeneralRequest::setArrayValue(char* key, size_t length, char const* value) {
void GeneralRequest::setArrayValue(char* key, size_t length,
char const* value) {
_arrayValues[std::string(key, length)].emplace_back(value);
}
/// @brief whether the client accepts a velocypack response.
/// Currently hard-wired to false: the Accept-header based detection below is
/// deliberately disabled via #if 0 (see the inline comments).
bool GeneralRequest::velocyPackResponse() const {
#if 0
  // currently deactivated
  // needs only to be used in http case?!
  std::string const& result = header(StaticStrings::Accept);
  return (std::string::npos != result.find(StaticStrings::MimeTypeVPack));
#else
  return false;
#endif
}

View File

@ -72,20 +72,20 @@ class GeneralRequest {
ILLEGAL // must be last
};
enum class ProtocolVersion { HTTP_1_0, HTTP_1_1, VSTREAM_1_0, UNKNOWN };
enum class ProtocolVersion { HTTP_1_0, HTTP_1_1, VPP_1_0, UNKNOWN };
enum class ContentType { UNSET, VPACK, JSON };
public:
// translate the HTTP protocol version
static std::string translateVersion(ProtocolVersion);
// translate an enum value into an HTTP method string
// translate an RequestType enum value into an "HTTP method string"
static std::string translateMethod(RequestType);
// translate an HTTP method string into an enum value
// translate "HTTP method string" into RequestType enum value
static RequestType translateMethod(std::string const&);
// append the request method string to a string buffer
// append RequestType as string value to given String buffer
static void appendMethod(RequestType, arangodb::basics::StringBuffer*);
protected:
@ -101,15 +101,13 @@ class GeneralRequest {
_isRequestContextOwner(false),
_type(RequestType::ILLEGAL),
_contentType(ContentType::UNSET),
_contentTypeResponse(ContentType::UNSET){}
_contentTypeResponse(ContentType::UNSET) {}
virtual ~GeneralRequest();
public:
ProtocolVersion protocolVersion() const { return _version; }
// http, https or vpp
char const* protocol() const { return _protocol; }
char const* protocol() const { return _protocol; } // http, https or vpp
void setProtocol(char const* protocol) { _protocol = protocol; }
ConnectionInfo const& connectionInfo() const { return _connectionInfo; }
@ -138,6 +136,7 @@ class GeneralRequest {
std::string const& fullUrl() const { return _fullUrl; }
void setFullUrl(char const* begin, char const* end);
void setFullUrl(std::string url);
// consists of the URL without the host and without any parameters.
std::string const& requestPath() const { return _requestPath; }
@ -156,9 +155,10 @@ class GeneralRequest {
std::vector<std::string> const& suffix() const { return _suffix; }
void addSuffix(std::string&& part);
// The key must be lowercase.
// get value from headers map. The key must be lowercase.
std::string const& header(std::string const& key) const;
std::string const& header(std::string const& key, bool& found) const;
// return headers map
std::unordered_map<std::string, std::string> const& headers() const {
return _headers;
}
@ -178,7 +178,8 @@ class GeneralRequest {
bool velocyPackResponse() const;
// should toVelocyPack be renamed to payload?
virtual VPackSlice payload(arangodb::velocypack::Options const* options = &VPackOptions::Defaults) = 0;
virtual VPackSlice payload(arangodb::velocypack::Options const*
options = &VPackOptions::Defaults) = 0;
std::shared_ptr<VPackBuilder> toVelocyPackBuilderPtr(
arangodb::velocypack::Options const* options) {
@ -190,8 +191,6 @@ class GeneralRequest {
// virtual std::string const& body() const = 0;
virtual int64_t contentLength() const = 0;
virtual std::unordered_map<std::string, std::string> cookieValues() const = 0;
ContentType contentType() const { return _contentType; }
protected:
@ -200,7 +199,7 @@ class GeneralRequest {
protected:
ProtocolVersion _version;
char const* _protocol;
char const* _protocol; // http, https or vpp
// connection info
ConnectionInfo _connectionInfo;
@ -213,16 +212,16 @@ class GeneralRequest {
bool _isRequestContextOwner;
// information about the payload
RequestType _type;
RequestType _type; // GET, POST, ..
std::string _fullUrl;
std::string _requestPath;
std::string _prefix;
std::string _prefix; // part of path matched by rest route
std::vector<std::string> _suffix;
std::unordered_map<std::string, std::string>
_headers; // gets set by httpRequest: parseHeaders -> setHeaders
std::unordered_map<std::string, std::string> _values;
std::unordered_map<std::string, std::vector<std::string>> _arrayValues;
ContentType _contentType;
ContentType _contentType; // UNSET, VPACK, JSON
ContentType _contentTypeResponse;
};
}

View File

@ -115,7 +115,6 @@ class GeneralResponse {
CONNECTION_CLOSE
};
public:
// converts the response code to a string for delivering to a http client.
static std::string responseString(ResponseCode);
@ -127,7 +126,7 @@ class GeneralResponse {
static ResponseCode responseCode(int);
// TODO OBI - check what can be implemented in this base class
virtual basics::StringBuffer& body() = 0;
// virtual basics::StringBuffer& body() = 0;
virtual void setContentType(ContentType type) = 0;
virtual void setContentType(std::string const& contentType) = 0;
virtual void setContentType(std::string&& contentType) = 0;
@ -141,6 +140,8 @@ class GeneralResponse {
virtual ~GeneralResponse() {}
public:
// response codes are http response codes, but they are used in other
// protocols as well
ResponseCode responseCode() const { return _responseCode; }
void setResponseCode(ResponseCode responseCode) {
_responseCode = responseCode;
@ -175,8 +176,9 @@ class GeneralResponse {
arangodb::velocypack::Options const&) = 0;
protected:
ResponseCode _responseCode;
std::unordered_map<std::string, std::string> _headers;
ResponseCode _responseCode; // http response code
std::unordered_map<std::string, std::string>
_headers; // headers/metadata map
};
}

View File

@ -67,7 +67,7 @@ class HttpRequest : public GeneralRequest {
std::string const& cookieValue(std::string const& key) const;
std::string const& cookieValue(std::string const& key, bool& found) const;
std::unordered_map<std::string, std::string> cookieValues() const override {
std::unordered_map<std::string, std::string> cookieValues() const {
return _cookies;
}

View File

@ -65,7 +65,7 @@ class HttpResponse : public GeneralResponse {
// information to the string buffer. Note that adding data to the body
// invalidates any previously returned header. You must call header
// again.
basics::StringBuffer& body() override { return _body; }
basics::StringBuffer& body() { return _body; }
size_t bodySize() const;
/// @brief set type of connection

59
lib/Rest/VppRequest.cpp Normal file
View File

@ -0,0 +1,59 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////
#include "VppRequest.h"
#include <velocypack/Builder.h>
#include <velocypack/Options.h>
#include <velocypack/Parser.h>
#include <velocypack/Validator.h>
#include <velocypack/velocypack-aliases.h>
#include "Basics/conversions.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Basics/tri-strings.h"
#include "Logger/Logger.h"
using namespace arangodb;
using namespace arangodb::basics;
/// @brief construct a VPP request from an already received velocypack header
/// buffer. Only when `length` is > 0 the content types are fixed to VPACK and
/// the header is parsed into the _headers map.
/// NOTE(review): `length` presumably is the size of the message payload that
/// follows, yet _contentLength is initialized to 0 and never set from it --
/// verify whether contentLength() is supposed to report `length`.
VppRequest::VppRequest(ConnectionInfo const& connectionInfo,
                       VPackBuffer<uint8_t>&& header, size_t length)
    : GeneralRequest(connectionInfo),
      _contentLength(0),
      _header(std::move(header)) {
  if (0 < length) {
    _contentType = ContentType::VPACK;
    _contentTypeResponse = ContentType::VPACK;
    parseHeader();
  }
}
/// @brief return the request payload as a velocypack slice.
/// The payload buffer is validated on every call before a slice pointing into
/// it is handed out. An empty payload yields a default-constructed ("none")
/// slice instead of validating/dereferencing an empty buffer.
/// note: the options parameter is currently unused for VPP requests
VPackSlice VppRequest::payload(VPackOptions const* options) {
  if (_payload.empty()) {
    // no payload was set - do not point a slice into an empty buffer
    return VPackSlice();  // none slice
  }
  VPackValidator validator;
  validator.validate(_payload.data(), _payload.size());
  return VPackSlice(_payload.data());
}
/// @brief convert _header (velocypack) into the _headers string map.
/// Intentionally a no-op stub for now -- not yet implemented.
void VppRequest::parseHeader() {}

78
lib/Rest/VppRequest.h Normal file
View File

@ -0,0 +1,78 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Achim Brandt
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_REST_VPP_REQUEST_H
#define ARANGODB_REST_VPP_REQUEST_H 1
#include "Rest/GeneralRequest.h"

#include "Endpoint/ConnectionInfo.h"

#include <utility>

#include <velocypack/Builder.h>
#include <velocypack/Dumper.h>
#include <velocypack/Options.h>
#include <velocypack/velocypack-aliases.h>
namespace arangodb {
class RestBatchHandler;
namespace rest {
class GeneralCommTask;
class VppCommTask;
// class VppsCommTask;
}
namespace velocypack {
class Builder;
struct Options;
}
/// @brief a GeneralRequest implementation for the VelocyPack (VPP) protocol.
/// Header and payload are both transported as velocypack buffers.
class VppRequest : public GeneralRequest {
  friend class rest::VppCommTask;
  // friend class rest::VppsCommTask;
  friend class rest::GeneralCommTask;
  friend class RestBatchHandler;  // TODO must be removed

 private:
  // construction is restricted to the comm tasks (see friends above);
  // `header` is the velocypack message header, `length` the payload size
  VppRequest(ConnectionInfo const& connectionInfo,
             VPackBuffer<uint8_t>&& header, size_t length);

 public:
  ~VppRequest() {}

 public:
  // validated request payload as a velocypack slice
  VPackSlice payload(arangodb::velocypack::Options const*) override;

  int64_t contentLength() const override { return _contentLength; }

  // take ownership of the payload buffer (moved, not copied - the parameter
  // is a named rvalue reference, so an explicit std::move is required)
  void setPayload(VPackBuffer<uint8_t>&& payload) {
    _payload = std::move(payload);
  }

 private:
  void parseHeader();  // converts _header(vpack) to
                       // _headers(map<string,string>)

  int64_t _contentLength;
  VPackBuffer<uint8_t> _header;   // request header (velocypack)
  VPackBuffer<uint8_t> _payload;  // request body (velocypack)
  const std::unordered_map<std::string, std::string> _cookies;  // TODO remove
};
#endif

75
lib/Rest/VppResponse.cpp Normal file
View File

@ -0,0 +1,75 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Achim Brandt
////////////////////////////////////////////////////////////////////////////////
#include "VppResponse.h"
#include <velocypack/Builder.h>
#include <velocypack/Dumper.h>
#include <velocypack/Options.h>
#include <velocypack/velocypack-aliases.h>
#include "Basics/Exceptions.h"
#include "Basics/StringBuffer.h"
#include "Basics/StringUtils.h"
#include "Basics/VPackStringBufferAdapter.h"
#include "Basics/VelocyPackDumper.h"
#include "Basics/tri-strings.h"
#include "Rest/VppRequest.h"
using namespace arangodb;
using namespace arangodb::basics;
bool VppResponse::HIDE_PRODUCT_HEADER = false;
/// @brief construct a VPP response; VPP connections default to keep-alive and
/// the default content type is velocypack
VppResponse::VppResponse(ResponseCode code)
    : GeneralResponse(code),
      _connectionType(CONNECTION_KEEP_ALIVE),
      _body(TRI_UNKNOWN_MEM_ZONE, false),
      _contentType(ContentType::VPACK) {}
/// @brief reset the response for reuse with a new response code; clears all
/// headers and restores the keep-alive connection type.
/// NOTE(review): the constructor initializes _contentType to VPACK, but
/// reset() sets it to TEXT -- confirm this asymmetry is intended.
void VppResponse::reset(ResponseCode code) {
  _responseCode = code;
  _headers.clear();
  _connectionType = CONNECTION_KEEP_ALIVE;
  _contentType = ContentType::TEXT;
}
/// @brief set the response payload from a velocypack slice.
/// Selects the response content type depending on whether the client accepts
/// velocypack; the actual body generation is not implemented yet (stub).
/// (also removes the stray semicolon after the function body)
void VppResponse::setPayload(GeneralRequest const* request,
                             arangodb::velocypack::Slice const& slice,
                             bool generateBody, VPackOptions const& options) {
  // VELOCYPACK
  if (request != nullptr && request->velocyPackResponse()) {
    setContentType(VppResponse::ContentType::VPACK);
    // size_t length = static_cast<size_t>(slice.byteSize());
    if (generateBody) {
      // TODO: write the slice into the response body
    }
  } else {  // JSON
    setContentType(VppResponse::ContentType::JSON);
    if (generateBody) {
      // TODO: dump the slice as JSON into the response body
    } else {
      // TODO: only announce the content length
    }
  }
}
/// @brief write the response header into the given buffer.
/// No-op stub for VPP -- presumably header data travels inside the velocypack
/// message rather than as a separate text header; TODO confirm / implement.
void VppResponse::writeHeader(basics::StringBuffer*) {}

84
lib/Rest/VppResponse.h Normal file
View File

@ -0,0 +1,84 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Achim Brandt
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_REST_VPP_RESPONSE_H
#define ARANGODB_REST_VPP_RESPONSE_H 1
#include "Rest/GeneralResponse.h"
#include "Basics/StringBuffer.h"
namespace arangodb {
class RestBatchHandler;
namespace rest {
class VppCommTask;
class GeneralCommTask;
}
/// @brief a GeneralResponse implementation for the VelocyPack (VPP) protocol
class VppResponse : public GeneralResponse {
  // construction is restricted to the comm tasks (and, for now, the batch
  // handler)
  friend class rest::GeneralCommTask;
  friend class rest::VppCommTask;
  friend class RestBatchHandler;  // TODO must be removed

  explicit VppResponse(ResponseCode code);

 public:
  // whether the server/product header should be suppressed
  static bool HIDE_PRODUCT_HEADER;

  // required by base
  void reset(ResponseCode code) override final;
  void setPayload(GeneralRequest const*, arangodb::velocypack::Slice const&,
                  bool generateBody,
                  arangodb::velocypack::Options const&) override final;
  void writeHeader(basics::StringBuffer*) override;
  void setConnectionType(ConnectionType type) override {
    _connectionType = type;
  }
  void setContentType(ContentType type) override { _contentType = type; }
  // a free-form content type string goes into the headers map and the enum
  // is switched to CUSTOM
  void setContentType(std::string const& contentType) override {
    _headers[arangodb::StaticStrings::ContentTypeHeader] = contentType;
    _contentType = ContentType::CUSTOM;
  }
  void setContentType(std::string&& contentType) override {
    _headers[arangodb::StaticStrings::ContentTypeHeader] =
        std::move(contentType);
    _contentType = ContentType::CUSTOM;
  }
  // end - required by base

 private:
  //_responseCode - from Base
  //_headers - from Base
  ConnectionType _connectionType;  // keep-alive vs close
  basics::StringBuffer _body;      // response body buffer
  ContentType _contentType;        // VPACK by default, CUSTOM when a
                                   // free-form content type string is set
};
}
#endif