mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
commit b98975344d
CHANGELOG
@@ -24,9 +24,11 @@ devel
* added Optimizer Rule for other indexes in Traversals
this allows AQL traversals to use other indexes than the edge index.
So traversals with filters on edges can now make use of more specific
indexes. E.g.:
FOR v, e, p IN 2 OUTBOUND @start @@edge FILTER p.edges[0].foo == "bar"
Will prefer an Hash Index on [_from, foo] above the EdgeIndex.
indexes, e.g.

FOR v, e, p IN 2 OUTBOUND @start @@edge FILTER p.edges[0].foo == "bar"

will prefer a Hash Index on [_from, foo] above the EdgeIndex.

* fixed epoch computation in hybrid logical clock
@@ -68,12 +70,14 @@ devel
* added module.context.createDocumentationRouter to replace module.context.apiDocumentation

* bug in RAFT implementation of reads. dethroned leader still answered
requests in isolation
requests in isolation

v3.0.8 (XXXX-XX-XX)
-------------------

* fixed issue #2005

* fixed issue #2039
@@ -2,11 +2,11 @@

## C/C++ Libraries

### Boost 1.58.0
### Boost 1.61.0

* Project Home: http://www.boost.org/
* License: Boost [boost software license](http://www.boost.org/LICENSE_1_0.txt)
* License: argument_value_usage.hpp [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/devel/3rdParty/boost/1.58.0/boost/test/utils/runtime/cla/detail/argument_value_usage.hpp)
* License: argument_value_usage.hpp [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/devel/3rdParty/boost/1.61.0/boost/test/utils/runtime/cla/detail/argument_value_usage.hpp)

### fpconv_dtoa
@@ -75,18 +75,10 @@

## Programs

### autoconf
### cmake

* Project Home: http://www.gnu.org/software/autoconf/autoconf.html
* only used to generate code, not part of the distribution
* License: configure [free as-is license](https://github.com/arangodb/arangodb/blob/master/configure#L11)
* License: ax_cxx_compile_stdcxx_11.m4 [free as-is license](https://github.com/arangodb/arangodb/blob/master/m4/ax_cxx_compile_stdcxx_11.m4#L25)

### automake

* Project Home: https://www.gnu.org/software/automake/
* only used to generate code, not part of the distribution
* License: Makefile.in [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/master/Makefile.in)
* Project Home: https://cmake.org/
* License OSI-approved BSD 3-clause License [https://cmake.org/licensing/]

### Bison 3.0
@@ -927,13 +927,16 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, message);
} else {
// Only if the aresult was successful we will get here
// Only if the result was successful we will get here
arangodb::basics::StringBuffer& body = res->result->getBody();

std::shared_ptr<VPackBuilder> builder =
VPackParser::fromJson(body.c_str(), body.length());
VPackSlice resultSlice = builder->slice();
TRI_ASSERT(resultSlice.isNumber());
if (!resultSlice.isNumber()) {
THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_INTERNAL, "got unexpected response from engine lock request");
}
auto engineId = resultSlice.getNumericValue<traverser::TraverserEngineID>();
TRI_ASSERT(engineId != 0);
traverserEngines.emplace(engineId, shardSet);
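The hunk above replaces a maintainer-only assertion with a real runtime check before the traverser engine id is extracted from the lock response. A minimal, self-contained sketch of the same defensive pattern, using only the standard library and a hypothetical parseEngineId helper instead of ArangoDB's VelocyPack types:

#include <cstdint>
#include <optional>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for the lock response: either a numeric engine id
// was returned, or the payload was something unexpected.
std::optional<uint64_t> parseEngineId(const std::string& body) {
  try {
    size_t pos = 0;
    uint64_t id = std::stoull(body, &pos);
    if (pos != body.size()) return std::nullopt;  // trailing garbage
    return id;
  } catch (...) {
    return std::nullopt;  // not a number at all
  }
}

uint64_t extractEngineIdOrThrow(const std::string& body) {
  auto id = parseEngineId(body);
  if (!id) {
    // mirrors the new THROW_ARANGO_EXCEPTION_MESSAGE branch:
    // fail loudly instead of only asserting in maintainer builds
    throw std::runtime_error("got unexpected response from engine lock request");
  }
  return *id;
}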
@@ -40,6 +40,7 @@
#include <velocypack/Validator.h>
#include <velocypack/velocypack-aliases.h>

#include <boost/optional.hpp>
#include <iostream>
#include <limits>
#include <stdexcept>

@@ -51,7 +52,8 @@ using namespace arangodb::rest;
VppCommTask::VppCommTask(GeneralServer* server, TRI_socket_t sock,
ConnectionInfo&& info, double timeout)
: Task("VppCommTask"),
GeneralCommTask(server, sock, std::move(info), timeout) {
GeneralCommTask(server, sock, std::move(info), timeout),
_authenticatedUser() {
_protocol = "vpp";
_readBuffer.reserve(
_bufferLength); // ATTENTION <- this is required so we do not

@@ -85,14 +87,12 @@ void VppCommTask::addResponse(VppResponse* response) {
}
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "response -- end";

// FIXME (obi)
// If the message is big we will create many small chunks in a loop.
// For the first tests we just send single Messages

// adds chunk header infromation and creates SingBuffer* that can be
// used with _writeBuffers
auto buffers = createChunkForNetwork(slices, id,
std::numeric_limits<std::size_t>::max());
auto buffers = createChunkForNetwork(
slices, id, (std::numeric_limits<std::size_t>::max)(),
false); // set some sensible maxchunk
// size and compression

double const totalTime = getAgent(id)->elapsedSinceReadStart();
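For context on the new maximum-chunk-size argument above: the idea is that a large payload gets split into bounded chunks before it is written out. A minimal sketch of such splitting, assuming a plain byte vector instead of ArangoDB's slice and buffer types (splitIntoChunks is an illustrative name, not the real createChunkForNetwork):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Split `payload` into chunks of at most `maxChunkSize` bytes.
// In the real protocol each chunk additionally carries a header
// (message id, chunk index); only the splitting is shown here.
std::vector<std::vector<uint8_t>> splitIntoChunks(
    std::vector<uint8_t> const& payload, std::size_t maxChunkSize) {
  std::vector<std::vector<uint8_t>> chunks;
  for (std::size_t offset = 0; offset < payload.size(); offset += maxChunkSize) {
    std::size_t len = std::min(maxChunkSize, payload.size() - offset);
    chunks.emplace_back(payload.begin() + offset, payload.begin() + offset + len);
  }
  return chunks;
}

The parenthesised (std::numeric_limits<std::size_t>::max)() call in the diff is the usual trick to keep the Windows max() macro from interfering with the function call.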
@@ -185,143 +185,17 @@ bool VppCommTask::processRead() {
bool read_maybe_only_part_of_buffer = false;
VppInputMessage message; // filled in CASE 1 or CASE 2b

// CASE 1: message is in one chunk
if (chunkHeader._isFirst && chunkHeader._chunk == 1) {
_agents.emplace(
std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));

auto agent = getAgent(chunkHeader._messageID);
agent->acquire();
agent->requestStatisticsAgentSetReadStart();

LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk contains single message";
std::size_t payloads = 0;

try {
payloads = validateAndCount(vpackBegin, chunkEnd);
} catch (std::exception const& e) {
handleSimpleError(rest::ResponseCode::BAD,
TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!"
<< e.what();
closeTask(rest::ResponseCode::BAD);
return false;
} catch (...) {
handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!";
closeTask(rest::ResponseCode::BAD);
return false;
// CASE 1: message is in one chunk
if (auto rv = getMessageFromSingleChunk(chunkHeader, message, doExecute,
vpackBegin, chunkEnd)) {
return *rv;
}

VPackBuffer<uint8_t> buffer;
buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
message.set(chunkHeader._messageID, std::move(buffer), payloads); // fixme

// message._header = VPackSlice(message._buffer.data());
// if (payloadOffset) {
// message._payload = VPackSlice(message._buffer.data() + payloadOffset);
// }

doExecute = true;
getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
}
// CASE 2: message is in multiple chunks
auto incompleteMessageItr = _incompleteMessages.find(chunkHeader._messageID);

// CASE 2a: chunk starts new message
if (chunkHeader._isFirst) { // first chunk of multi chunk message
_agents.emplace(
std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));

auto agent = getAgent(chunkHeader._messageID);
agent->acquire();
agent->requestStatisticsAgentSetReadStart();

LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk starts a new message";
if (incompleteMessageItr != _incompleteMessages.end()) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "Message should be first but is already in the Map of incomplete "
"messages";
closeTask(rest::ResponseCode::BAD);
return false;
} else {
if (auto rv = getMessageFromMultiChunks(chunkHeader, message, doExecute,
vpackBegin, chunkEnd)) {
return *rv;
}

// TODO: is a 32bit value sufficient for the messageLength here?
IncompleteVPackMessage message(
static_cast<uint32_t>(chunkHeader._messageLength),
chunkHeader._chunk /*number of chunks*/);
message._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
auto insertPair = _incompleteMessages.emplace(
std::make_pair(chunkHeader._messageID, std::move(message)));
if (!insertPair.second) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "insert failed";
closeTask(rest::ResponseCode::BAD);
return false;
}

// CASE 2b: chunk continues a message
} else { // followup chunk of some mesage
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk continues a message";
if (incompleteMessageItr == _incompleteMessages.end()) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "found message without previous part";
closeTask(rest::ResponseCode::BAD);
return false;
}
auto& im = incompleteMessageItr->second; // incomplete Message
im._currentChunk++;
assert(im._currentChunk == chunkHeader._chunk);
im._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
// check buffer longer than length

// MESSAGE COMPLETE
if (im._currentChunk == im._numberOfChunks - 1 /* zero based counting */) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk completes a message";
std::size_t payloads = 0;

try {
payloads =
validateAndCount(reinterpret_cast<char const*>(im._buffer.data()),
reinterpret_cast<char const*>(
im._buffer.data() + im._buffer.byteSize()));

} catch (std::exception const& e) {
handleSimpleError(rest::ResponseCode::BAD,
TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!"
<< e.what();
closeTask(rest::ResponseCode::BAD);
return false;
} catch (...) {
handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!";
closeTask(rest::ResponseCode::BAD);
return false;
}

message.set(chunkHeader._messageID, std::move(im._buffer), payloads);
_incompleteMessages.erase(incompleteMessageItr);
// check length

doExecute = true;
getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
}
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "chunk does not complete a message";
}

read_maybe_only_part_of_buffer = true;
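The refactor above moves the two large branches into getMessageFromSingleChunk and getMessageFromMultiChunks, which return boost::optional<bool>: an engaged value tells processRead to return that value immediately, an empty optional means "carry on". A minimal sketch of that calling convention, using std::optional and invented helper names:

#include <optional>

// Helper returns std::nullopt to mean "keep processing",
// or a bool that the caller must return immediately.
std::optional<bool> handleSingleChunk(bool valid) {
  if (!valid) {
    return false;  // caller should abort with `false`
  }
  return std::nullopt;  // nothing special happened, caller continues
}

bool processRead(bool valid) {
  if (auto rv = handleSingleChunk(valid)) {
    return *rv;  // early exit decided inside the helper
  }
  // ... continue with the rest of processRead ...
  return true;
}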
@@ -346,6 +220,7 @@ bool VppCommTask::processRead() {
<< "\"," << message.payload().toJson()
<< "\"";

// get type of request
int type = meta::underlyingValue(rest::RequestType::ILLEGAL);
try {
type = header.at(1).getInt();

@@ -357,14 +232,47 @@ bool VppCommTask::processRead() {
closeTask(rest::ResponseCode::BAD);
return false;
}

// handle request types
if (type == 1000) {
// do auth
// do authentication
// std::string encryption = header.at(2).copyString();
std::string user = header.at(3).copyString();
std::string pass = header.at(4).copyString();
auto auth = basics::StringUtils::encodeBase64(user + ":" + pass);
AuthResult result = GeneralServerFeature::AUTH_INFO.checkAuthentication(
AuthInfo::AuthType::BASIC, auth);

if (result._authorized) {
_authenticatedUser = std::move(user);
handleSimpleError(rest::ResponseCode::OK, TRI_ERROR_NO_ERROR,
"authentication successful", chunkHeader._messageID);
} else {
_authenticatedUser.clear();
handleSimpleError(rest::ResponseCode::UNAUTHORIZED,
TRI_ERROR_HTTP_UNAUTHORIZED, "authentication failed",
chunkHeader._messageID);
}
} else {
// check auth
// the handler will take ownersip of this pointer
std::unique_ptr<VppRequest> request(new VppRequest(
_connectionInfo, std::move(message), chunkHeader._messageID));
GeneralServerFeature::HANDLER_FACTORY->setRequestContext(request.get());
request->setUser(_authenticatedUser);

// check authentication
std::string const& dbname = request->databaseName();
if (!_authenticatedUser.empty() || !dbname.empty()) {
AuthLevel level = GeneralServerFeature::AUTH_INFO.canUseDatabase(
_authenticatedUser, dbname);

if (level != AuthLevel::RW) {
handleSimpleError(
rest::ResponseCode::UNAUTHORIZED, TRI_ERROR_FORBIDDEN,
TRI_errno_string(TRI_ERROR_FORBIDDEN), chunkHeader._messageID);
}
}

// make sure we have a database
if (request->requestContext() == nullptr) {
handleSimpleError(rest::ResponseCode::NOT_FOUND,
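The new branch above authenticates a type-1000 message once per connection and then stamps every later request with the remembered user. A compact sketch of that per-connection state, with invented types standing in for AuthInfo and VppRequest (the base64 step is only indicated by a comment):

#include <string>

struct Credentials { std::string user; std::string password; };

class Connection {
 public:
  // Handle an authentication message: remember the user on success,
  // forget any previously remembered user on failure.
  bool authenticate(Credentials const& c) {
    // the real code sends base64(user + ":" + password) to the auth backend
    bool ok = checkPassword(c.user, c.password);
    authenticatedUser_ = ok ? c.user : std::string();
    return ok;
  }

  // Every later request reuses the remembered user for permission checks.
  bool mayWriteDatabase(std::string const& dbname) const {
    if (authenticatedUser_.empty()) return false;  // never authenticated
    return hasReadWriteLevel(authenticatedUser_, dbname);
  }

 private:
  // Stand-ins for AUTH_INFO.checkAuthentication / canUseDatabase.
  static bool checkPassword(std::string const&, std::string const&) { return true; }
  static bool hasReadWriteLevel(std::string const&, std::string const&) { return true; }

  std::string authenticatedUser_;  // mirrors VppCommTask::_authenticatedUser
};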
@@ -455,3 +363,149 @@ void VppCommTask::handleSimpleError(rest::ResponseCode responseCode,
_clientClosed = true;
}
}

boost::optional<bool> VppCommTask::getMessageFromSingleChunk(
ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
char const* vpackBegin, char const* chunkEnd) {
// add agent for this new message
_agents.emplace(
std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));

auto agent = getAgent(chunkHeader._messageID);
agent->acquire();
agent->requestStatisticsAgentSetReadStart();

LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk contains single message";
std::size_t payloads = 0;

try {
payloads = validateAndCount(vpackBegin, chunkEnd);
} catch (std::exception const& e) {
handleSimpleError(rest::ResponseCode::BAD,
TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!"
<< e.what();
closeTask(rest::ResponseCode::BAD);
return false;
} catch (...) {
handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!";
closeTask(rest::ResponseCode::BAD);
return false;
}

VPackBuffer<uint8_t> buffer;
buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
message.set(chunkHeader._messageID, std::move(buffer), payloads); // fixme

doExecute = true;
getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
return boost::none;
}

boost::optional<bool> VppCommTask::getMessageFromMultiChunks(
ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
char const* vpackBegin, char const* chunkEnd) {
// CASE 2: message is in multiple chunks
auto incompleteMessageItr = _incompleteMessages.find(chunkHeader._messageID);

// CASE 2a: chunk starts new message
if (chunkHeader._isFirst) { // first chunk of multi chunk message
// add agent for this new message
_agents.emplace(
std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));

auto agent = getAgent(chunkHeader._messageID);
agent->acquire();
agent->requestStatisticsAgentSetReadStart();

LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk starts a new message";
if (incompleteMessageItr != _incompleteMessages.end()) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "Message should be first but is already in the Map of "
"incomplete "
"messages";
closeTask(rest::ResponseCode::BAD);
return false;
}

// TODO: is a 32bit value sufficient for the messageLength here?
IncompleteVPackMessage message(
static_cast<uint32_t>(chunkHeader._messageLength),
chunkHeader._chunk /*number of chunks*/);
message._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
auto insertPair = _incompleteMessages.emplace(
std::make_pair(chunkHeader._messageID, std::move(message)));
if (!insertPair.second) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "insert failed";
closeTask(rest::ResponseCode::BAD);
return false;
}

// CASE 2b: chunk continues a message
} else { // followup chunk of some mesage
// do not add agent for this continued message
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk continues a message";
if (incompleteMessageItr == _incompleteMessages.end()) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "found message without previous part";
closeTask(rest::ResponseCode::BAD);
return false;
}
auto& im = incompleteMessageItr->second; // incomplete Message
im._currentChunk++;
assert(im._currentChunk == chunkHeader._chunk);
im._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
// check buffer longer than length

// MESSAGE COMPLETE
if (im._currentChunk == im._numberOfChunks - 1 /* zero based counting */) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk completes a message";
std::size_t payloads = 0;

try {
payloads =
validateAndCount(reinterpret_cast<char const*>(im._buffer.data()),
reinterpret_cast<char const*>(
im._buffer.data() + im._buffer.byteSize()));

} catch (std::exception const& e) {
handleSimpleError(rest::ResponseCode::BAD,
TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!"
<< e.what();
closeTask(rest::ResponseCode::BAD);
return false;
} catch (...) {
handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!";
closeTask(rest::ResponseCode::BAD);
return false;
}

message.set(chunkHeader._messageID, std::move(im._buffer), payloads);
_incompleteMessages.erase(incompleteMessageItr);
// check length

doExecute = true;
getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
}
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "chunk does not complete a message";
}
return boost::none;
}
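getMessageFromMultiChunks above keeps partially received messages in a map keyed by message id and only hands a message on once the expected number of chunks has arrived. A stripped-down sketch of that bookkeeping (all types and names here are illustrative, not ArangoDB's):

#include <cstdint>
#include <map>
#include <optional>
#include <vector>

struct Incomplete {
  std::size_t expectedChunks = 0;
  std::size_t receivedChunks = 0;
  std::vector<uint8_t> buffer;
};

class Reassembler {
 public:
  // Feed one chunk; returns the full payload once the last chunk arrived.
  std::optional<std::vector<uint8_t>> addChunk(uint64_t messageId,
                                               bool isFirst,
                                               std::size_t totalChunks,
                                               std::vector<uint8_t> const& data) {
    auto& im = incomplete_[messageId];
    if (isFirst) {
      im.expectedChunks = totalChunks;
      im.receivedChunks = 0;
      im.buffer.clear();
    }
    im.buffer.insert(im.buffer.end(), data.begin(), data.end());
    ++im.receivedChunks;

    if (im.receivedChunks < im.expectedChunks) {
      return std::nullopt;  // message still incomplete
    }
    auto full = std::move(im.buffer);
    incomplete_.erase(messageId);
    return full;  // complete message, caller can validate and dispatch it
  }

 private:
  std::map<uint64_t, Incomplete> incomplete_;
};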
@@ -29,6 +29,7 @@
#include "lib/Rest/VppRequest.h"
#include "lib/Rest/VppResponse.h"

#include <boost/optional.hpp>
#include <stdexcept>

namespace arangodb {

@@ -122,6 +123,16 @@ class VppCommTask : public GeneralCommTask {
ChunkHeader readChunkHeader(); // sub-function of processRead
void replyToIncompleteMessages();

boost::optional<bool> getMessageFromSingleChunk(
ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
char const* vpackBegin, char const* chunkEnd);

boost::optional<bool> getMessageFromMultiChunks(
ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
char const* vpackBegin, char const* chunkEnd);

std::string _authenticatedUser;

// user
// authenticated or not
// database from url
@@ -209,11 +209,11 @@ void PrimaryIndex::toVelocyPackFigures(VPackBuilder& builder) const {
}

int PrimaryIndex::insert(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "insert() called for primary index");
}

int PrimaryIndex::remove(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "remove() called for primary index");
}

/// @brief unload the index data from memory
@@ -35,10 +35,6 @@
#include "RestServer/DatabaseFeature.h"
#include "V8Server/V8DealerFeature.h"

#ifdef USE_ENTERPRISE
#include "Enterprise/Version.h"
#endif

using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::options;

@@ -160,13 +156,8 @@ void BootstrapFeature::start() {
// Start service properly:
rest::RestHandlerFactory::setMaintenance(false);

#ifdef USE_ENTERPRISE
LOG(INFO) << "ArangoDB (enterprise version " << ARANGODB_VERSION_FULL
<< " / " << ENTERPRISE_VERSION << ") is ready for business. Have fun!";
#else
LOG(INFO) << "ArangoDB (version " << ARANGODB_VERSION_FULL
<< ") is ready for business. Have fun!";
#endif

if (_bark) {
LOG(INFO) << "The dog says: wau wau!";
@@ -1917,7 +1917,23 @@ static void JS_VersionServer(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);

TRI_V8_RETURN(TRI_V8_ASCII_STRING(ARANGODB_VERSION));
bool details = false;
if (args.Length() > 0) {
details = TRI_ObjectToBoolean(args[0]);
}

if (!details) {
// return version string
TRI_V8_RETURN(TRI_V8_ASCII_STRING(ARANGODB_VERSION));
}

// return version details
VPackBuilder builder;
builder.openObject();
rest::Version::getVPack(builder);
builder.close();

TRI_V8_RETURN(TRI_VPackToV8(isolate, builder.slice()));
TRI_V8_TRY_CATCH_END
}
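The detailed branch above serialises the version table into a VelocyPack object before converting it to a V8 value. A small standalone illustration of the same openObject/add/close pattern with the VelocyPack library (the map contents below are made up):

#include <iostream>
#include <map>
#include <string>

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>
#include <velocypack/velocypack-aliases.h>

int main() {
  // pretend this is rest::Version::Values
  std::map<std::string, std::string> values = {
      {"server-version", "3.1.0"}, {"vpack-version", "0.1.30"}};

  VPackBuilder builder;
  builder.openObject();                 // start the detail object
  for (auto const& it : values) {
    builder.add(it.first, VPackValue(it.second));
  }
  builder.close();                      // object must be closed before use

  std::cout << builder.slice().toJson() << std::endl;
  return 0;
}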
@@ -80,6 +80,7 @@ class IndexFiller {

void operator()() {
int res = TRI_ERROR_INTERNAL;
TRI_ASSERT(_idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);

try {
res = _collection->fillIndex(_trx, _idx);

@@ -210,7 +211,6 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
VPackSlice value = info.get("type");

if (!value.isString()) {
// FIXME Intenral Compatibility.
// Compatibility with old v8-vocindex.
if (generateKey) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);

@@ -232,8 +232,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
iid = Helper::getNumericValue<TRI_idx_iid_t>(info, "id", 0);
} else if (!generateKey) {
// In the restore case it is forbidden to NOT have id
LOG(ERR) << "ignoring index, index identifier could not be located";
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot restore index without index identifier");
}

if (iid == 0 && !isClusterConstructor) {

@@ -249,7 +248,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: {
if (!isClusterConstructor) {
// this indexes cannot be created directly
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create primary index");
}
newIdx.reset(new arangodb::PrimaryIndex(col));
break;

@@ -257,7 +256,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
case arangodb::Index::TRI_IDX_TYPE_EDGE_INDEX: {
if (!isClusterConstructor) {
// this indexes cannot be created directly
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create edge index");
}
newIdx.reset(new arangodb::EdgeIndex(iid, col));
break;

@@ -281,7 +280,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
break;
#else
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED,
"index type not supported in this build");
"index type 'persistent' not supported in this build");
#endif
}
case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: {
@@ -445,7 +444,12 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i
}
}
}

/*
if (!isCluster) {
createInitialIndexes();
}
*/
auto indexesSlice = info.get("indexes");
if (indexesSlice.isArray()) {
bool const isCluster = ServerState::instance()->isRunningInCluster();

@@ -457,15 +461,27 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i
// TODO Handle Properly
continue;
}

auto idx = PrepareIndexFromSlice(v, false, this, true);

if (isCluster) {
addIndexCoordinator(idx, false);
} else {
/* if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
idx->type() == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX) {
// already added those types earlier
continue;
}
*/
addIndex(idx);
}
}
}

if (_indexes.empty()) {
createInitialIndexes();
}

if (!ServerState::instance()->isCoordinator() && isPhysical) {
// If we are not in the coordinator we need a path
// to the physical data.

@@ -764,6 +780,7 @@ LogicalCollection::getIndexes() const {
// or it's indexes are freed the pointer returned will get invalidated.
arangodb::PrimaryIndex* LogicalCollection::primaryIndex() const {
TRI_ASSERT(!_indexes.empty());
TRI_ASSERT(_indexes[0]->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
// the primary index must be the index at position #0
return static_cast<arangodb::PrimaryIndex*>(_indexes[0].get());
}

@@ -1124,7 +1141,7 @@ PhysicalCollection* LogicalCollection::createPhysical() {
void LogicalCollection::open(bool ignoreErrors) {
VPackBuilder builder;
StorageEngine* engine = EngineSelectorFeature::ENGINE;
engine->getCollectionInfo(_vocbase, cid(), builder, false, 0);
engine->getCollectionInfo(_vocbase, cid(), builder, true, 0);

double start = TRI_microtime();

@@ -1135,17 +1152,9 @@ void LogicalCollection::open(bool ignoreErrors) {
int res = openWorker(ignoreErrors);

if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot open document collection from path '" << path() << "'";
THROW_ARANGO_EXCEPTION(res);
THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot open document collection from path '") + path() + "': " + TRI_errno_string(res));
}

res = createInitialIndexes();

if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res);
THROW_ARANGO_EXCEPTION(res);
}

arangodb::SingleCollectionTransaction trx(
arangodb::StandaloneTransactionContext::Create(_vocbase),
cid(), TRI_TRANSACTION_WRITE);

@@ -1309,6 +1318,7 @@ std::shared_ptr<Index> LogicalCollection::createIndex(Transaction* trx,
return idx;
}

TRI_ASSERT(idx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
int res = fillIndex(trx, idx.get(), false);

if (res != TRI_ERROR_NO_ERROR) {

@@ -1336,21 +1346,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info,
if (!info.isObject()) {
return TRI_ERROR_INTERNAL;
}
/* FIXME Old style First check if iid is okay and update server tick
TRI_idx_iid_t iid;
if (iis.isNumber()) {
iid = iis.getNumericValue<TRI_idx_iid_t>();
} else if (iis.isString()) {
std::string tmp = iis.copyString();
iid = static_cast<TRI_idx_iid_t>(basics::StringUtils::uint64(tmp));
} else {
LOG(ERR) << "ignoring index, index identifier could not be located";

return TRI_ERROR_INTERNAL;
}

TRI_UpdateTickServer(iid);
*/

// We create a new Index object to make sure that the index
// is not handed out except for a successful case.
std::shared_ptr<Index> newIdx;

@@ -1366,6 +1362,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info,
// FIXME New style. Update tick after successful creation of index.
TRI_UpdateTickServer(newIdx->id());

TRI_ASSERT(newIdx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
int res = fillIndex(trx, newIdx.get());

if (res != TRI_ERROR_NO_ERROR) {

@@ -1516,21 +1513,16 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
}

/// @brief creates the initial indexes for the collection
int LogicalCollection::createInitialIndexes() {
void LogicalCollection::createInitialIndexes() {
// TODO Properly fix this. The outside should make sure that only NEW collections
// try to create the indexes.
if (!_indexes.empty()) {
return TRI_ERROR_NO_ERROR;
return;
}

// create primary index
auto primaryIndex = std::make_shared<arangodb::PrimaryIndex>(this);

try {
addIndex(primaryIndex);
} catch (...) {
return TRI_ERROR_OUT_OF_MEMORY;
}
addIndex(primaryIndex);

// create edges index
if (_type == TRI_COL_TYPE_EDGE) {

@@ -1539,16 +1531,10 @@ int LogicalCollection::createInitialIndexes() {
iid = _planId;
}

try {
auto edgeIndex = std::make_shared<arangodb::EdgeIndex>(iid, this);
auto edgeIndex = std::make_shared<arangodb::EdgeIndex>(iid, this);

addIndex(edgeIndex);
} catch (...) {
return TRI_ERROR_OUT_OF_MEMORY;
}
addIndex(edgeIndex);
}

return TRI_ERROR_NO_ERROR;
}

/// @brief iterator for index open
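createInitialIndexes above changes from returning an error code (with try/catch blocks that mapped any failure to TRI_ERROR_OUT_OF_MEMORY) to a void function that simply lets exceptions propagate to the caller. A minimal sketch of that style of refactor, with invented names:

#include <vector>

struct Index {};

// Before: error-code style, every failure path mapped to an int.
int createInitialIndexesOld(std::vector<Index>& indexes) {
  if (!indexes.empty()) {
    return 0;  // e.g. TRI_ERROR_NO_ERROR
  }
  try {
    indexes.push_back(Index{});
  } catch (...) {
    return 1;  // e.g. TRI_ERROR_OUT_OF_MEMORY
  }
  return 0;
}

// After: void plus exceptions; the caller decides how to react.
void createInitialIndexesNew(std::vector<Index>& indexes) {
  if (!indexes.empty()) {
    return;  // already initialised
  }
  indexes.push_back(Index{});  // may throw std::bad_alloc, which propagates
}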
@@ -1633,6 +1619,7 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) {
// now actually fill the secondary indexes
for (size_t i = 1; i < n; ++i) {
auto idx = _indexes[i];
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);

// index threads must come first, otherwise this thread will block the
// loop and

@@ -1685,6 +1672,9 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) {
}

void LogicalCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
// primary index must be added at position 0
TRI_ASSERT(idx->type() != arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX || _indexes.empty());

_indexes.emplace_back(idx);

// update statistics

@@ -2364,6 +2354,7 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx,
int LogicalCollection::fillIndex(arangodb::Transaction* trx,
arangodb::Index* idx,
bool skipPersistent) {
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
TRI_ASSERT(!ServerState::instance()->isCoordinator());
if (!useSecondaryIndexes()) {
return TRI_ERROR_NO_ERROR;

@@ -2490,6 +2481,7 @@ int LogicalCollection::fillIndexSequential(arangodb::Transaction* trx,
auto primaryIndex = this->primaryIndex();
size_t nrUsed = primaryIndex->size();

TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
idx->sizeHint(trx, nrUsed);

if (nrUsed > 0) {

@@ -2965,6 +2957,7 @@ int LogicalCollection::insertSecondaryIndexes(

for (size_t i = 1; i < n; ++i) {
auto idx = _indexes[i];
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);

if (!useSecondary && !idx->isPersistent()) {
continue;

@@ -3007,6 +3000,7 @@ int LogicalCollection::deleteSecondaryIndexes(

for (size_t i = 1; i < n; ++i) {
auto idx = _indexes[i];
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);

if (!useSecondary && !idx->isPersistent()) {
continue;
@@ -359,7 +359,7 @@ class LogicalCollection {
// SECTION: Index creation

/// @brief creates the initial indexes for the collection
int createInitialIndexes();
void createInitialIndexes();

int openWorker(bool ignoreErrors);
@@ -306,9 +306,14 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
_vertexExpressions.reserve(read.length());
for (auto const& info : VPackObjectIterator(read)) {
size_t d = basics::StringUtils::uint64(info.key.copyString());
#ifdef ARANGODB_ENABLE_MAINAINER_MODE
auto it = _vertexExpressions.emplace(
d, new aql::Expression(query->ast(), info.value));
TRI_ASSERT(it.second);
#else
_vertexExpressions.emplace(
d, new aql::Expression(query->ast(), info.value));
#endif
}
}
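The #ifdef above asserts that the emplace actually inserted a new expression, but only in maintainer builds, so release builds avoid keeping an otherwise unused return value. The same effect can be sketched with plain NDEBUG-style conditional compilation (illustrative names and values):

#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<size_t, std::string> expressions;

#ifndef NDEBUG
  // debug build: keep the result so the insertion can be asserted
  auto it = expressions.emplace(0, "v.foo == 1");
  assert(it.second && "duplicate depth key");
#else
  // release build: drop the return value, no unused-variable warning
  expressions.emplace(0, "v.foo == 1");
#endif
  return 0;
}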
@@ -303,14 +303,6 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
TRI_ASSERT(collection != nullptr);

try {
// Maybe the ordering is broken now
// create document collection
int res = collection->createInitialIndexes();

if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}

// cid might have been assigned
cid = collection->cid();
@@ -414,6 +414,8 @@
// add those filters also to the collection
self.collection.addFilter(f.attribute, f.operator, f.value);
});

self.rerender();
},

addFilterItem: function () {

@@ -1133,6 +1133,8 @@
arangoHelper.arangoError('Graph', 'Could not expand node: ' + id + '.');
}
});

self.removeHelp();
},

checkExpand: function (data, origin) {

@@ -1671,13 +1673,14 @@
e.color = e.originalColor;
});

$('.nodeInfoDiv').remove();
s.refresh({ skipIndexation: true });
}
};

s.bind('rightClickStage', function (e) {
unhighlightNodes();
self.nodeHighlighted = 'undefinedid';
unhighlightNodes();
});

s.bind('rightClickNode', function (e) {
@@ -258,7 +258,6 @@
background-color: $c-bluegrey-dark;
border-radius: 2px;
color: $c-white;
padding: 10px;
padding-left: 150px;
padding: 10px 20px;
}
}
@@ -526,12 +526,13 @@ ArangoDatabase.prototype._dropIndex = function (id) {
// / @brief returns the database version
// //////////////////////////////////////////////////////////////////////////////

ArangoDatabase.prototype._version = function () {
var requestResult = this._connection.GET('/_api/version');
ArangoDatabase.prototype._version = function (details) {
var requestResult = this._connection.GET('/_api/version' +
(details ? '?details=true' : ''));

arangosh.checkRequestResult(requestResult);

return requestResult.version;
return details ? requestResult : requestResult.version;
};

// //////////////////////////////////////////////////////////////////////////////
@@ -1551,7 +1551,7 @@ function ahuacatlQueryGeneralTraversalTestSuite() {
);
},

testGRAPH_SHOTEST_PATH_with_stopAtFirstMatch: function () {
testGRAPH_SHORTEST_PATH_with_stopAtFirstMatch: function () {
var actual;

actual = getQueryResults("FOR e IN arangodb::GRAPH_SHORTEST_PATH('werKenntWen', 'UnitTests_Frankfurter/Fritz', " +
@@ -288,7 +288,7 @@ void WindowsServiceFeature::installService() {
}

SERVICE_DESCRIPTION description = {
"multi-model NoSQL database (version " ARANGODB_VERSION ")"};
"multi-model NoSQL database (version " ARANGODB_VERSION_FULL ")"};
ChangeServiceConfig2(schService, SERVICE_CONFIG_DESCRIPTION, &description);

std::cout << "INFO: added service with command line '" << command << "'"
@@ -115,7 +115,7 @@ void Version::initialize() {

#if USE_ENTERPRISE
Values["enterprise-version"] = ENTERPRISE_VERSION;
Values["enterprise-version"] = ARANGODB_ENTERPRISE_VERSION;
#endif

#if HAVE_ARANGODB_BUILD_REPOSITORY

@@ -416,6 +416,8 @@ std::string Version::getDetailed() {
////////////////////////////////////////////////////////////////////////////////

void Version::getVPack(VPackBuilder& dst) {
TRI_ASSERT(!dst.isClosed());

for (auto const& it : Values) {
std::string const& value = it.second;
@@ -28,12 +28,33 @@

#include "Basics/build.h"

#ifdef USE_ENTERPRISE
#include "Enterprise/Version.h"

#ifndef ARANGODB_ENTERPRISE_VERSION
#error "enterprise version number is not defined"
#endif

#ifdef _DEBUG
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " " ARANGODB_ENTERPRISE_VERSION " [" TRI_PLATFORM "-DEBUG]"
#else
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " " ARANGODB_ENTERPRISE_VERSION " [" TRI_PLATFORM "]"
#endif

#else

#ifdef ARANGODB_ENTERPRISE_VERSION
#error "enterprise version number should not be defined"
#endif

#ifdef _DEBUG
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "-DEBUG]"
#else
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "]"
#endif

#endif

namespace arangodb {
namespace velocypack {
class Builder;
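The new conditional block composes ARANGODB_VERSION_FULL from the plain version, an optional enterprise version, and the platform. A tiny illustration of how such a macro expands, using made-up placeholder values for the tokens that build.h and the build system normally define:

#include <iostream>

// Placeholders for values the build system would provide.
#define ARANGODB_VERSION "3.1.devel"
#define TRI_PLATFORM "linux"

// Community build, non-debug: mirrors the last branch of the real header.
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "]"

int main() {
  // Adjacent string literals are concatenated by the compiler,
  // so this prints: 3.1.devel [linux]
  std::cout << ARANGODB_VERSION_FULL << std::endl;
  return 0;
}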
@@ -43,122 +64,65 @@ namespace rest {

class Version {
private:
//////////////////////////////////////////////////////////////////////////////
/// @brief create the version information
//////////////////////////////////////////////////////////////////////////////

Version() = delete;
Version(Version const&) = delete;
Version& operator=(Version const&) = delete;

public:
//////////////////////////////////////////////////////////////////////////////
/// @brief parse a version string into major, minor
/// returns -1, -1 when the version string has an invalid format
//////////////////////////////////////////////////////////////////////////////

static std::pair<int, int> parseVersionString(std::string const&);

//////////////////////////////////////////////////////////////////////////////
/// @brief initialize
//////////////////////////////////////////////////////////////////////////////

static void initialize();

//////////////////////////////////////////////////////////////////////////////
/// @brief get numeric server version
//////////////////////////////////////////////////////////////////////////////

static int32_t getNumericServerVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get server version
//////////////////////////////////////////////////////////////////////////////

static std::string getServerVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get BOOST version
//////////////////////////////////////////////////////////////////////////////

static std::string getBoostVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get V8 version
//////////////////////////////////////////////////////////////////////////////

static std::string getV8Version();

//////////////////////////////////////////////////////////////////////////////
/// @brief get OpenSSL version
//////////////////////////////////////////////////////////////////////////////

static std::string getOpenSSLVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get libev version
//////////////////////////////////////////////////////////////////////////////

static std::string getLibevVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get vpack version
//////////////////////////////////////////////////////////////////////////////

static std::string getVPackVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get zlib version
//////////////////////////////////////////////////////////////////////////////

static std::string getZLibVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get ICU version
//////////////////////////////////////////////////////////////////////////////

static std::string getICUVersion();

//////////////////////////////////////////////////////////////////////////////
/// @brief get compiler
//////////////////////////////////////////////////////////////////////////////

static std::string getCompiler();

//////////////////////////////////////////////////////////////////////////////
/// @brief get endianness
//////////////////////////////////////////////////////////////////////////////

static std::string getEndianness();

//////////////////////////////////////////////////////////////////////////////
/// @brief get build date
//////////////////////////////////////////////////////////////////////////////

static std::string getBuildDate();

//////////////////////////////////////////////////////////////////////////////
/// @brief get build repository
//////////////////////////////////////////////////////////////////////////////

static std::string getBuildRepository();

//////////////////////////////////////////////////////////////////////////////
/// @brief return a server version string
//////////////////////////////////////////////////////////////////////////////

static std::string getVerboseVersionString();

//////////////////////////////////////////////////////////////////////////////
/// @brief get detailed version information as a (multi-line) string
//////////////////////////////////////////////////////////////////////////////

static std::string getDetailed();

//////////////////////////////////////////////////////////////////////////////
/// @brief VelocyPack all data
//////////////////////////////////////////////////////////////////////////////

static void getVPack(arangodb::velocypack::Builder&);

public:
@@ -70,7 +70,7 @@ struct VppInputMessage {
if (!_payload.empty()) {
return _payload.front();
}
return VPackSlice{};
return VPackSlice::noneSlice();
}

std::vector<VPackSlice> const& payloads() const { return _payload; }

@@ -111,7 +111,7 @@ struct VPackMessageNoOwnBuffer {
if (_payloads.size() && _generateBody) {
return _payloads.front();
}
return arangodb::basics::VelocyPackHelper::NullValue();
return VPackSlice::noneSlice();
}

std::vector<VPackSlice> payloads() { return _payloads; }
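Both hunks above switch the "no payload" return value to VPackSlice::noneSlice(), which a caller can detect explicitly instead of receiving a default-constructed or null slice. A small standalone sketch of checking for that sentinel with the VelocyPack library (firstPayload is an illustrative function, not part of the real message types):

#include <iostream>
#include <vector>

#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

// Return the first payload, or the distinguished "none" slice if there is none.
VPackSlice firstPayload(std::vector<VPackSlice> const& payloads) {
  if (!payloads.empty()) {
    return payloads.front();
  }
  return VPackSlice::noneSlice();
}

int main() {
  std::vector<VPackSlice> empty;
  VPackSlice s = firstPayload(empty);

  // isNone() distinguishes "no payload at all" from a payload that is null
  std::cout << (s.isNone() ? "no payload" : s.toJson()) << std::endl;
  return 0;
}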