1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

This commit is contained in:
Kaveh Vahedipour 2016-09-09 14:47:07 +02:00
commit b98975344d
21 changed files with 332 additions and 299 deletions

View File

@ -24,9 +24,11 @@ devel
* added Optimizer Rule for other indexes in Traversals * added Optimizer Rule for other indexes in Traversals
this allows AQL traversals to use other indexes than the edge index. this allows AQL traversals to use other indexes than the edge index.
So traversals with filters on edges can now make use of more specific So traversals with filters on edges can now make use of more specific
indexes. E.g.: indexes, e.g.
FOR v, e, p IN 2 OUTBOUND @start @@edge FILTER p.edges[0].foo == "bar"
Will prefer an Hash Index on [_from, foo] above the EdgeIndex. FOR v, e, p IN 2 OUTBOUND @start @@edge FILTER p.edges[0].foo == "bar"
will prefer a Hash Index on [_from, foo] above the EdgeIndex.
* fixed epoch computation in hybrid logical clock * fixed epoch computation in hybrid logical clock
@ -68,12 +70,14 @@ devel
* added module.context.createDocumentationRouter to replace module.context.apiDocumentation * added module.context.createDocumentationRouter to replace module.context.apiDocumentation
* bug in RAFT implementation of reads. dethroned leader still answered * bug in RAFT implementation of reads. dethroned leader still answered
requests in isolation requests in isolation
v3.0.8 (XXXX-XX-XX) v3.0.8 (XXXX-XX-XX)
------------------- -------------------
* fixed issue #2005
* fixed issue #2039 * fixed issue #2039

View File

@ -2,11 +2,11 @@
## C/C++ Libraries ## C/C++ Libraries
### Boost 1.58.0 ### Boost 1.61.0
* Project Home: http://www.boost.org/ * Project Home: http://www.boost.org/
* License: Boost [boost software license](http://www.boost.org/LICENSE_1_0.txt) * License: Boost [boost software license](http://www.boost.org/LICENSE_1_0.txt)
* License: argument_value_usage.hpp [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/devel/3rdParty/boost/1.58.0/boost/test/utils/runtime/cla/detail/argument_value_usage.hpp) * License: argument_value_usage.hpp [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/devel/3rdParty/boost/1.61.0/boost/test/utils/runtime/cla/detail/argument_value_usage.hpp)
### fpconv_dtoa ### fpconv_dtoa
@ -75,18 +75,10 @@
## Programs ## Programs
### autoconf ### cmake
* Project Home: http://www.gnu.org/software/autoconf/autoconf.html * Project Home: https://cmake.org/
* only used to generate code, not part of the distribution * License OSI-approved BSD 3-clause License [https://cmake.org/licensing/]
* License: configure [free as-is license](https://github.com/arangodb/arangodb/blob/master/configure#L11)
* License: ax_cxx_compile_stdcxx_11.m4 [free as-is license](https://github.com/arangodb/arangodb/blob/master/m4/ax_cxx_compile_stdcxx_11.m4#L25)
### automake
* Project Home: https://www.gnu.org/software/automake/
* only used to generate code, not part of the distribution
* License: Makefile.in [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/master/Makefile.in)
### Bison 3.0 ### Bison 3.0

View File

@ -927,13 +927,16 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
THROW_ARANGO_EXCEPTION_MESSAGE( THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, message); TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, message);
} else { } else {
// Only if the aresult was successful we will get here // Only if the result was successful we will get here
arangodb::basics::StringBuffer& body = res->result->getBody(); arangodb::basics::StringBuffer& body = res->result->getBody();
std::shared_ptr<VPackBuilder> builder = std::shared_ptr<VPackBuilder> builder =
VPackParser::fromJson(body.c_str(), body.length()); VPackParser::fromJson(body.c_str(), body.length());
VPackSlice resultSlice = builder->slice(); VPackSlice resultSlice = builder->slice();
TRI_ASSERT(resultSlice.isNumber()); if (!resultSlice.isNumber()) {
THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_INTERNAL, "got unexpected response from engine lock request");
}
auto engineId = resultSlice.getNumericValue<traverser::TraverserEngineID>(); auto engineId = resultSlice.getNumericValue<traverser::TraverserEngineID>();
TRI_ASSERT(engineId != 0); TRI_ASSERT(engineId != 0);
traverserEngines.emplace(engineId, shardSet); traverserEngines.emplace(engineId, shardSet);

View File

@ -40,6 +40,7 @@
#include <velocypack/Validator.h> #include <velocypack/Validator.h>
#include <velocypack/velocypack-aliases.h> #include <velocypack/velocypack-aliases.h>
#include <boost/optional.hpp>
#include <iostream> #include <iostream>
#include <limits> #include <limits>
#include <stdexcept> #include <stdexcept>
@ -51,7 +52,8 @@ using namespace arangodb::rest;
VppCommTask::VppCommTask(GeneralServer* server, TRI_socket_t sock, VppCommTask::VppCommTask(GeneralServer* server, TRI_socket_t sock,
ConnectionInfo&& info, double timeout) ConnectionInfo&& info, double timeout)
: Task("VppCommTask"), : Task("VppCommTask"),
GeneralCommTask(server, sock, std::move(info), timeout) { GeneralCommTask(server, sock, std::move(info), timeout),
_authenticatedUser() {
_protocol = "vpp"; _protocol = "vpp";
_readBuffer.reserve( _readBuffer.reserve(
_bufferLength); // ATTENTION <- this is required so we do not _bufferLength); // ATTENTION <- this is required so we do not
@ -85,14 +87,12 @@ void VppCommTask::addResponse(VppResponse* response) {
} }
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "response -- end"; LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "response -- end";
// FIXME (obi)
// If the message is big we will create many small chunks in a loop.
// For the first tests we just send single Messages
// adds chunk header infromation and creates SingBuffer* that can be // adds chunk header infromation and creates SingBuffer* that can be
// used with _writeBuffers // used with _writeBuffers
auto buffers = createChunkForNetwork(slices, id, auto buffers = createChunkForNetwork(
std::numeric_limits<std::size_t>::max()); slices, id, (std::numeric_limits<std::size_t>::max)(),
false); // set some sensible maxchunk
// size and compression
double const totalTime = getAgent(id)->elapsedSinceReadStart(); double const totalTime = getAgent(id)->elapsedSinceReadStart();
@ -185,143 +185,17 @@ bool VppCommTask::processRead() {
bool read_maybe_only_part_of_buffer = false; bool read_maybe_only_part_of_buffer = false;
VppInputMessage message; // filled in CASE 1 or CASE 2b VppInputMessage message; // filled in CASE 1 or CASE 2b
// CASE 1: message is in one chunk
if (chunkHeader._isFirst && chunkHeader._chunk == 1) { if (chunkHeader._isFirst && chunkHeader._chunk == 1) {
_agents.emplace( // CASE 1: message is in one chunk
std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true))); if (auto rv = getMessageFromSingleChunk(chunkHeader, message, doExecute,
vpackBegin, chunkEnd)) {
auto agent = getAgent(chunkHeader._messageID); return *rv;
agent->acquire();
agent->requestStatisticsAgentSetReadStart();
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk contains single message";
std::size_t payloads = 0;
try {
payloads = validateAndCount(vpackBegin, chunkEnd);
} catch (std::exception const& e) {
handleSimpleError(rest::ResponseCode::BAD,
TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!"
<< e.what();
closeTask(rest::ResponseCode::BAD);
return false;
} catch (...) {
handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!";
closeTask(rest::ResponseCode::BAD);
return false;
} }
} else {
VPackBuffer<uint8_t> buffer; if (auto rv = getMessageFromMultiChunks(chunkHeader, message, doExecute,
buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd)); vpackBegin, chunkEnd)) {
message.set(chunkHeader._messageID, std::move(buffer), payloads); // fixme return *rv;
// message._header = VPackSlice(message._buffer.data());
// if (payloadOffset) {
// message._payload = VPackSlice(message._buffer.data() + payloadOffset);
// }
doExecute = true;
getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
}
// CASE 2: message is in multiple chunks
auto incompleteMessageItr = _incompleteMessages.find(chunkHeader._messageID);
// CASE 2a: chunk starts new message
if (chunkHeader._isFirst) { // first chunk of multi chunk message
_agents.emplace(
std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));
auto agent = getAgent(chunkHeader._messageID);
agent->acquire();
agent->requestStatisticsAgentSetReadStart();
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk starts a new message";
if (incompleteMessageItr != _incompleteMessages.end()) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "Message should be first but is already in the Map of incomplete "
"messages";
closeTask(rest::ResponseCode::BAD);
return false;
} }
// TODO: is a 32bit value sufficient for the messageLength here?
IncompleteVPackMessage message(
static_cast<uint32_t>(chunkHeader._messageLength),
chunkHeader._chunk /*number of chunks*/);
message._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
auto insertPair = _incompleteMessages.emplace(
std::make_pair(chunkHeader._messageID, std::move(message)));
if (!insertPair.second) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "insert failed";
closeTask(rest::ResponseCode::BAD);
return false;
}
// CASE 2b: chunk continues a message
} else { // followup chunk of some mesage
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk continues a message";
if (incompleteMessageItr == _incompleteMessages.end()) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "found message without previous part";
closeTask(rest::ResponseCode::BAD);
return false;
}
auto& im = incompleteMessageItr->second; // incomplete Message
im._currentChunk++;
assert(im._currentChunk == chunkHeader._chunk);
im._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
// check buffer longer than length
// MESSAGE COMPLETE
if (im._currentChunk == im._numberOfChunks - 1 /* zero based counting */) {
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "chunk completes a message";
std::size_t payloads = 0;
try {
payloads =
validateAndCount(reinterpret_cast<char const*>(im._buffer.data()),
reinterpret_cast<char const*>(
im._buffer.data() + im._buffer.byteSize()));
} catch (std::exception const& e) {
handleSimpleError(rest::ResponseCode::BAD,
TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!"
<< e.what();
closeTask(rest::ResponseCode::BAD);
return false;
} catch (...) {
handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
<< "VPack Validation failed!";
closeTask(rest::ResponseCode::BAD);
return false;
}
message.set(chunkHeader._messageID, std::move(im._buffer), payloads);
_incompleteMessages.erase(incompleteMessageItr);
// check length
doExecute = true;
getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
}
LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
<< "VppCommTask: "
<< "chunk does not complete a message";
} }
read_maybe_only_part_of_buffer = true; read_maybe_only_part_of_buffer = true;
@ -346,6 +220,7 @@ bool VppCommTask::processRead() {
<< "\"," << message.payload().toJson() << "\"," << message.payload().toJson()
<< "\""; << "\"";
// get type of request
int type = meta::underlyingValue(rest::RequestType::ILLEGAL); int type = meta::underlyingValue(rest::RequestType::ILLEGAL);
try { try {
type = header.at(1).getInt(); type = header.at(1).getInt();
@ -357,14 +232,47 @@ bool VppCommTask::processRead() {
closeTask(rest::ResponseCode::BAD); closeTask(rest::ResponseCode::BAD);
return false; return false;
} }
// handle request types
if (type == 1000) { if (type == 1000) {
// do auth // do authentication
// std::string encryption = header.at(2).copyString();
std::string user = header.at(3).copyString();
std::string pass = header.at(4).copyString();
auto auth = basics::StringUtils::encodeBase64(user + ":" + pass);
AuthResult result = GeneralServerFeature::AUTH_INFO.checkAuthentication(
AuthInfo::AuthType::BASIC, auth);
if (result._authorized) {
_authenticatedUser = std::move(user);
handleSimpleError(rest::ResponseCode::OK, TRI_ERROR_NO_ERROR,
"authentication successful", chunkHeader._messageID);
} else {
_authenticatedUser.clear();
handleSimpleError(rest::ResponseCode::UNAUTHORIZED,
TRI_ERROR_HTTP_UNAUTHORIZED, "authentication failed",
chunkHeader._messageID);
}
} else { } else {
// check auth
// the handler will take ownersip of this pointer // the handler will take ownersip of this pointer
std::unique_ptr<VppRequest> request(new VppRequest( std::unique_ptr<VppRequest> request(new VppRequest(
_connectionInfo, std::move(message), chunkHeader._messageID)); _connectionInfo, std::move(message), chunkHeader._messageID));
GeneralServerFeature::HANDLER_FACTORY->setRequestContext(request.get()); GeneralServerFeature::HANDLER_FACTORY->setRequestContext(request.get());
request->setUser(_authenticatedUser);
// check authentication
std::string const& dbname = request->databaseName();
if (!_authenticatedUser.empty() || !dbname.empty()) {
AuthLevel level = GeneralServerFeature::AUTH_INFO.canUseDatabase(
_authenticatedUser, dbname);
if (level != AuthLevel::RW) {
handleSimpleError(
rest::ResponseCode::UNAUTHORIZED, TRI_ERROR_FORBIDDEN,
TRI_errno_string(TRI_ERROR_FORBIDDEN), chunkHeader._messageID);
}
}
// make sure we have a database // make sure we have a database
if (request->requestContext() == nullptr) { if (request->requestContext() == nullptr) {
handleSimpleError(rest::ResponseCode::NOT_FOUND, handleSimpleError(rest::ResponseCode::NOT_FOUND,
@ -455,3 +363,149 @@ void VppCommTask::handleSimpleError(rest::ResponseCode responseCode,
_clientClosed = true; _clientClosed = true;
} }
} }
// Handle the case where a complete VPack message arrived in one single
// chunk. On success the message is stored in `message`, `doExecute` is set
// and boost::none is returned so the caller keeps processing. If validation
// fails the task is closed and a bool is returned that the caller must
// propagate as the result of processRead().
boost::optional<bool> VppCommTask::getMessageFromSingleChunk(
    ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
    char const* vpackBegin, char const* chunkEnd) {
  // register a statistics agent for this new message and start read timing
  _agents.emplace(chunkHeader._messageID, RequestStatisticsAgent(true));

  auto agent = getAgent(chunkHeader._messageID);
  agent->acquire();
  agent->requestStatisticsAgentSetReadStart();

  LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                          << "chunk contains single message";

  std::size_t payloadCount = 0;
  try {
    payloadCount = validateAndCount(vpackBegin, chunkEnd);
  } catch (std::exception const& e) {
    // NOTE(review): error code looks odd for a validation failure — confirm
    handleSimpleError(rest::ResponseCode::BAD,
                      TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
                      chunkHeader._messageID);
    LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                            << "VPack Validation failed!"
                                            << e.what();
    closeTask(rest::ResponseCode::BAD);
    return false;
  } catch (...) {
    handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
    LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                            << "VPack Validation failed!";
    closeTask(rest::ResponseCode::BAD);
    return false;
  }

  // copy the chunk body into the message buffer and hand it over
  VPackBuffer<uint8_t> chunkPayload;
  chunkPayload.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
  message.set(chunkHeader._messageID, std::move(chunkPayload), payloadCount);

  doExecute = true;
  getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
  return boost::none;
}
// Assemble a VPack message that is spread over multiple chunks. The first
// chunk of a message (CASE 2a) creates an entry in _incompleteMessages;
// follow-up chunks (CASE 2b) append to that entry. Once the final chunk has
// arrived, `message` is filled and `doExecute` is set. Returns boost::none
// to continue processing, or a bool result (to be propagated by the caller
// as the result of processRead()) when the task was closed due to an error.
boost::optional<bool> VppCommTask::getMessageFromMultiChunks(
    ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
    char const* vpackBegin, char const* chunkEnd) {
  // CASE 2: message is in multiple chunks
  auto incompleteMessageItr = _incompleteMessages.find(chunkHeader._messageID);

  // CASE 2a: chunk starts new message
  if (chunkHeader._isFirst) {  // first chunk of multi chunk message
    // add agent for this new message and start read timing
    _agents.emplace(
        std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true)));
    auto agent = getAgent(chunkHeader._messageID);
    agent->acquire();
    agent->requestStatisticsAgentSetReadStart();

    LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                            << "chunk starts a new message";
    if (incompleteMessageItr != _incompleteMessages.end()) {
      // a first chunk must not reference a messageID we are already tracking
      LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
          << "VppCommTask: "
          << "Message should be first but is already in the Map of "
             "incomplete "
             "messages";
      closeTask(rest::ResponseCode::BAD);
      return false;
    }

    // TODO: is a 32bit value sufficient for the messageLength here?
    // renamed from `message` to avoid shadowing the out-parameter
    IncompleteVPackMessage newMessage(
        static_cast<uint32_t>(chunkHeader._messageLength),
        chunkHeader._chunk /*number of chunks*/);
    newMessage._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
    auto insertPair = _incompleteMessages.emplace(
        std::make_pair(chunkHeader._messageID, std::move(newMessage)));
    if (!insertPair.second) {
      LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                              << "insert failed";
      closeTask(rest::ResponseCode::BAD);
      return false;
    }

    // CASE 2b: chunk continues a message
  } else {  // followup chunk of some message
    // do not add agent for this continued message
    LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                            << "chunk continues a message";
    if (incompleteMessageItr == _incompleteMessages.end()) {
      LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
          << "VppCommTask: "
          << "found message without previous part";
      closeTask(rest::ResponseCode::BAD);
      return false;
    }
    auto& im = incompleteMessageItr->second;  // incomplete Message
    im._currentChunk++;
    assert(im._currentChunk == chunkHeader._chunk);
    im._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd));
    // check buffer longer than length

    // MESSAGE COMPLETE
    if (im._currentChunk == im._numberOfChunks - 1 /* zero based counting */) {
      LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                              << "chunk completes a message";
      std::size_t payloads = 0;
      try {
        payloads =
            validateAndCount(reinterpret_cast<char const*>(im._buffer.data()),
                             reinterpret_cast<char const*>(
                                 im._buffer.data() + im._buffer.byteSize()));
      } catch (std::exception const& e) {
        // NOTE(review): error code looks odd for a validation failure — confirm
        handleSimpleError(rest::ResponseCode::BAD,
                          TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(),
                          chunkHeader._messageID);
        LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                                << "VPack Validation failed!"
                                                << e.what();
        closeTask(rest::ResponseCode::BAD);
        return false;
      } catch (...) {
        handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID);
        LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: "
                                                << "VPack Validation failed!";
        closeTask(rest::ResponseCode::BAD);
        return false;
      }
      message.set(chunkHeader._messageID, std::move(im._buffer), payloads);
      _incompleteMessages.erase(incompleteMessageItr);
      // check length

      doExecute = true;
      getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd();
    } else {
      // fixed: this was previously logged unconditionally, i.e. even when
      // the chunk DID complete the message (directly after the branch above)
      LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
          << "VppCommTask: "
          << "chunk does not complete a message";
    }
  }
  return boost::none;
}

View File

@ -29,6 +29,7 @@
#include "lib/Rest/VppRequest.h" #include "lib/Rest/VppRequest.h"
#include "lib/Rest/VppResponse.h" #include "lib/Rest/VppResponse.h"
#include <boost/optional.hpp>
#include <stdexcept> #include <stdexcept>
namespace arangodb { namespace arangodb {
@ -122,6 +123,16 @@ class VppCommTask : public GeneralCommTask {
ChunkHeader readChunkHeader(); // sub-function of processRead ChunkHeader readChunkHeader(); // sub-function of processRead
void replyToIncompleteMessages(); void replyToIncompleteMessages();
boost::optional<bool> getMessageFromSingleChunk(
ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
char const* vpackBegin, char const* chunkEnd);
boost::optional<bool> getMessageFromMultiChunks(
ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute,
char const* vpackBegin, char const* chunkEnd);
std::string _authenticatedUser;
// user // user
// authenticated or not // authenticated or not
// database aus url // database aus url

View File

@ -209,11 +209,11 @@ void PrimaryIndex::toVelocyPackFigures(VPackBuilder& builder) const {
} }
int PrimaryIndex::insert(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) { int PrimaryIndex::insert(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED); THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "insert() called for primary index");
} }
int PrimaryIndex::remove(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) { int PrimaryIndex::remove(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED); THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "remove() called for primary index");
} }
/// @brief unload the index data from memory /// @brief unload the index data from memory

View File

@ -35,10 +35,6 @@
#include "RestServer/DatabaseFeature.h" #include "RestServer/DatabaseFeature.h"
#include "V8Server/V8DealerFeature.h" #include "V8Server/V8DealerFeature.h"
#ifdef USE_ENTERPRISE
#include "Enterprise/Version.h"
#endif
using namespace arangodb; using namespace arangodb;
using namespace arangodb::application_features; using namespace arangodb::application_features;
using namespace arangodb::options; using namespace arangodb::options;
@ -160,13 +156,8 @@ void BootstrapFeature::start() {
// Start service properly: // Start service properly:
rest::RestHandlerFactory::setMaintenance(false); rest::RestHandlerFactory::setMaintenance(false);
#ifdef USE_ENTERPRISE
LOG(INFO) << "ArangoDB (enterprise version " << ARANGODB_VERSION_FULL
<< " / " << ENTERPRISE_VERSION << ") is ready for business. Have fun!";
#else
LOG(INFO) << "ArangoDB (version " << ARANGODB_VERSION_FULL LOG(INFO) << "ArangoDB (version " << ARANGODB_VERSION_FULL
<< ") is ready for business. Have fun!"; << ") is ready for business. Have fun!";
#endif
if (_bark) { if (_bark) {
LOG(INFO) << "The dog says: wau wau!"; LOG(INFO) << "The dog says: wau wau!";

View File

@ -1917,7 +1917,23 @@ static void JS_VersionServer(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate); TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate); v8::HandleScope scope(isolate);
TRI_V8_RETURN(TRI_V8_ASCII_STRING(ARANGODB_VERSION)); bool details = false;
if (args.Length() > 0) {
details = TRI_ObjectToBoolean(args[0]);
}
if (!details) {
// return version string
TRI_V8_RETURN(TRI_V8_ASCII_STRING(ARANGODB_VERSION));
}
// return version details
VPackBuilder builder;
builder.openObject();
rest::Version::getVPack(builder);
builder.close();
TRI_V8_RETURN(TRI_VPackToV8(isolate, builder.slice()));
TRI_V8_TRY_CATCH_END TRI_V8_TRY_CATCH_END
} }

View File

@ -80,6 +80,7 @@ class IndexFiller {
void operator()() { void operator()() {
int res = TRI_ERROR_INTERNAL; int res = TRI_ERROR_INTERNAL;
TRI_ASSERT(_idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
try { try {
res = _collection->fillIndex(_trx, _idx); res = _collection->fillIndex(_trx, _idx);
@ -210,7 +211,6 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
VPackSlice value = info.get("type"); VPackSlice value = info.get("type");
if (!value.isString()) { if (!value.isString()) {
// FIXME Intenral Compatibility.
// Compatibility with old v8-vocindex. // Compatibility with old v8-vocindex.
if (generateKey) { if (generateKey) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
@ -232,8 +232,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
iid = Helper::getNumericValue<TRI_idx_iid_t>(info, "id", 0); iid = Helper::getNumericValue<TRI_idx_iid_t>(info, "id", 0);
} else if (!generateKey) { } else if (!generateKey) {
// In the restore case it is forbidden to NOT have id // In the restore case it is forbidden to NOT have id
LOG(ERR) << "ignoring index, index identifier could not be located"; THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot restore index without index identifier");
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
} }
if (iid == 0 && !isClusterConstructor) { if (iid == 0 && !isClusterConstructor) {
@ -249,7 +248,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: { case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: {
if (!isClusterConstructor) { if (!isClusterConstructor) {
// this indexes cannot be created directly // this indexes cannot be created directly
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create primary index");
} }
newIdx.reset(new arangodb::PrimaryIndex(col)); newIdx.reset(new arangodb::PrimaryIndex(col));
break; break;
@ -257,7 +256,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
case arangodb::Index::TRI_IDX_TYPE_EDGE_INDEX: { case arangodb::Index::TRI_IDX_TYPE_EDGE_INDEX: {
if (!isClusterConstructor) { if (!isClusterConstructor) {
// this indexes cannot be created directly // this indexes cannot be created directly
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create edge index");
} }
newIdx.reset(new arangodb::EdgeIndex(iid, col)); newIdx.reset(new arangodb::EdgeIndex(iid, col));
break; break;
@ -281,7 +280,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
break; break;
#else #else
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED, THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED,
"index type not supported in this build"); "index type 'persistent' not supported in this build");
#endif #endif
} }
case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: { case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: {
@ -445,7 +444,12 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i
} }
} }
} }
/*
if (!isCluster) {
createInitialIndexes();
}
*/
auto indexesSlice = info.get("indexes"); auto indexesSlice = info.get("indexes");
if (indexesSlice.isArray()) { if (indexesSlice.isArray()) {
bool const isCluster = ServerState::instance()->isRunningInCluster(); bool const isCluster = ServerState::instance()->isRunningInCluster();
@ -457,15 +461,27 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i
// TODO Handle Properly // TODO Handle Properly
continue; continue;
} }
auto idx = PrepareIndexFromSlice(v, false, this, true); auto idx = PrepareIndexFromSlice(v, false, this, true);
if (isCluster) { if (isCluster) {
addIndexCoordinator(idx, false); addIndexCoordinator(idx, false);
} else { } else {
/* if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
idx->type() == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX) {
// already added those types earlier
continue;
}
*/
addIndex(idx); addIndex(idx);
} }
} }
} }
if (_indexes.empty()) {
createInitialIndexes();
}
if (!ServerState::instance()->isCoordinator() && isPhysical) { if (!ServerState::instance()->isCoordinator() && isPhysical) {
// If we are not in the coordinator we need a path // If we are not in the coordinator we need a path
// to the physical data. // to the physical data.
@ -764,6 +780,7 @@ LogicalCollection::getIndexes() const {
// or it's indexes are freed the pointer returned will get invalidated. // or it's indexes are freed the pointer returned will get invalidated.
arangodb::PrimaryIndex* LogicalCollection::primaryIndex() const { arangodb::PrimaryIndex* LogicalCollection::primaryIndex() const {
TRI_ASSERT(!_indexes.empty()); TRI_ASSERT(!_indexes.empty());
TRI_ASSERT(_indexes[0]->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
// the primary index must be the index at position #0 // the primary index must be the index at position #0
return static_cast<arangodb::PrimaryIndex*>(_indexes[0].get()); return static_cast<arangodb::PrimaryIndex*>(_indexes[0].get());
} }
@ -1124,7 +1141,7 @@ PhysicalCollection* LogicalCollection::createPhysical() {
void LogicalCollection::open(bool ignoreErrors) { void LogicalCollection::open(bool ignoreErrors) {
VPackBuilder builder; VPackBuilder builder;
StorageEngine* engine = EngineSelectorFeature::ENGINE; StorageEngine* engine = EngineSelectorFeature::ENGINE;
engine->getCollectionInfo(_vocbase, cid(), builder, false, 0); engine->getCollectionInfo(_vocbase, cid(), builder, true, 0);
double start = TRI_microtime(); double start = TRI_microtime();
@ -1135,17 +1152,9 @@ void LogicalCollection::open(bool ignoreErrors) {
int res = openWorker(ignoreErrors); int res = openWorker(ignoreErrors);
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot open document collection from path '" << path() << "'"; THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot open document collection from path '") + path() + "': " + TRI_errno_string(res));
THROW_ARANGO_EXCEPTION(res);
} }
res = createInitialIndexes();
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res);
THROW_ARANGO_EXCEPTION(res);
}
arangodb::SingleCollectionTransaction trx( arangodb::SingleCollectionTransaction trx(
arangodb::StandaloneTransactionContext::Create(_vocbase), arangodb::StandaloneTransactionContext::Create(_vocbase),
cid(), TRI_TRANSACTION_WRITE); cid(), TRI_TRANSACTION_WRITE);
@ -1309,6 +1318,7 @@ std::shared_ptr<Index> LogicalCollection::createIndex(Transaction* trx,
return idx; return idx;
} }
TRI_ASSERT(idx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
int res = fillIndex(trx, idx.get(), false); int res = fillIndex(trx, idx.get(), false);
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
@ -1336,21 +1346,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info,
if (!info.isObject()) { if (!info.isObject()) {
return TRI_ERROR_INTERNAL; return TRI_ERROR_INTERNAL;
} }
/* FIXME Old style First check if iid is okay and update server tick
TRI_idx_iid_t iid;
if (iis.isNumber()) {
iid = iis.getNumericValue<TRI_idx_iid_t>();
} else if (iis.isString()) {
std::string tmp = iis.copyString();
iid = static_cast<TRI_idx_iid_t>(basics::StringUtils::uint64(tmp));
} else {
LOG(ERR) << "ignoring index, index identifier could not be located";
return TRI_ERROR_INTERNAL;
}
TRI_UpdateTickServer(iid);
*/
// We create a new Index object to make sure that the index // We create a new Index object to make sure that the index
// is not handed out except for a successful case. // is not handed out except for a successful case.
std::shared_ptr<Index> newIdx; std::shared_ptr<Index> newIdx;
@ -1366,6 +1362,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info,
// FIXME New style. Update tick after successful creation of index. // FIXME New style. Update tick after successful creation of index.
TRI_UpdateTickServer(newIdx->id()); TRI_UpdateTickServer(newIdx->id());
TRI_ASSERT(newIdx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
int res = fillIndex(trx, newIdx.get()); int res = fillIndex(trx, newIdx.get());
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
@ -1516,21 +1513,16 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
} }
/// @brief creates the initial indexes for the collection /// @brief creates the initial indexes for the collection
int LogicalCollection::createInitialIndexes() { void LogicalCollection::createInitialIndexes() {
// TODO Properly fix this. The outside should make sure that only NEW collections // TODO Properly fix this. The outside should make sure that only NEW collections
// try to create the indexes. // try to create the indexes.
if (!_indexes.empty()) { if (!_indexes.empty()) {
return TRI_ERROR_NO_ERROR; return;
} }
// create primary index // create primary index
auto primaryIndex = std::make_shared<arangodb::PrimaryIndex>(this); auto primaryIndex = std::make_shared<arangodb::PrimaryIndex>(this);
addIndex(primaryIndex);
try {
addIndex(primaryIndex);
} catch (...) {
return TRI_ERROR_OUT_OF_MEMORY;
}
// create edges index // create edges index
if (_type == TRI_COL_TYPE_EDGE) { if (_type == TRI_COL_TYPE_EDGE) {
@ -1539,16 +1531,10 @@ int LogicalCollection::createInitialIndexes() {
iid = _planId; iid = _planId;
} }
try { auto edgeIndex = std::make_shared<arangodb::EdgeIndex>(iid, this);
auto edgeIndex = std::make_shared<arangodb::EdgeIndex>(iid, this);
addIndex(edgeIndex); addIndex(edgeIndex);
} catch (...) {
return TRI_ERROR_OUT_OF_MEMORY;
}
} }
return TRI_ERROR_NO_ERROR;
} }
/// @brief iterator for index open /// @brief iterator for index open
@ -1633,6 +1619,7 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) {
// now actually fill the secondary indexes // now actually fill the secondary indexes
for (size_t i = 1; i < n; ++i) { for (size_t i = 1; i < n; ++i) {
auto idx = _indexes[i]; auto idx = _indexes[i];
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
// index threads must come first, otherwise this thread will block the // index threads must come first, otherwise this thread will block the
// loop and // loop and
@ -1685,6 +1672,9 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) {
} }
void LogicalCollection::addIndex(std::shared_ptr<arangodb::Index> idx) { void LogicalCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
// primary index must be added at position 0
TRI_ASSERT(idx->type() != arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX || _indexes.empty());
_indexes.emplace_back(idx); _indexes.emplace_back(idx);
// update statistics // update statistics
@ -2364,6 +2354,7 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx,
int LogicalCollection::fillIndex(arangodb::Transaction* trx, int LogicalCollection::fillIndex(arangodb::Transaction* trx,
arangodb::Index* idx, arangodb::Index* idx,
bool skipPersistent) { bool skipPersistent) {
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
TRI_ASSERT(!ServerState::instance()->isCoordinator()); TRI_ASSERT(!ServerState::instance()->isCoordinator());
if (!useSecondaryIndexes()) { if (!useSecondaryIndexes()) {
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
@ -2490,6 +2481,7 @@ int LogicalCollection::fillIndexSequential(arangodb::Transaction* trx,
auto primaryIndex = this->primaryIndex(); auto primaryIndex = this->primaryIndex();
size_t nrUsed = primaryIndex->size(); size_t nrUsed = primaryIndex->size();
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
idx->sizeHint(trx, nrUsed); idx->sizeHint(trx, nrUsed);
if (nrUsed > 0) { if (nrUsed > 0) {
@ -2965,6 +2957,7 @@ int LogicalCollection::insertSecondaryIndexes(
for (size_t i = 1; i < n; ++i) { for (size_t i = 1; i < n; ++i) {
auto idx = _indexes[i]; auto idx = _indexes[i];
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
if (!useSecondary && !idx->isPersistent()) { if (!useSecondary && !idx->isPersistent()) {
continue; continue;
@ -3007,6 +3000,7 @@ int LogicalCollection::deleteSecondaryIndexes(
for (size_t i = 1; i < n; ++i) { for (size_t i = 1; i < n; ++i) {
auto idx = _indexes[i]; auto idx = _indexes[i];
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
if (!useSecondary && !idx->isPersistent()) { if (!useSecondary && !idx->isPersistent()) {
continue; continue;

View File

@ -359,7 +359,7 @@ class LogicalCollection {
// SECTION: Index creation // SECTION: Index creation
/// @brief creates the initial indexes for the collection /// @brief creates the initial indexes for the collection
int createInitialIndexes(); void createInitialIndexes();
int openWorker(bool ignoreErrors); int openWorker(bool ignoreErrors);

View File

@ -306,9 +306,14 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
_vertexExpressions.reserve(read.length()); _vertexExpressions.reserve(read.length());
for (auto const& info : VPackObjectIterator(read)) { for (auto const& info : VPackObjectIterator(read)) {
size_t d = basics::StringUtils::uint64(info.key.copyString()); size_t d = basics::StringUtils::uint64(info.key.copyString());
#ifdef ARANGODB_ENABLE_MAINAINER_MODE
auto it = _vertexExpressions.emplace( auto it = _vertexExpressions.emplace(
d, new aql::Expression(query->ast(), info.value)); d, new aql::Expression(query->ast(), info.value));
TRI_ASSERT(it.second); TRI_ASSERT(it.second);
#else
_vertexExpressions.emplace(
d, new aql::Expression(query->ast(), info.value));
#endif
} }
} }

View File

@ -303,14 +303,6 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
TRI_ASSERT(collection != nullptr); TRI_ASSERT(collection != nullptr);
try { try {
// Maybe the ordering is broken now
// create document collection
int res = collection->createInitialIndexes();
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}
// cid might have been assigned // cid might have been assigned
cid = collection->cid(); cid = collection->cid();

View File

@ -414,6 +414,8 @@
// add those filters also to the collection // add those filters also to the collection
self.collection.addFilter(f.attribute, f.operator, f.value); self.collection.addFilter(f.attribute, f.operator, f.value);
}); });
self.rerender();
}, },
addFilterItem: function () { addFilterItem: function () {

View File

@ -1133,6 +1133,8 @@
arangoHelper.arangoError('Graph', 'Could not expand node: ' + id + '.'); arangoHelper.arangoError('Graph', 'Could not expand node: ' + id + '.');
} }
}); });
self.removeHelp();
}, },
checkExpand: function (data, origin) { checkExpand: function (data, origin) {
@ -1671,13 +1673,14 @@
e.color = e.originalColor; e.color = e.originalColor;
}); });
$('.nodeInfoDiv').remove();
s.refresh({ skipIndexation: true }); s.refresh({ skipIndexation: true });
} }
}; };
s.bind('rightClickStage', function (e) { s.bind('rightClickStage', function (e) {
unhighlightNodes();
self.nodeHighlighted = 'undefinedid'; self.nodeHighlighted = 'undefinedid';
unhighlightNodes();
}); });
s.bind('rightClickNode', function (e) { s.bind('rightClickNode', function (e) {

View File

@ -258,7 +258,6 @@
background-color: $c-bluegrey-dark; background-color: $c-bluegrey-dark;
border-radius: 2px; border-radius: 2px;
color: $c-white; color: $c-white;
padding: 10px; padding: 10px 20px;
padding-left: 150px;
} }
} }

View File

@ -526,12 +526,13 @@ ArangoDatabase.prototype._dropIndex = function (id) {
// / @brief returns the database version // / @brief returns the database version
// ////////////////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////////////////
ArangoDatabase.prototype._version = function () { ArangoDatabase.prototype._version = function (details) {
var requestResult = this._connection.GET('/_api/version'); var requestResult = this._connection.GET('/_api/version' +
(details ? '?details=true' : ''));
arangosh.checkRequestResult(requestResult); arangosh.checkRequestResult(requestResult);
return requestResult.version; return details ? requestResult : requestResult.version;
}; };
// ////////////////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////////////////

View File

@ -1551,7 +1551,7 @@ function ahuacatlQueryGeneralTraversalTestSuite() {
); );
}, },
testGRAPH_SHOTEST_PATH_with_stopAtFirstMatch: function () { testGRAPH_SHORTEST_PATH_with_stopAtFirstMatch: function () {
var actual; var actual;
actual = getQueryResults("FOR e IN arangodb::GRAPH_SHORTEST_PATH('werKenntWen', 'UnitTests_Frankfurter/Fritz', " + actual = getQueryResults("FOR e IN arangodb::GRAPH_SHORTEST_PATH('werKenntWen', 'UnitTests_Frankfurter/Fritz', " +

View File

@ -288,7 +288,7 @@ void WindowsServiceFeature::installService() {
} }
SERVICE_DESCRIPTION description = { SERVICE_DESCRIPTION description = {
"multi-model NoSQL database (version " ARANGODB_VERSION ")"}; "multi-model NoSQL database (version " ARANGODB_VERSION_FULL ")"};
ChangeServiceConfig2(schService, SERVICE_CONFIG_DESCRIPTION, &description); ChangeServiceConfig2(schService, SERVICE_CONFIG_DESCRIPTION, &description);
std::cout << "INFO: added service with command line '" << command << "'" std::cout << "INFO: added service with command line '" << command << "'"

View File

@ -115,7 +115,7 @@ void Version::initialize() {
#if USE_ENTERPRISE #if USE_ENTERPRISE
Values["enterprise-version"] = ENTERPRISE_VERSION; Values["enterprise-version"] = ARANGODB_ENTERPRISE_VERSION;
#endif #endif
#if HAVE_ARANGODB_BUILD_REPOSITORY #if HAVE_ARANGODB_BUILD_REPOSITORY
@ -416,6 +416,8 @@ std::string Version::getDetailed() {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
void Version::getVPack(VPackBuilder& dst) { void Version::getVPack(VPackBuilder& dst) {
TRI_ASSERT(!dst.isClosed());
for (auto const& it : Values) { for (auto const& it : Values) {
std::string const& value = it.second; std::string const& value = it.second;

View File

@ -28,12 +28,33 @@
#include "Basics/build.h" #include "Basics/build.h"
#ifdef USE_ENTERPRISE
#include "Enterprise/Version.h"
#ifndef ARANGODB_ENTERPRISE_VERSION
#error "enterprise version number is not defined"
#endif
#ifdef _DEBUG
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " " ARANGODB_ENTERPRISE_VERSION " [" TRI_PLATFORM "-DEBUG]"
#else
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " " ARANGODB_ENTERPRISE_VERSION " [" TRI_PLATFORM "]"
#endif
#else
#ifdef ARANGODB_ENTERPRISE_VERSION
#error "enterprise version number should not be defined"
#endif
#ifdef _DEBUG #ifdef _DEBUG
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "-DEBUG]" #define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "-DEBUG]"
#else #else
#define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "]" #define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "]"
#endif #endif
#endif
namespace arangodb { namespace arangodb {
namespace velocypack { namespace velocypack {
class Builder; class Builder;
@ -43,122 +64,65 @@ namespace rest {
class Version { class Version {
private: private:
//////////////////////////////////////////////////////////////////////////////
/// @brief create the version information /// @brief create the version information
//////////////////////////////////////////////////////////////////////////////
Version() = delete; Version() = delete;
Version(Version const&) = delete; Version(Version const&) = delete;
Version& operator=(Version const&) = delete; Version& operator=(Version const&) = delete;
public: public:
//////////////////////////////////////////////////////////////////////////////
/// @brief parse a version string into major, minor /// @brief parse a version string into major, minor
/// returns -1, -1 when the version string has an invalid format /// returns -1, -1 when the version string has an invalid format
//////////////////////////////////////////////////////////////////////////////
static std::pair<int, int> parseVersionString(std::string const&); static std::pair<int, int> parseVersionString(std::string const&);
//////////////////////////////////////////////////////////////////////////////
/// @brief initialize /// @brief initialize
//////////////////////////////////////////////////////////////////////////////
static void initialize(); static void initialize();
//////////////////////////////////////////////////////////////////////////////
/// @brief get numeric server version /// @brief get numeric server version
//////////////////////////////////////////////////////////////////////////////
static int32_t getNumericServerVersion(); static int32_t getNumericServerVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get server version /// @brief get server version
//////////////////////////////////////////////////////////////////////////////
static std::string getServerVersion(); static std::string getServerVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get BOOST version /// @brief get BOOST version
//////////////////////////////////////////////////////////////////////////////
static std::string getBoostVersion(); static std::string getBoostVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get V8 version /// @brief get V8 version
//////////////////////////////////////////////////////////////////////////////
static std::string getV8Version(); static std::string getV8Version();
//////////////////////////////////////////////////////////////////////////////
/// @brief get OpenSSL version /// @brief get OpenSSL version
//////////////////////////////////////////////////////////////////////////////
static std::string getOpenSSLVersion(); static std::string getOpenSSLVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get libev version /// @brief get libev version
//////////////////////////////////////////////////////////////////////////////
static std::string getLibevVersion(); static std::string getLibevVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get vpack version /// @brief get vpack version
//////////////////////////////////////////////////////////////////////////////
static std::string getVPackVersion(); static std::string getVPackVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get zlib version /// @brief get zlib version
//////////////////////////////////////////////////////////////////////////////
static std::string getZLibVersion(); static std::string getZLibVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get ICU version /// @brief get ICU version
//////////////////////////////////////////////////////////////////////////////
static std::string getICUVersion(); static std::string getICUVersion();
//////////////////////////////////////////////////////////////////////////////
/// @brief get compiler /// @brief get compiler
//////////////////////////////////////////////////////////////////////////////
static std::string getCompiler(); static std::string getCompiler();
//////////////////////////////////////////////////////////////////////////////
/// @brief get endianness /// @brief get endianness
//////////////////////////////////////////////////////////////////////////////
static std::string getEndianness(); static std::string getEndianness();
//////////////////////////////////////////////////////////////////////////////
/// @brief get build date /// @brief get build date
//////////////////////////////////////////////////////////////////////////////
static std::string getBuildDate(); static std::string getBuildDate();
//////////////////////////////////////////////////////////////////////////////
/// @brief get build repository /// @brief get build repository
//////////////////////////////////////////////////////////////////////////////
static std::string getBuildRepository(); static std::string getBuildRepository();
//////////////////////////////////////////////////////////////////////////////
/// @brief return a server version string /// @brief return a server version string
//////////////////////////////////////////////////////////////////////////////
static std::string getVerboseVersionString(); static std::string getVerboseVersionString();
//////////////////////////////////////////////////////////////////////////////
/// @brief get detailed version information as a (multi-line) string /// @brief get detailed version information as a (multi-line) string
//////////////////////////////////////////////////////////////////////////////
static std::string getDetailed(); static std::string getDetailed();
//////////////////////////////////////////////////////////////////////////////
/// @brief VelocyPack all data /// @brief VelocyPack all data
//////////////////////////////////////////////////////////////////////////////
static void getVPack(arangodb::velocypack::Builder&); static void getVPack(arangodb::velocypack::Builder&);
public: public:

View File

@ -70,7 +70,7 @@ struct VppInputMessage {
if (!_payload.empty()) { if (!_payload.empty()) {
return _payload.front(); return _payload.front();
} }
return VPackSlice{}; return VPackSlice::noneSlice();
} }
std::vector<VPackSlice> const& payloads() const { return _payload; } std::vector<VPackSlice> const& payloads() const { return _payload; }
@ -111,7 +111,7 @@ struct VPackMessageNoOwnBuffer {
if (_payloads.size() && _generateBody) { if (_payloads.size() && _generateBody) {
return _payloads.front(); return _payloads.front();
} }
return arangodb::basics::VelocyPackHelper::NullValue(); return VPackSlice::noneSlice();
} }
std::vector<VPackSlice> payloads() { return _payloads; } std::vector<VPackSlice> payloads() { return _payloads; }