diff --git a/CHANGELOG b/CHANGELOG index 8ee691bee3..a34f9b925f 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -24,9 +24,11 @@ devel * added Optimizer Rule for other indexes in Traversals this allows AQL traversals to use other indexes than the edge index. So traversals with filters on edges can now make use of more specific - indexes. E.g.: - FOR v, e, p IN 2 OUTBOUND @start @@edge FILTER p.edges[0].foo == "bar" - Will prefer an Hash Index on [_from, foo] above the EdgeIndex. + indexes, e.g. + + FOR v, e, p IN 2 OUTBOUND @start @@edge FILTER p.edges[0].foo == "bar" + + will prefer a Hash Index on [_from, foo] above the EdgeIndex. * fixed epoch computation in hybrid logical clock @@ -68,12 +70,14 @@ devel * added module.context.createDocumentationRouter to replace module.context.apiDocumentation * bug in RAFT implementation of reads. dethroned leader still answered - requests in isolation + requests in isolation v3.0.8 (XXXX-XX-XX) ------------------- +* fixed issue #2005 + * fixed issue #2039 diff --git a/LICENSES-OTHER-COMPONENTS.md b/LICENSES-OTHER-COMPONENTS.md index 0e9e92e806..081ab6bb3c 100644 --- a/LICENSES-OTHER-COMPONENTS.md +++ b/LICENSES-OTHER-COMPONENTS.md @@ -2,11 +2,11 @@ ## C/C++ Libraries -### Boost 1.58.0 +### Boost 1.61.0 * Project Home: http://www.boost.org/ * License: Boost [boost software license](http://www.boost.org/LICENSE_1_0.txt) -* License: argument_value_usage.hpp [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/devel/3rdParty/boost/1.58.0/boost/test/utils/runtime/cla/detail/argument_value_usage.hpp) +* License: argument_value_usage.hpp [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/devel/3rdParty/boost/1.61.0/boost/test/utils/runtime/cla/detail/argument_value_usage.hpp) ### fpconv_dtoa @@ -75,18 +75,10 @@ ## Programs -### autoconf +### cmake -* Project Home: http://www.gnu.org/software/autoconf/autoconf.html -* only used to generate code, not part of the distribution -* License: configure [free as-is license](https://github.com/arangodb/arangodb/blob/master/configure#L11) -* License: ax_cxx_compile_stdcxx_11.m4 [free as-is license](https://github.com/arangodb/arangodb/blob/master/m4/ax_cxx_compile_stdcxx_11.m4#L25) - -### automake - -* Project Home: https://www.gnu.org/software/automake/ -* only used to generate code, not part of the distribution -* License: Makefile.in [free as-is license](https://raw.githubusercontent.com/arangodb/arangodb/master/Makefile.in) +* Project Home: https://cmake.org/ +* License OSI-approved BSD 3-clause License [https://cmake.org/licensing/] ### Bison 3.0 diff --git a/arangod/Aql/ExecutionEngine.cpp b/arangod/Aql/ExecutionEngine.cpp index 2949834b0c..bdd7d9616d 100644 --- a/arangod/Aql/ExecutionEngine.cpp +++ b/arangod/Aql/ExecutionEngine.cpp @@ -927,13 +927,16 @@ struct CoordinatorInstanciator : public WalkerWorker { THROW_ARANGO_EXCEPTION_MESSAGE( TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, message); } else { - // Only if the aresult was successful we will get here + // Only if the result was successful we will get here arangodb::basics::StringBuffer& body = res->result->getBody(); std::shared_ptr builder = VPackParser::fromJson(body.c_str(), body.length()); VPackSlice resultSlice = builder->slice(); - TRI_ASSERT(resultSlice.isNumber()); + if (!resultSlice.isNumber()) { + THROW_ARANGO_EXCEPTION_MESSAGE( + TRI_ERROR_INTERNAL, "got unexpected response from engine lock request"); + } auto engineId = resultSlice.getNumericValue(); TRI_ASSERT(engineId != 0); traverserEngines.emplace(engineId, 
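Editor's note on the ExecutionEngine.cpp hunk above: it replaces a bare TRI_ASSERT on the parsed lock response with an explicit exception, so a malformed coordinator reply fails loudly in release builds as well. Below is a minimal sketch of the same defensive pattern using the standalone velocypack API; the function name, the plain std::runtime_error, and the JSON body parameter are illustrative assumptions, not the engine's actual types.

    #include <velocypack/Parser.h>
    #include <velocypack/Slice.h>
    #include <stdexcept>
    #include <string>

    // Parse a JSON reply and insist on a numeric engine id instead of
    // asserting: an assert disappears in release builds, the exception does not.
    uint64_t extractEngineId(std::string const& body) {
      auto builder = arangodb::velocypack::Parser::fromJson(body);
      arangodb::velocypack::Slice slice = builder->slice();

      if (!slice.isNumber()) {
        // the patch uses THROW_ARANGO_EXCEPTION_MESSAGE; a std exception stands in here
        throw std::runtime_error("got unexpected response from engine lock request");
      }
      return slice.getNumber<uint64_t>();
    }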
shardSet); diff --git a/arangod/GeneralServer/VppCommTask.cpp b/arangod/GeneralServer/VppCommTask.cpp index 11d56adeac..479dd60e6b 100644 --- a/arangod/GeneralServer/VppCommTask.cpp +++ b/arangod/GeneralServer/VppCommTask.cpp @@ -40,6 +40,7 @@ #include #include +#include #include #include #include @@ -51,7 +52,8 @@ using namespace arangodb::rest; VppCommTask::VppCommTask(GeneralServer* server, TRI_socket_t sock, ConnectionInfo&& info, double timeout) : Task("VppCommTask"), - GeneralCommTask(server, sock, std::move(info), timeout) { + GeneralCommTask(server, sock, std::move(info), timeout), + _authenticatedUser() { _protocol = "vpp"; _readBuffer.reserve( _bufferLength); // ATTENTION <- this is required so we do not @@ -85,14 +87,12 @@ void VppCommTask::addResponse(VppResponse* response) { } LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "response -- end"; - // FIXME (obi) - // If the message is big we will create many small chunks in a loop. - // For the first tests we just send single Messages - // adds chunk header infromation and creates SingBuffer* that can be // used with _writeBuffers - auto buffers = createChunkForNetwork(slices, id, - std::numeric_limits::max()); + auto buffers = createChunkForNetwork( + slices, id, (std::numeric_limits::max)(), + false); // set some sensible maxchunk + // size and compression double const totalTime = getAgent(id)->elapsedSinceReadStart(); @@ -185,143 +185,17 @@ bool VppCommTask::processRead() { bool read_maybe_only_part_of_buffer = false; VppInputMessage message; // filled in CASE 1 or CASE 2b - // CASE 1: message is in one chunk if (chunkHeader._isFirst && chunkHeader._chunk == 1) { - _agents.emplace( - std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true))); - - auto agent = getAgent(chunkHeader._messageID); - agent->acquire(); - agent->requestStatisticsAgentSetReadStart(); - - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "chunk contains single message"; - std::size_t payloads = 0; - - try { - payloads = validateAndCount(vpackBegin, chunkEnd); - } catch (std::exception const& e) { - handleSimpleError(rest::ResponseCode::BAD, - TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(), - chunkHeader._messageID); - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "VPack Validation failed!" - << e.what(); - closeTask(rest::ResponseCode::BAD); - return false; - } catch (...) 
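Editor's note on the createChunkForNetwork call above: the extra parentheses in (std::numeric_limits::max)() are deliberate. On Windows, windows.h defines function-like min/max macros unless NOMINMAX is set, and wrapping the name in parentheses stops the preprocessor from treating it as a macro invocation. A tiny sketch; the uint32_t template argument is an assumption, since the original argument was lost in extraction.

    #include <cstdint>
    #include <limits>

    // Parentheses around the name keep a possible max() macro from expanding,
    // which is why the patch writes (std::numeric_limits<...>::max)().
    uint32_t maxChunkSize() {
      return (std::numeric_limits<uint32_t>::max)();
    }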
{ - handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID); - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "VPack Validation failed!"; - closeTask(rest::ResponseCode::BAD); - return false; + // CASE 1: message is in one chunk + if (auto rv = getMessageFromSingleChunk(chunkHeader, message, doExecute, + vpackBegin, chunkEnd)) { + return *rv; } - - VPackBuffer buffer; - buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd)); - message.set(chunkHeader._messageID, std::move(buffer), payloads); // fixme - - // message._header = VPackSlice(message._buffer.data()); - // if (payloadOffset) { - // message._payload = VPackSlice(message._buffer.data() + payloadOffset); - // } - - doExecute = true; - getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd(); - } - // CASE 2: message is in multiple chunks - auto incompleteMessageItr = _incompleteMessages.find(chunkHeader._messageID); - - // CASE 2a: chunk starts new message - if (chunkHeader._isFirst) { // first chunk of multi chunk message - _agents.emplace( - std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true))); - - auto agent = getAgent(chunkHeader._messageID); - agent->acquire(); - agent->requestStatisticsAgentSetReadStart(); - - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "chunk starts a new message"; - if (incompleteMessageItr != _incompleteMessages.end()) { - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) - << "VppCommTask: " - << "Message should be first but is already in the Map of incomplete " - "messages"; - closeTask(rest::ResponseCode::BAD); - return false; + } else { + if (auto rv = getMessageFromMultiChunks(chunkHeader, message, doExecute, + vpackBegin, chunkEnd)) { + return *rv; } - - // TODO: is a 32bit value sufficient for the messageLength here? 
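Editor's note on the refactored processRead() above: the extracted chunk handlers return a tri-state boost::optional so the caller can either return early or keep going. A minimal sketch of that convention; the handler name and the flags are illustrative, not the real VppCommTask signatures.

    #include <boost/optional.hpp>

    // An engaged optional carries the value processRead() should return
    // immediately; boost::none means "no early exit, keep processing".
    boost::optional<bool> handleChunk(bool malformed, bool connectionClosed) {
      if (malformed) {
        return false;       // caller returns false: stop handling this task
      }
      if (connectionClosed) {
        return true;        // caller returns true
      }
      return boost::none;   // caller continues with the assembled message
    }

    bool processReadSketch() {
      if (auto rv = handleChunk(false, false)) {
        return *rv;         // early return, exactly like the patched processRead()
      }
      // ... continue handling the complete message ...
      return true;
    }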
- IncompleteVPackMessage message( - static_cast(chunkHeader._messageLength), - chunkHeader._chunk /*number of chunks*/); - message._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd)); - auto insertPair = _incompleteMessages.emplace( - std::make_pair(chunkHeader._messageID, std::move(message))); - if (!insertPair.second) { - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "insert failed"; - closeTask(rest::ResponseCode::BAD); - return false; - } - - // CASE 2b: chunk continues a message - } else { // followup chunk of some mesage - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "chunk continues a message"; - if (incompleteMessageItr == _incompleteMessages.end()) { - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) - << "VppCommTask: " - << "found message without previous part"; - closeTask(rest::ResponseCode::BAD); - return false; - } - auto& im = incompleteMessageItr->second; // incomplete Message - im._currentChunk++; - assert(im._currentChunk == chunkHeader._chunk); - im._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd)); - // check buffer longer than length - - // MESSAGE COMPLETE - if (im._currentChunk == im._numberOfChunks - 1 /* zero based counting */) { - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "chunk completes a message"; - std::size_t payloads = 0; - - try { - payloads = - validateAndCount(reinterpret_cast(im._buffer.data()), - reinterpret_cast( - im._buffer.data() + im._buffer.byteSize())); - - } catch (std::exception const& e) { - handleSimpleError(rest::ResponseCode::BAD, - TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(), - chunkHeader._messageID); - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "VPack Validation failed!" - << e.what(); - closeTask(rest::ResponseCode::BAD); - return false; - } catch (...) 
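Editor's note on the multi-chunk path above (CASE 2b): follow-up chunks are appended to an incomplete message until the last expected chunk arrives. A simplified, self-contained sketch of that reassembly state, with std::string standing in for the VPackBuffer and all real error handling omitted.

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Loosely modelled on IncompleteVPackMessage: the first chunk announces
    // the chunk count and initialises the struct; follow-up chunks go through
    // appendChunk() below.
    struct IncompleteMessage {
      std::size_t numberOfChunks;    // announced in the first chunk
      std::size_t currentChunk = 0;  // zero-based, the first chunk is 0
      std::string buffer;            // concatenated VelocyPack payload
    };

    // Returns true when this follow-up chunk completes the message.
    bool appendChunk(IncompleteMessage& im, std::size_t chunkNumber,
                     char const* begin, char const* end) {
      ++im.currentChunk;
      assert(im.currentChunk == chunkNumber);  // chunks must arrive in order
      im.buffer.append(begin, static_cast<std::size_t>(end - begin));
      return im.currentChunk == im.numberOfChunks - 1;  // zero-based counting
    }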
{ - handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID); - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " - << "VPack Validation failed!"; - closeTask(rest::ResponseCode::BAD); - return false; - } - - message.set(chunkHeader._messageID, std::move(im._buffer), payloads); - _incompleteMessages.erase(incompleteMessageItr); - // check length - - doExecute = true; - getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd(); - } - LOG_TOPIC(DEBUG, Logger::COMMUNICATION) - << "VppCommTask: " - << "chunk does not complete a message"; } read_maybe_only_part_of_buffer = true; @@ -346,6 +220,7 @@ bool VppCommTask::processRead() { << "\"," << message.payload().toJson() << "\""; + // get type of request int type = meta::underlyingValue(rest::RequestType::ILLEGAL); try { type = header.at(1).getInt(); @@ -357,14 +232,47 @@ bool VppCommTask::processRead() { closeTask(rest::ResponseCode::BAD); return false; } + + // handle request types if (type == 1000) { - // do auth + // do authentication + // std::string encryption = header.at(2).copyString(); + std::string user = header.at(3).copyString(); + std::string pass = header.at(4).copyString(); + auto auth = basics::StringUtils::encodeBase64(user + ":" + pass); + AuthResult result = GeneralServerFeature::AUTH_INFO.checkAuthentication( + AuthInfo::AuthType::BASIC, auth); + + if (result._authorized) { + _authenticatedUser = std::move(user); + handleSimpleError(rest::ResponseCode::OK, TRI_ERROR_NO_ERROR, + "authentication successful", chunkHeader._messageID); + } else { + _authenticatedUser.clear(); + handleSimpleError(rest::ResponseCode::UNAUTHORIZED, + TRI_ERROR_HTTP_UNAUTHORIZED, "authentication failed", + chunkHeader._messageID); + } } else { - // check auth // the handler will take ownersip of this pointer std::unique_ptr request(new VppRequest( _connectionInfo, std::move(message), chunkHeader._messageID)); GeneralServerFeature::HANDLER_FACTORY->setRequestContext(request.get()); + request->setUser(_authenticatedUser); + + // check authentication + std::string const& dbname = request->databaseName(); + if (!_authenticatedUser.empty() || !dbname.empty()) { + AuthLevel level = GeneralServerFeature::AUTH_INFO.canUseDatabase( + _authenticatedUser, dbname); + + if (level != AuthLevel::RW) { + handleSimpleError( + rest::ResponseCode::UNAUTHORIZED, TRI_ERROR_FORBIDDEN, + TRI_errno_string(TRI_ERROR_FORBIDDEN), chunkHeader._messageID); + } + } + // make sure we have a database if (request->requestContext() == nullptr) { handleSimpleError(rest::ResponseCode::NOT_FOUND, @@ -455,3 +363,149 @@ void VppCommTask::handleSimpleError(rest::ResponseCode responseCode, _clientClosed = true; } } + +boost::optional VppCommTask::getMessageFromSingleChunk( + ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute, + char const* vpackBegin, char const* chunkEnd) { + // add agent for this new message + _agents.emplace( + std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true))); + + auto agent = getAgent(chunkHeader._messageID); + agent->acquire(); + agent->requestStatisticsAgentSetReadStart(); + + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "chunk contains single message"; + std::size_t payloads = 0; + + try { + payloads = validateAndCount(vpackBegin, chunkEnd); + } catch (std::exception const& e) { + handleSimpleError(rest::ResponseCode::BAD, + TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(), + chunkHeader._messageID); + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "VPack 
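Editor's note on the new authentication branch above: a request header of type 1000 is treated as an authentication message, with the user name read from index 3 and the password from index 4; the server then encodes "user:pass" as base64 and runs it through the basic-auth check. A hedged sketch of building such a header with a velocypack Builder; the value at index 0 (protocol version) and the "plain" encryption label are assumptions for illustration only.

    #include <string>
    #include <velocypack/Builder.h>
    #include <velocypack/Value.h>

    using arangodb::velocypack::Builder;
    using arangodb::velocypack::Value;

    // Shapes a header the way the authentication branch reads it:
    // [0] version (assumed), [1] type 1000, [2] encryption, [3] user, [4] password.
    Builder buildAuthHeader(std::string const& user, std::string const& pass) {
      Builder b;
      b.openArray();
      b.add(Value(1));        // [0] protocol version (assumption)
      b.add(Value(1000));     // [1] type: authentication request
      b.add(Value("plain"));  // [2] encryption (read but unused in the patch)
      b.add(Value(user));     // [3] user name
      b.add(Value(pass));     // [4] password
      b.close();
      return b;
    }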
Validation failed!" + << e.what(); + closeTask(rest::ResponseCode::BAD); + return false; + } catch (...) { + handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID); + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "VPack Validation failed!"; + closeTask(rest::ResponseCode::BAD); + return false; + } + + VPackBuffer buffer; + buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd)); + message.set(chunkHeader._messageID, std::move(buffer), payloads); // fixme + + doExecute = true; + getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd(); + return boost::none; +} + +boost::optional VppCommTask::getMessageFromMultiChunks( + ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute, + char const* vpackBegin, char const* chunkEnd) { + // CASE 2: message is in multiple chunks + auto incompleteMessageItr = _incompleteMessages.find(chunkHeader._messageID); + + // CASE 2a: chunk starts new message + if (chunkHeader._isFirst) { // first chunk of multi chunk message + // add agent for this new message + _agents.emplace( + std::make_pair(chunkHeader._messageID, RequestStatisticsAgent(true))); + + auto agent = getAgent(chunkHeader._messageID); + agent->acquire(); + agent->requestStatisticsAgentSetReadStart(); + + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "chunk starts a new message"; + if (incompleteMessageItr != _incompleteMessages.end()) { + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) + << "VppCommTask: " + << "Message should be first but is already in the Map of " + "incomplete " + "messages"; + closeTask(rest::ResponseCode::BAD); + return false; + } + + // TODO: is a 32bit value sufficient for the messageLength here? + IncompleteVPackMessage message( + static_cast(chunkHeader._messageLength), + chunkHeader._chunk /*number of chunks*/); + message._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd)); + auto insertPair = _incompleteMessages.emplace( + std::make_pair(chunkHeader._messageID, std::move(message))); + if (!insertPair.second) { + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "insert failed"; + closeTask(rest::ResponseCode::BAD); + return false; + } + + // CASE 2b: chunk continues a message + } else { // followup chunk of some mesage + // do not add agent for this continued message + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "chunk continues a message"; + if (incompleteMessageItr == _incompleteMessages.end()) { + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) + << "VppCommTask: " + << "found message without previous part"; + closeTask(rest::ResponseCode::BAD); + return false; + } + auto& im = incompleteMessageItr->second; // incomplete Message + im._currentChunk++; + assert(im._currentChunk == chunkHeader._chunk); + im._buffer.append(vpackBegin, std::distance(vpackBegin, chunkEnd)); + // check buffer longer than length + + // MESSAGE COMPLETE + if (im._currentChunk == im._numberOfChunks - 1 /* zero based counting */) { + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "chunk completes a message"; + std::size_t payloads = 0; + + try { + payloads = + validateAndCount(reinterpret_cast(im._buffer.data()), + reinterpret_cast( + im._buffer.data() + im._buffer.byteSize())); + + } catch (std::exception const& e) { + handleSimpleError(rest::ResponseCode::BAD, + TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, e.what(), + chunkHeader._messageID); + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "VPack Validation failed!" 
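Editor's note on the duplicate-first-chunk check above: it relies on emplace() returning a pair whose bool member is false when the key already exists, which is how a second "first chunk" for a message id that is already being assembled gets rejected. A tiny self-contained example of that behaviour; the container here merely stands in for _incompleteMessages.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    int main() {
      std::unordered_map<uint64_t, std::string> incompleteMessages;

      auto first  = incompleteMessages.emplace(42, "chunk data");
      auto second = incompleteMessages.emplace(42, "duplicate first chunk");

      std::cout << std::boolalpha
                << first.second << ' '    // true:  inserted
                << second.second << '\n'; // false: id 42 already known -> closeTask()
      return 0;
    }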
+ << e.what(); + closeTask(rest::ResponseCode::BAD); + return false; + } catch (...) { + handleSimpleError(rest::ResponseCode::BAD, chunkHeader._messageID); + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) << "VppCommTask: " + << "VPack Validation failed!"; + closeTask(rest::ResponseCode::BAD); + return false; + } + + message.set(chunkHeader._messageID, std::move(im._buffer), payloads); + _incompleteMessages.erase(incompleteMessageItr); + // check length + + doExecute = true; + getAgent(chunkHeader._messageID)->requestStatisticsAgentSetReadEnd(); + } + LOG_TOPIC(DEBUG, Logger::COMMUNICATION) + << "VppCommTask: " + << "chunk does not complete a message"; + } + return boost::none; +} diff --git a/arangod/GeneralServer/VppCommTask.h b/arangod/GeneralServer/VppCommTask.h index 17ee961d4c..40df7dcd4d 100644 --- a/arangod/GeneralServer/VppCommTask.h +++ b/arangod/GeneralServer/VppCommTask.h @@ -29,6 +29,7 @@ #include "lib/Rest/VppRequest.h" #include "lib/Rest/VppResponse.h" +#include #include namespace arangodb { @@ -122,6 +123,16 @@ class VppCommTask : public GeneralCommTask { ChunkHeader readChunkHeader(); // sub-function of processRead void replyToIncompleteMessages(); + boost::optional getMessageFromSingleChunk( + ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute, + char const* vpackBegin, char const* chunkEnd); + + boost::optional getMessageFromMultiChunks( + ChunkHeader const& chunkHeader, VppInputMessage& message, bool& doExecute, + char const* vpackBegin, char const* chunkEnd); + + std::string _authenticatedUser; + // user // authenticated or not // database aus url diff --git a/arangod/Indexes/PrimaryIndex.cpp b/arangod/Indexes/PrimaryIndex.cpp index 71ca86db93..4c48adff95 100644 --- a/arangod/Indexes/PrimaryIndex.cpp +++ b/arangod/Indexes/PrimaryIndex.cpp @@ -209,11 +209,11 @@ void PrimaryIndex::toVelocyPackFigures(VPackBuilder& builder) const { } int PrimaryIndex::insert(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "insert() called for primary index"); } int PrimaryIndex::remove(arangodb::Transaction*, TRI_doc_mptr_t const*, bool) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "remove() called for primary index"); } /// @brief unload the index data from memory diff --git a/arangod/RestServer/BootstrapFeature.cpp b/arangod/RestServer/BootstrapFeature.cpp index c3d816453e..618468d417 100644 --- a/arangod/RestServer/BootstrapFeature.cpp +++ b/arangod/RestServer/BootstrapFeature.cpp @@ -35,10 +35,6 @@ #include "RestServer/DatabaseFeature.h" #include "V8Server/V8DealerFeature.h" -#ifdef USE_ENTERPRISE -#include "Enterprise/Version.h" -#endif - using namespace arangodb; using namespace arangodb::application_features; using namespace arangodb::options; @@ -160,13 +156,8 @@ void BootstrapFeature::start() { // Start service properly: rest::RestHandlerFactory::setMaintenance(false); -#ifdef USE_ENTERPRISE - LOG(INFO) << "ArangoDB (enterprise version " << ARANGODB_VERSION_FULL - << " / " << ENTERPRISE_VERSION << ") is ready for business. Have fun!"; -#else LOG(INFO) << "ArangoDB (version " << ARANGODB_VERSION_FULL << ") is ready for business. 
Have fun!"; -#endif if (_bark) { LOG(INFO) << "The dog says: wau wau!"; diff --git a/arangod/V8Server/v8-vocbase.cpp b/arangod/V8Server/v8-vocbase.cpp index 28de7a4d9e..e3da3d4a5c 100644 --- a/arangod/V8Server/v8-vocbase.cpp +++ b/arangod/V8Server/v8-vocbase.cpp @@ -1917,7 +1917,23 @@ static void JS_VersionServer(v8::FunctionCallbackInfo const& args) { TRI_V8_TRY_CATCH_BEGIN(isolate); v8::HandleScope scope(isolate); - TRI_V8_RETURN(TRI_V8_ASCII_STRING(ARANGODB_VERSION)); + bool details = false; + if (args.Length() > 0) { + details = TRI_ObjectToBoolean(args[0]); + } + + if (!details) { + // return version string + TRI_V8_RETURN(TRI_V8_ASCII_STRING(ARANGODB_VERSION)); + } + + // return version details + VPackBuilder builder; + builder.openObject(); + rest::Version::getVPack(builder); + builder.close(); + + TRI_V8_RETURN(TRI_VPackToV8(isolate, builder.slice())); TRI_V8_TRY_CATCH_END } diff --git a/arangod/VocBase/LogicalCollection.cpp b/arangod/VocBase/LogicalCollection.cpp index c0cf7a6d41..405a6f5415 100644 --- a/arangod/VocBase/LogicalCollection.cpp +++ b/arangod/VocBase/LogicalCollection.cpp @@ -80,6 +80,7 @@ class IndexFiller { void operator()() { int res = TRI_ERROR_INTERNAL; + TRI_ASSERT(_idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); try { res = _collection->fillIndex(_trx, _idx); @@ -210,7 +211,6 @@ static std::shared_ptr PrepareIndexFromSlice(VPackSlice info, VPackSlice value = info.get("type"); if (!value.isString()) { - // FIXME Intenral Compatibility. // Compatibility with old v8-vocindex. if (generateKey) { THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); @@ -232,8 +232,7 @@ static std::shared_ptr PrepareIndexFromSlice(VPackSlice info, iid = Helper::getNumericValue(info, "id", 0); } else if (!generateKey) { // In the restore case it is forbidden to NOT have id - LOG(ERR) << "ignoring index, index identifier could not be located"; - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot restore index without index identifier"); } if (iid == 0 && !isClusterConstructor) { @@ -249,7 +248,7 @@ static std::shared_ptr PrepareIndexFromSlice(VPackSlice info, case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: { if (!isClusterConstructor) { // this indexes cannot be created directly - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create primary index"); } newIdx.reset(new arangodb::PrimaryIndex(col)); break; @@ -257,7 +256,7 @@ static std::shared_ptr PrepareIndexFromSlice(VPackSlice info, case arangodb::Index::TRI_IDX_TYPE_EDGE_INDEX: { if (!isClusterConstructor) { // this indexes cannot be created directly - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot create edge index"); } newIdx.reset(new arangodb::EdgeIndex(iid, col)); break; @@ -281,7 +280,7 @@ static std::shared_ptr PrepareIndexFromSlice(VPackSlice info, break; #else THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED, - "index type not supported in this build"); + "index type 'persistent' not supported in this build"); #endif } case arangodb::Index::TRI_IDX_TYPE_FULLTEXT_INDEX: { @@ -445,7 +444,12 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i } } } - + + /* + if (!isCluster) { + createInitialIndexes(); + } +*/ auto indexesSlice = info.get("indexes"); if (indexesSlice.isArray()) { bool const isCluster = ServerState::instance()->isRunningInCluster(); @@ -457,15 +461,27 @@ 
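Editor's note on PrepareIndexFromSlice above: it first validates that the index definition carries a string "type" attribute and then reads the numeric "id", falling back to 0 when a fresh index may generate one. A simplified sketch of that parsing step with the velocypack API; error handling is reduced to std::runtime_error, and the real code additionally accepts string ids and distinguishes the restore case.

    #include <velocypack/Slice.h>
    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <utility>

    using arangodb::velocypack::Slice;

    std::pair<std::string, uint64_t> readIndexDefinition(Slice info) {
      Slice type = info.get("type");
      if (!type.isString()) {
        throw std::runtime_error("index definition has no string 'type' attribute");
      }

      uint64_t iid = 0;
      Slice id = info.get("id");
      if (id.isNumber()) {
        iid = id.getNumber<uint64_t>();  // restore case: the id must be preserved
      }
      return {type.copyString(), iid};
    }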
LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase, VPackSlice const& i // TODO Handle Properly continue; } + auto idx = PrepareIndexFromSlice(v, false, this, true); + if (isCluster) { addIndexCoordinator(idx, false); } else { +/* if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX || + idx->type() == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX) { + // already added those types earlier + continue; + } +*/ addIndex(idx); } } } + if (_indexes.empty()) { + createInitialIndexes(); + } + if (!ServerState::instance()->isCoordinator() && isPhysical) { // If we are not in the coordinator we need a path // to the physical data. @@ -764,6 +780,7 @@ LogicalCollection::getIndexes() const { // or it's indexes are freed the pointer returned will get invalidated. arangodb::PrimaryIndex* LogicalCollection::primaryIndex() const { TRI_ASSERT(!_indexes.empty()); + TRI_ASSERT(_indexes[0]->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); // the primary index must be the index at position #0 return static_cast(_indexes[0].get()); } @@ -1124,7 +1141,7 @@ PhysicalCollection* LogicalCollection::createPhysical() { void LogicalCollection::open(bool ignoreErrors) { VPackBuilder builder; StorageEngine* engine = EngineSelectorFeature::ENGINE; - engine->getCollectionInfo(_vocbase, cid(), builder, false, 0); + engine->getCollectionInfo(_vocbase, cid(), builder, true, 0); double start = TRI_microtime(); @@ -1135,17 +1152,9 @@ void LogicalCollection::open(bool ignoreErrors) { int res = openWorker(ignoreErrors); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot open document collection from path '" << path() << "'"; - THROW_ARANGO_EXCEPTION(res); + THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot open document collection from path '") + path() + "': " + TRI_errno_string(res)); } - res = createInitialIndexes(); - - if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res); - THROW_ARANGO_EXCEPTION(res); - } - arangodb::SingleCollectionTransaction trx( arangodb::StandaloneTransactionContext::Create(_vocbase), cid(), TRI_TRANSACTION_WRITE); @@ -1309,6 +1318,7 @@ std::shared_ptr LogicalCollection::createIndex(Transaction* trx, return idx; } + TRI_ASSERT(idx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); int res = fillIndex(trx, idx.get(), false); if (res != TRI_ERROR_NO_ERROR) { @@ -1336,21 +1346,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info, if (!info.isObject()) { return TRI_ERROR_INTERNAL; } - /* FIXME Old style First check if iid is okay and update server tick - TRI_idx_iid_t iid; - if (iis.isNumber()) { - iid = iis.getNumericValue(); - } else if (iis.isString()) { - std::string tmp = iis.copyString(); - iid = static_cast(basics::StringUtils::uint64(tmp)); - } else { - LOG(ERR) << "ignoring index, index identifier could not be located"; - - return TRI_ERROR_INTERNAL; - } - - TRI_UpdateTickServer(iid); - */ + // We create a new Index object to make sure that the index // is not handed out except for a successful case. std::shared_ptr newIdx; @@ -1366,6 +1362,7 @@ int LogicalCollection::restoreIndex(Transaction* trx, VPackSlice const& info, // FIXME New style. Update tick after successful creation of index. 
TRI_UpdateTickServer(newIdx->id()); + TRI_ASSERT(newIdx.get()->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); int res = fillIndex(trx, newIdx.get()); if (res != TRI_ERROR_NO_ERROR) { @@ -1516,21 +1513,16 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) { } /// @brief creates the initial indexes for the collection -int LogicalCollection::createInitialIndexes() { +void LogicalCollection::createInitialIndexes() { // TODO Properly fix this. The outside should make sure that only NEW collections // try to create the indexes. if (!_indexes.empty()) { - return TRI_ERROR_NO_ERROR; + return; } // create primary index auto primaryIndex = std::make_shared(this); - - try { - addIndex(primaryIndex); - } catch (...) { - return TRI_ERROR_OUT_OF_MEMORY; - } + addIndex(primaryIndex); // create edges index if (_type == TRI_COL_TYPE_EDGE) { @@ -1539,16 +1531,10 @@ int LogicalCollection::createInitialIndexes() { iid = _planId; } - try { - auto edgeIndex = std::make_shared(iid, this); + auto edgeIndex = std::make_shared(iid, this); - addIndex(edgeIndex); - } catch (...) { - return TRI_ERROR_OUT_OF_MEMORY; - } + addIndex(edgeIndex); } - - return TRI_ERROR_NO_ERROR; } /// @brief iterator for index open @@ -1633,6 +1619,7 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) { // now actually fill the secondary indexes for (size_t i = 1; i < n; ++i) { auto idx = _indexes[i]; + TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); // index threads must come first, otherwise this thread will block the // loop and @@ -1685,6 +1672,9 @@ int LogicalCollection::fillIndexes(arangodb::Transaction* trx) { } void LogicalCollection::addIndex(std::shared_ptr idx) { + // primary index must be added at position 0 + TRI_ASSERT(idx->type() != arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX || _indexes.empty()); + _indexes.emplace_back(idx); // update statistics @@ -2364,6 +2354,7 @@ int LogicalCollection::rollbackOperation(arangodb::Transaction* trx, int LogicalCollection::fillIndex(arangodb::Transaction* trx, arangodb::Index* idx, bool skipPersistent) { + TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); TRI_ASSERT(!ServerState::instance()->isCoordinator()); if (!useSecondaryIndexes()) { return TRI_ERROR_NO_ERROR; @@ -2490,6 +2481,7 @@ int LogicalCollection::fillIndexSequential(arangodb::Transaction* trx, auto primaryIndex = this->primaryIndex(); size_t nrUsed = primaryIndex->size(); + TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); idx->sizeHint(trx, nrUsed); if (nrUsed > 0) { @@ -2965,6 +2957,7 @@ int LogicalCollection::insertSecondaryIndexes( for (size_t i = 1; i < n; ++i) { auto idx = _indexes[i]; + TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); if (!useSecondary && !idx->isPersistent()) { continue; @@ -3007,6 +3000,7 @@ int LogicalCollection::deleteSecondaryIndexes( for (size_t i = 1; i < n; ++i) { auto idx = _indexes[i]; + TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX); if (!useSecondary && !idx->isPersistent()) { continue; diff --git a/arangod/VocBase/LogicalCollection.h b/arangod/VocBase/LogicalCollection.h index 45b25bec5e..e4687a77e0 100644 --- a/arangod/VocBase/LogicalCollection.h +++ b/arangod/VocBase/LogicalCollection.h @@ -359,7 +359,7 @@ class LogicalCollection { // SECTION: Index creation /// @brief creates the initial indexes for the collection - int createInitialIndexes(); + void createInitialIndexes(); int openWorker(bool ignoreErrors); diff --git 
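Editor's note on the assertions added above: they all defend one invariant, namely that the primary index is created exactly once and always sits at position 0 of _indexes, so primaryIndex() may simply cast the first element. A compact sketch of that invariant with stand-in types; this is not the real LogicalCollection interface.

    #include <cassert>
    #include <memory>
    #include <utility>
    #include <vector>

    enum class IndexType { Primary, Edge, Hash };

    struct Index {
      explicit Index(IndexType t) : type(t) {}
      IndexType type;
    };

    struct CollectionSketch {
      std::vector<std::shared_ptr<Index>> indexes;

      void addIndex(std::shared_ptr<Index> idx) {
        // a primary index may only be added while the list is still empty
        assert(idx->type != IndexType::Primary || indexes.empty());
        indexes.emplace_back(std::move(idx));
      }

      Index* primaryIndex() const {
        assert(!indexes.empty());
        assert(indexes[0]->type == IndexType::Primary);  // must be slot #0
        return indexes[0].get();
      }
    };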
a/arangod/VocBase/TraverserOptions.cpp b/arangod/VocBase/TraverserOptions.cpp index 1e916d2e22..50afe62128 100644 --- a/arangod/VocBase/TraverserOptions.cpp +++ b/arangod/VocBase/TraverserOptions.cpp @@ -306,9 +306,14 @@ arangodb::traverser::TraverserOptions::TraverserOptions( _vertexExpressions.reserve(read.length()); for (auto const& info : VPackObjectIterator(read)) { size_t d = basics::StringUtils::uint64(info.key.copyString()); +#ifdef ARANGODB_ENABLE_MAINAINER_MODE auto it = _vertexExpressions.emplace( d, new aql::Expression(query->ast(), info.value)); TRI_ASSERT(it.second); +#else + _vertexExpressions.emplace( + d, new aql::Expression(query->ast(), info.value)); +#endif } } diff --git a/arangod/VocBase/vocbase.cpp b/arangod/VocBase/vocbase.cpp index cc86d26348..65b0788d70 100644 --- a/arangod/VocBase/vocbase.cpp +++ b/arangod/VocBase/vocbase.cpp @@ -303,14 +303,6 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker( TRI_ASSERT(collection != nullptr); try { - // Maybe the ordering is broken now - // create document collection - int res = collection->createInitialIndexes(); - - if (res != TRI_ERROR_NO_ERROR) { - THROW_ARANGO_EXCEPTION(res); - } - // cid might have been assigned cid = collection->cid(); diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/views/documentsView.js b/js/apps/system/_admin/aardvark/APP/frontend/js/views/documentsView.js index 4e21f08f2e..200ef8b91d 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/views/documentsView.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/views/documentsView.js @@ -414,6 +414,8 @@ // add those filters also to the collection self.collection.addFilter(f.attribute, f.operator, f.value); }); + + self.rerender(); }, addFilterItem: function () { diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/views/graphViewer2.js b/js/apps/system/_admin/aardvark/APP/frontend/js/views/graphViewer2.js index da7bded52b..1040cb39a1 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/views/graphViewer2.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/views/graphViewer2.js @@ -1133,6 +1133,8 @@ arangoHelper.arangoError('Graph', 'Could not expand node: ' + id + '.'); } }); + + self.removeHelp(); }, checkExpand: function (data, origin) { @@ -1671,13 +1673,14 @@ e.color = e.originalColor; }); + $('.nodeInfoDiv').remove(); s.refresh({ skipIndexation: true }); } }; s.bind('rightClickStage', function (e) { - unhighlightNodes(); self.nodeHighlighted = 'undefinedid'; + unhighlightNodes(); }); s.bind('rightClickNode', function (e) { diff --git a/js/apps/system/_admin/aardvark/APP/frontend/scss/_graphViewer2.scss b/js/apps/system/_admin/aardvark/APP/frontend/scss/_graphViewer2.scss index 5ba46f6020..decabba558 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/scss/_graphViewer2.scss +++ b/js/apps/system/_admin/aardvark/APP/frontend/scss/_graphViewer2.scss @@ -258,7 +258,6 @@ background-color: $c-bluegrey-dark; border-radius: 2px; color: $c-white; - padding: 10px; - padding-left: 150px; + padding: 10px 20px; } } diff --git a/js/client/modules/@arangodb/arango-database.js b/js/client/modules/@arangodb/arango-database.js index 7820c4fcbd..6208f0b063 100644 --- a/js/client/modules/@arangodb/arango-database.js +++ b/js/client/modules/@arangodb/arango-database.js @@ -526,12 +526,13 @@ ArangoDatabase.prototype._dropIndex = function (id) { // / @brief returns the database version // ////////////////////////////////////////////////////////////////////////////// -ArangoDatabase.prototype._version = 
function () { - var requestResult = this._connection.GET('/_api/version'); +ArangoDatabase.prototype._version = function (details) { + var requestResult = this._connection.GET('/_api/version' + + (details ? '?details=true' : '')); arangosh.checkRequestResult(requestResult); - return requestResult.version; + return details ? requestResult : requestResult.version; }; // ////////////////////////////////////////////////////////////////////////////// diff --git a/js/server/tests/aql/aql-general-graph-28.js b/js/server/tests/aql/aql-general-graph-28.js index b39ee59f1e..3495e13f91 100644 --- a/js/server/tests/aql/aql-general-graph-28.js +++ b/js/server/tests/aql/aql-general-graph-28.js @@ -1551,7 +1551,7 @@ function ahuacatlQueryGeneralTraversalTestSuite() { ); }, - testGRAPH_SHOTEST_PATH_with_stopAtFirstMatch: function () { + testGRAPH_SHORTEST_PATH_with_stopAtFirstMatch: function () { var actual; actual = getQueryResults("FOR e IN arangodb::GRAPH_SHORTEST_PATH('werKenntWen', 'UnitTests_Frankfurter/Fritz', " + diff --git a/lib/ApplicationFeatures/WindowsServiceFeature.cpp b/lib/ApplicationFeatures/WindowsServiceFeature.cpp index 596b588c56..ee886f94dd 100644 --- a/lib/ApplicationFeatures/WindowsServiceFeature.cpp +++ b/lib/ApplicationFeatures/WindowsServiceFeature.cpp @@ -288,7 +288,7 @@ void WindowsServiceFeature::installService() { } SERVICE_DESCRIPTION description = { - "multi-model NoSQL database (version " ARANGODB_VERSION ")"}; + "multi-model NoSQL database (version " ARANGODB_VERSION_FULL ")"}; ChangeServiceConfig2(schService, SERVICE_CONFIG_DESCRIPTION, &description); std::cout << "INFO: added service with command line '" << command << "'" diff --git a/lib/Rest/Version.cpp b/lib/Rest/Version.cpp index fab153f745..945fc6a118 100644 --- a/lib/Rest/Version.cpp +++ b/lib/Rest/Version.cpp @@ -115,7 +115,7 @@ void Version::initialize() { #if USE_ENTERPRISE - Values["enterprise-version"] = ENTERPRISE_VERSION; + Values["enterprise-version"] = ARANGODB_ENTERPRISE_VERSION; #endif #if HAVE_ARANGODB_BUILD_REPOSITORY @@ -416,6 +416,8 @@ std::string Version::getDetailed() { //////////////////////////////////////////////////////////////////////////////// void Version::getVPack(VPackBuilder& dst) { + TRI_ASSERT(!dst.isClosed()); + for (auto const& it : Values) { std::string const& value = it.second; diff --git a/lib/Rest/Version.h b/lib/Rest/Version.h index b7a5a9b047..95f4927772 100644 --- a/lib/Rest/Version.h +++ b/lib/Rest/Version.h @@ -28,12 +28,33 @@ #include "Basics/build.h" +#ifdef USE_ENTERPRISE +#include "Enterprise/Version.h" + +#ifndef ARANGODB_ENTERPRISE_VERSION +#error "enterprise version number is not defined" +#endif + +#ifdef _DEBUG +#define ARANGODB_VERSION_FULL ARANGODB_VERSION " " ARANGODB_ENTERPRISE_VERSION " [" TRI_PLATFORM "-DEBUG]" +#else +#define ARANGODB_VERSION_FULL ARANGODB_VERSION " " ARANGODB_ENTERPRISE_VERSION " [" TRI_PLATFORM "]" +#endif + +#else + +#ifdef ARANGODB_ENTERPRISE_VERSION +#error "enterprise version number should not be defined" +#endif + #ifdef _DEBUG #define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "-DEBUG]" #else #define ARANGODB_VERSION_FULL ARANGODB_VERSION " [" TRI_PLATFORM "]" #endif +#endif + namespace arangodb { namespace velocypack { class Builder; @@ -43,122 +64,65 @@ namespace rest { class Version { private: - ////////////////////////////////////////////////////////////////////////////// /// @brief create the version information - ////////////////////////////////////////////////////////////////////////////// - Version() = delete; 
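Editor's note on the Version.h hunk above: ARANGODB_VERSION_FULL is assembled purely from adjacent string literals, which the compiler merges into one string, and the enterprise build simply splices an extra literal into the middle. A small demonstration of that mechanism; the version and platform values below are made up for illustration.

    #include <iostream>

    // Adjacent string literals are concatenated by the compiler, which is how
    // ARANGODB_VERSION_FULL is put together in Version.h.
    #define DEMO_VERSION "3.1.0"
    #define DEMO_ENTERPRISE_VERSION "e3.1.0"
    #define DEMO_PLATFORM "linux"

    #define DEMO_VERSION_FULL \
      DEMO_VERSION " " DEMO_ENTERPRISE_VERSION " [" DEMO_PLATFORM "]"

    int main() {
      std::cout << DEMO_VERSION_FULL << '\n';  // prints: 3.1.0 e3.1.0 [linux]
      return 0;
    }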
Version(Version const&) = delete; Version& operator=(Version const&) = delete; public: - ////////////////////////////////////////////////////////////////////////////// /// @brief parse a version string into major, minor /// returns -1, -1 when the version string has an invalid format - ////////////////////////////////////////////////////////////////////////////// - static std::pair parseVersionString(std::string const&); - ////////////////////////////////////////////////////////////////////////////// /// @brief initialize - ////////////////////////////////////////////////////////////////////////////// - static void initialize(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get numeric server version - ////////////////////////////////////////////////////////////////////////////// - static int32_t getNumericServerVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get server version - ////////////////////////////////////////////////////////////////////////////// - static std::string getServerVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get BOOST version - ////////////////////////////////////////////////////////////////////////////// - static std::string getBoostVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get V8 version - ////////////////////////////////////////////////////////////////////////////// - static std::string getV8Version(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get OpenSSL version - ////////////////////////////////////////////////////////////////////////////// - static std::string getOpenSSLVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get libev version - ////////////////////////////////////////////////////////////////////////////// - static std::string getLibevVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get vpack version - ////////////////////////////////////////////////////////////////////////////// - static std::string getVPackVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get zlib version - ////////////////////////////////////////////////////////////////////////////// - static std::string getZLibVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get ICU version - ////////////////////////////////////////////////////////////////////////////// - static std::string getICUVersion(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get compiler - ////////////////////////////////////////////////////////////////////////////// - static std::string getCompiler(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get endianness - ////////////////////////////////////////////////////////////////////////////// - static std::string getEndianness(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get build date - ////////////////////////////////////////////////////////////////////////////// - static std::string getBuildDate(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get build repository - 
////////////////////////////////////////////////////////////////////////////// - static std::string getBuildRepository(); - ////////////////////////////////////////////////////////////////////////////// /// @brief return a server version string - ////////////////////////////////////////////////////////////////////////////// - static std::string getVerboseVersionString(); - ////////////////////////////////////////////////////////////////////////////// /// @brief get detailed version information as a (multi-line) string - ////////////////////////////////////////////////////////////////////////////// - static std::string getDetailed(); - ////////////////////////////////////////////////////////////////////////////// /// @brief VelocyPack all data - ////////////////////////////////////////////////////////////////////////////// - static void getVPack(arangodb::velocypack::Builder&); public: diff --git a/lib/Rest/VppMessage.h b/lib/Rest/VppMessage.h index 2b62ee7efe..8db20b9bc7 100644 --- a/lib/Rest/VppMessage.h +++ b/lib/Rest/VppMessage.h @@ -70,7 +70,7 @@ struct VppInputMessage { if (!_payload.empty()) { return _payload.front(); } - return VPackSlice{}; + return VPackSlice::noneSlice(); } std::vector const& payloads() const { return _payload; } @@ -111,7 +111,7 @@ struct VPackMessageNoOwnBuffer { if (_payloads.size() && _generateBody) { return _payloads.front(); } - return arangodb::basics::VelocyPackHelper::NullValue(); + return VPackSlice::noneSlice(); } std::vector payloads() { return _payloads; }
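Editor's note on the VppMessage.h hunk above: returning VPackSlice::noneSlice() instead of a JSON null makes "no payload" explicit, since None is velocypack's marker for an absent value and can be distinguished from a payload that happens to be null. A short sketch of that distinction using the public Slice API.

    #include <velocypack/Slice.h>
    #include <iostream>

    using arangodb::velocypack::Slice;

    int main() {
      Slice none = Slice::noneSlice();
      Slice null = Slice::nullSlice();

      std::cout << std::boolalpha
                << none.isNone() << ' '    // true  - nothing there
                << none.isNull() << ' '    // false - not a JSON null
                << null.isNull() << '\n';  // true  - an actual JSON null value
      return 0;
    }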