mirror of https://gitee.com/bigwinds/arangodb

Merge branch 'devel' of https://github.com/arangodb/arangodb into bug-fix/internal-issue-#345

commit 04bb3da337

@@ -153,6 +153,7 @@ if (USE_IRESEARCH)
set(IRESEARCH_EXCLUDE_STATIC_THIRD_PARTY_LIBS TRUE) # disable linking in of 3rd party libraries automatically
find_package(IResearch REQUIRED) # set IRESEARCH_BUILD_DIR

set(CMAKE_MACOSX_RPATH ON) # suppress cmake warning (use same value as cmake default)
set(CMAKE_MODULE_PATH_ORIGINAL ${CMAKE_MODULE_PATH}) # remember CMAKE_MODULE_PATH
list(APPEND CMAKE_MODULE_PATH
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake" # cmake overrides (must be first)
@@ -289,7 +289,7 @@ if(MSVC)
    MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
    COMMAND ${CMAKE_COMMAND} -E make_directory iql
-   COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy iql/parser.yy || bison --graph --report=all -o iql/parser.cc ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
+   COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy iql/parser.yy || bison --graph --report=all -Wnone -o iql/parser.cc ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy iql/parser.yy
  )
else()

@@ -298,7 +298,7 @@ else()
    MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
    COMMAND ${CMAKE_COMMAND} -E make_directory iql
-   COMMAND bison --graph --report=all -o iql/parser.cc ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
+   COMMAND bison --graph --report=all -Wnone -o iql/parser.cc ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
  )
endif()
@@ -256,18 +256,25 @@ irs::analysis::analyzer::ptr construct(
    }
  }

-  // interpret the cache_key as a locale name
-  std::string locale_name(cache_key.c_str(), cache_key.size());
-  auto locale = irs::locale_utils::locale(locale_name);
-  ignored_words_t buf;
+  try {
+    // interpret the cache_key as a locale name
+    std::string locale_name(cache_key.c_str(), cache_key.size());
+    auto locale = irs::locale_utils::locale(locale_name);
+    ignored_words_t buf;

-  if (!get_ignored_words(buf, locale)) {
-    IR_FRMT_WARN("Failed to retrieve 'ignored_words' while constructing text_token_stream with cache key: %s", cache_key.c_str());
-
-    return nullptr;
-  }
+    if (!get_ignored_words(buf, locale)) {
+      IR_FRMT_WARN("Failed to retrieve 'ignored_words' while constructing text_token_stream with cache key: %s", cache_key.c_str());
+
+      return nullptr;
+    }

-  return construct(cache_key, locale, std::move(buf));
+    return construct(cache_key, locale, std::move(buf));
+  } catch (...) {
+    IR_FRMT_ERROR("Caught error while constructing text_token_stream cache key: %s", cache_key.c_str());
+    IR_LOG_EXCEPTION();
+  }
+
+  return nullptr;
  }

////////////////////////////////////////////////////////////////////////////////
@@ -409,34 +416,41 @@ irs::analysis::analyzer::ptr make_json(const irs::string_ref& args) {
    return nullptr;
  }

-  static const rapidjson::Value empty;
-  auto locale = irs::locale_utils::locale(json["locale"].GetString());
-  auto& ignored_words = json.HasMember("ignored_words") ? json["ignored_words"] : empty;
-  auto& ignored_words_path = json.HasMember("ignored_words_path") ? json["ignored_words_path"] : empty;
+  try {
+    static const rapidjson::Value empty;
+    auto locale = irs::locale_utils::locale(json["locale"].GetString());
+    auto& ignored_words = json.HasMember("ignored_words") ? json["ignored_words"] : empty;
+    auto& ignored_words_path = json.HasMember("ignored_words_path") ? json["ignored_words_path"] : empty;

-  if (!ignored_words.IsArray()) {
-    return ignored_words_path.IsString()
-      ? construct(args, locale, ignored_words_path.GetString())
-      : construct(args, locale)
-    ;
-  }
+    if (!ignored_words.IsArray()) {
+      return ignored_words_path.IsString()
+        ? construct(args, locale, ignored_words_path.GetString())
+        : construct(args, locale)
+      ;
+    }

-  ignored_words_t buf;
+    ignored_words_t buf;

-  for (auto itr = ignored_words.Begin(), end = ignored_words.End(); itr != end; ++itr) {
-    if (!itr->IsString()) {
-      IR_FRMT_WARN("Non-string value in 'ignored_words' while constructing text_token_stream from JSON arguments: %s", args.c_str());
-
-      return nullptr;
-    }
-
-    buf.emplace(itr->GetString());
-  }
+    for (auto itr = ignored_words.Begin(), end = ignored_words.End(); itr != end; ++itr) {
+      if (!itr->IsString()) {
+        IR_FRMT_WARN("Non-string value in 'ignored_words' while constructing text_token_stream from JSON arguments: %s", args.c_str());
+
+        return nullptr;
+      }
+
+      buf.emplace(itr->GetString());
+    }

-  return ignored_words_path.IsString()
-    ? construct(args, locale, ignored_words_path.GetString(), std::move(buf))
-    : construct(args, locale, std::move(buf))
-  ;
+    return ignored_words_path.IsString()
+      ? construct(args, locale, ignored_words_path.GetString(), std::move(buf))
+      : construct(args, locale, std::move(buf))
+    ;
+  } catch (...) {
+    IR_FRMT_ERROR("Caught error while constructing text_token_stream from JSON arguments: %s", args.c_str());
+    IR_LOG_EXCEPTION();
+  }
+
+  return nullptr;
  }

////////////////////////////////////////////////////////////////////////////////
@@ -78,7 +78,7 @@ inline int path_stats(file_stat_t& info, const file_path_t path) {
  auto parts = irs::file_utils::path_parts(path);

  return file_stat(
-   parts.basename.null() ? std::wstring(parts.dirname).c_str() : path,
+   parts.basename.empty() ? std::wstring(parts.dirname).c_str() : path,
    &info
  );
#else
@@ -517,7 +517,7 @@ bool exists(bool& result, const file_path_t file) NOEXCEPT {
  result = 0 == path_stats(info, file);

- if (!result) {
+ if (!result && ENOENT != errno) {
    auto path = boost::locale::conv::utf_to_utf<char>(file);

    IR_FRMT_ERROR("Failed to get stat, error %d path: %s", errno, path.c_str());
@@ -532,16 +532,16 @@ bool exists_directory(bool& result, const file_path_t name) NOEXCEPT {
  result = 0 == path_stats(info, name);

- if (!result) {
-   auto path = boost::locale::conv::utf_to_utf<char>(name);
-
-   IR_FRMT_ERROR("Failed to get stat, error %d path: %s", errno, path.c_str());
- } else {
+ if (result) {
#ifdef _WIN32
    result = (info.st_mode & _S_IFDIR) > 0;
#else
    result = (info.st_mode & S_IFDIR) > 0;
#endif
+ } else if (ENOENT != errno) {
+   auto path = boost::locale::conv::utf_to_utf<char>(name);
+
+   IR_FRMT_ERROR("Failed to get stat, error %d path: %s", errno, path.c_str());
  }

  return true;
@@ -553,16 +553,16 @@ bool exists_file(bool& result, const file_path_t name) NOEXCEPT {
  result = 0 == path_stats(info, name);

- if (!result) {
-   auto path = boost::locale::conv::utf_to_utf<char>(name);
-
-   IR_FRMT_ERROR("Failed to get stat, error %d path: %s", errno, path.c_str());
- } else {
+ if (result) {
#ifdef _WIN32
    result = (info.st_mode & _S_IFREG) > 0;
#else
    result = (info.st_mode & S_IFREG) > 0;
#endif
+ } else if (ENOENT != errno) {
+   auto path = boost::locale::conv::utf_to_utf<char>(name);
+
+   IR_FRMT_ERROR("Failed to get stat, error %d path: %s", errno, path.c_str());
  }

  return true;
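Both of these fixes follow the same idiom: a stat() failure with errno == ENOENT simply means "does not exist", so result stays false while the check itself still succeeds; only other errno values are logged as real errors. A minimal standalone sketch of the idiom (the function name is hypothetical, not code from this commit):

#include <sys/stat.h>
#include <cerrno>
#include <cstdio>

// Hypothetical illustration of the ENOENT handling used above: only
// unexpected stat() failures are logged; "no such entry" is a normal answer.
bool exists_directory_sketch(bool& result, const char* path) {
  struct stat info;
  result = (0 == ::stat(path, &info));

  if (result) {
    result = (info.st_mode & S_IFDIR) > 0;  // exists; is it a directory?
  } else if (ENOENT != errno) {
    // any failure other than "no such entry" is a genuine error
    std::fprintf(stderr, "Failed to get stat, error %d path: %s\n", errno, path);
  }

  return true;  // the check itself succeeded; 'result' carries the answer
}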

@@ -1,6 +1,12 @@
devel
-----

* Improvement: the cluster-mode AQL query planner is now a bit more clever and
  can prepare AQL queries with less network overhead.
  This should speed up simple queries in cluster mode; for complex queries it
  will most likely not show any measurable effect.
  The improvement is most pronounced on collections with a very high number of
  shards.

* removed remainders of the dysfunctional `/_admin/cluster-test` and
  `/_admin/clusterCheckPort` API endpoints and removed them from the
  documentation

@@ -566,6 +566,13 @@ if (USE_MAINTAINER_MODE)
  find_program(AWK_EXECUTABLE awk)
endif ()

+option(USE_CATCH_TESTS "Compile catch C++ tests" ON)
+if (USE_CATCH_TESTS)
+  add_definitions("-DTEST_VIRTUAL=virtual")
+else()
+  add_definitions("-DTEST_VIRTUAL=")
+endif()
+
include(debugInformation)
find_program(READELF_EXECUTABLE readelf)
detect_binary_id_type(CMAKE_DEBUG_FILENAMES_SHA_SUM)
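The new TEST_VIRTUAL definition lets production builds keep methods non-virtual while test builds (USE_CATCH_TESTS=ON) can override them in mocks. A hedged sketch of how such a macro is typically used; the class and method here are invented for illustration:

// TEST_VIRTUAL expands to "virtual" when the catch tests are compiled, and
// to nothing otherwise, so production code pays no vtable cost.
#ifndef TEST_VIRTUAL
#define TEST_VIRTUAL  // as set up via add_definitions() above
#endif

class ExampleState {  // hypothetical class
 public:
  TEST_VIRTUAL ~ExampleState() = default;
  TEST_VIRTUAL int lockCollections() { return 0; }
};

// In a test build the method is virtual, so a mock can override it:
//   class MockExampleState : public ExampleState {
//     int lockCollections() override { return 42; }
//   };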
@@ -1012,7 +1019,6 @@ add_subdirectory(arangod)

add_subdirectory(Documentation)

-option(USE_CATCH_TESTS "Compile catch C++ tests" ON)
if (USE_CATCH_TESTS)
  add_subdirectory(tests)
endif()
@@ -7,7 +7,7 @@
@RESTURLPARAMETERS

@RESTDESCRIPTION
-TBD
+Returns an object containing the definition of the view identified by *view-name*.

@RESTURLPARAM{view-name,string,required}
The name of the view.
@@ -0,0 +1,43 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#include "AqlResult.h"
#include "Aql/ExecutionEngine.h"

using namespace arangodb;
using namespace arangodb::aql;

ExecutionEngineResult::ExecutionEngineResult() : Result(), _engine(nullptr) {}
ExecutionEngineResult::ExecutionEngineResult(int num)
    : Result(num), _engine(nullptr) {}
ExecutionEngineResult::ExecutionEngineResult(int num, std::string const& msg)
    : Result(num, msg), _engine(nullptr) {}
ExecutionEngineResult::ExecutionEngineResult(int num, std::string&& msg)
    : Result(num, msg), _engine(nullptr) {}
ExecutionEngineResult::ExecutionEngineResult(ExecutionEngine* engine)
    : Result(), _engine(engine) {}

// No responsibility for the pointer
ExecutionEngineResult::~ExecutionEngineResult() {}

ExecutionEngine* ExecutionEngineResult::engine() const { return _engine; }
@@ -0,0 +1,55 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
/// (same Apache 2.0 license header as above)
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_AQL_AQL_RESULT_H
#define ARANGOD_AQL_AQL_RESULT_H 1

#include "Basics/Common.h"
#include "Basics/Result.h"

namespace arangodb {
namespace aql {

class ExecutionEngine;

class ExecutionEngineResult : public Result {
 public:
  ExecutionEngineResult();
  ExecutionEngineResult(int errorNumber);
  ExecutionEngineResult(int errorNumber, std::string const& errorMessage);
  ExecutionEngineResult(int errorNumber, std::string&& errorMessage);
  ExecutionEngineResult(ExecutionEngine*);

  ~ExecutionEngineResult();

  ExecutionEngine* engine() const;

 private:
  ExecutionEngine* _engine;
};

}  // namespace aql
}  // namespace arangodb

#endif
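ExecutionEngineResult extends the plain Result error type with a non-owning engine pointer, so a builder can hand back either an error or the constructed engine through a single value. A hedged usage sketch; the calling code below is invented for illustration, only the types and the buildEngines signature come from this commit:

// Hypothetical caller: buildEngines() returns an ExecutionEngineResult that
// carries either an error or the engine pointer (which it does not own).
ExecutionEngineResult res = container.buildEngines(
    query, registry, dbname, restrictToShards, queryIds, lockedShards);

if (!res.ok()) {
  // propagate the error; no engine was handed out
  THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
}

ExecutionEngine* engine = res.engine();  // owned by the Query, not by res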
@@ -128,6 +128,23 @@ LogicalCollection* AqlTransaction::documentCollection(TRI_voc_cid_t cid) {

int AqlTransaction::lockCollections() { return state()->lockCollections(); }

+/// @brief count the number of documents in a collection
+/// Handles locks based on the collections known to this transaction
+/// (Coordinator only)
+OperationResult AqlTransaction::count(std::string const& collectionName,
+                                      bool aggregate) {
+  TRI_ASSERT(_state->status() == transaction::Status::RUNNING);
+
+  if (_state->isCoordinator()) {
+    // If the collection is known to this transaction, we do not need to lock
+    // on the DBServers (it is locked already); if it is not known, we need to lock.
+    bool needsToLock = (_collections.find(collectionName) == _collections.end());
+    return countCoordinator(collectionName, aggregate, needsToLock);
+  }
+
+  return countLocal(collectionName);
+}
+
// -----------------------------------------------------------------------------
// --SECTION--                                                       END-OF-FILE
// -----------------------------------------------------------------------------
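The coordinator branch only requests shard locks for collections the transaction has not already registered. A compact standalone distillation of that membership test (types and function name invented for illustration):

#include <set>
#include <string>

// Hypothetical distillation of the locking decision in AqlTransaction::count():
// a lock is requested only when the collection was not part of the
// transaction to begin with.
bool needs_lock(std::set<std::string> const& knownCollections,
                std::string const& collectionName) {
  return knownCollections.find(collectionName) == knownCollections.end();
}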
@@ -87,6 +87,11 @@ class AqlTransaction : public transaction::Methods {
  /// order via an HTTP call. This method is used to implement that HTTP action.
  int lockCollections() override;

+  /// @brief count the number of documents in a collection
+  /// Handles locks based on the collections known to this transaction
+  /// (Coordinator only)
+  OperationResult count(std::string const& collectionName, bool aggregate) override;
+
 protected:
  AqlTransaction(
      std::shared_ptr<transaction::Context> const& transactionContext,
@@ -716,6 +716,7 @@ bool ScatterBlock::hasMoreForShard(std::string const& shardId) {

  size_t clientId = getClientId(shardId);

+ TRI_ASSERT(_doneForClient.size() > clientId);
  if (_doneForClient.at(clientId)) {
    return false;
  }

@@ -742,6 +743,7 @@ int64_t ScatterBlock::remainingForShard(std::string const& shardId) {
  DEBUG_BEGIN_BLOCK();

  size_t clientId = getClientId(shardId);
+ TRI_ASSERT(_doneForClient.size() > clientId);
  if (_doneForClient.at(clientId)) {
    return 0;
  }

@@ -777,10 +779,12 @@ int ScatterBlock::getOrSkipSomeForShard(size_t atLeast, size_t atMost,

  size_t clientId = getClientId(shardId);

+ TRI_ASSERT(_doneForClient.size() > clientId);
  if (_doneForClient.at(clientId)) {
    return TRI_ERROR_NO_ERROR;
  }

+ TRI_ASSERT(_posForClient.size() > clientId);
  std::pair<size_t, size_t> pos = _posForClient.at(clientId);

  // pull more blocks from dependency if necessary . . .

@@ -912,10 +916,12 @@ bool DistributeBlock::hasMoreForShard(std::string const& shardId) {
  DEBUG_BEGIN_BLOCK();

  size_t clientId = getClientId(shardId);
+ TRI_ASSERT(_doneForClient.size() > clientId);
  if (_doneForClient.at(clientId)) {
    return false;
  }

+ TRI_ASSERT(_distBuffer.size() > clientId);
  if (!_distBuffer.at(clientId).empty()) {
    return true;
  }

@@ -942,6 +948,7 @@ int DistributeBlock::getOrSkipSomeForShard(size_t atLeast, size_t atMost,

  size_t clientId = getClientId(shardId);

+ TRI_ASSERT(_doneForClient.size() > clientId);
  if (_doneForClient.at(clientId)) {
    traceGetSomeEnd(result);
    return TRI_ERROR_NO_ERROR;
@@ -0,0 +1,207 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
/// (same Apache 2.0 license header as above)
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#include "EngineInfoContainerCoordinator.h"

#include "Aql/AqlResult.h"
#include "Aql/ClusterBlocks.h"
#include "Aql/ClusterNodes.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionNode.h"
#include "Aql/Query.h"
#include "Aql/QueryRegistry.h"
#include "VocBase/ticks.h"

using namespace arangodb;
using namespace arangodb::aql;

// -----------------------------------------------------------------------------
// --SECTION--                                             Coordinator Container
// -----------------------------------------------------------------------------

EngineInfoContainerCoordinator::EngineInfo::EngineInfo(QueryId id,
                                                       size_t idOfRemoteNode)
    : _id(id), _idOfRemoteNode(idOfRemoteNode) {
  TRI_ASSERT(_nodes.empty());
}

EngineInfoContainerCoordinator::EngineInfo::~EngineInfo() {
  // This container is not responsible for the nodes; they are managed by the
  // AST somewhere else.
}

EngineInfoContainerCoordinator::EngineInfo::EngineInfo(EngineInfo const&& other)
    : _id(other._id),
      _nodes(std::move(other._nodes)),
      _idOfRemoteNode(other._idOfRemoteNode) {}

void EngineInfoContainerCoordinator::EngineInfo::addNode(ExecutionNode* en) {
  _nodes.emplace_back(en);
}

Result EngineInfoContainerCoordinator::EngineInfo::buildEngine(
    Query* query, QueryRegistry* queryRegistry, std::string const& dbname,
    std::unordered_set<std::string> const& restrictToShards,
    std::unordered_map<std::string, std::string> const& dbServerQueryIds,
    std::vector<uint64_t>& coordinatorQueryIds,
    std::unordered_set<ShardID> const* lockedShards) const {
  TRI_ASSERT(!_nodes.empty());
  TRI_ASSERT(lockedShards != nullptr);
  {
    auto uniqEngine = std::make_unique<ExecutionEngine>(query);
    query->setEngine(uniqEngine.release());
  }

  auto engine = query->engine();

  {
    auto cpyLockedShards =
        std::make_unique<std::unordered_set<std::string>>(*lockedShards);
    engine->setLockedShards(cpyLockedShards.release());
  }

  engine->createBlocks(_nodes, {}, restrictToShards, dbServerQueryIds);

  TRI_ASSERT(engine->root() != nullptr);

  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Storing Coordinator engine: "
                                          << _id;

  // For _id == 0 this thread will always maintain the handle to
  // the engine and will clean up. We do not keep track of it separately.
  if (_id != 0) {
    try {
      queryRegistry->insert(_id, query, 600.0);
    } catch (basics::Exception const& e) {
      return {e.code(), e.message()};
    } catch (std::exception const& e) {
      return {TRI_ERROR_INTERNAL, e.what()};
    } catch (...) {
      return {TRI_ERROR_INTERNAL};
    }

    coordinatorQueryIds.emplace_back(_id);
  }

  return {TRI_ERROR_NO_ERROR};
}

EngineInfoContainerCoordinator::EngineInfoContainerCoordinator() {
  // We always start with an empty coordinator snippet
  _engines.emplace_back(0, 0);
  _engineStack.emplace(0);
}

EngineInfoContainerCoordinator::~EngineInfoContainerCoordinator() {}

void EngineInfoContainerCoordinator::addNode(ExecutionNode* node) {
  TRI_ASSERT(node->getType() != ExecutionNode::INDEX &&
             node->getType() != ExecutionNode::ENUMERATE_COLLECTION);
  TRI_ASSERT(!_engines.empty());
  TRI_ASSERT(!_engineStack.empty());
  size_t idx = _engineStack.top();
  _engines[idx].addNode(node);
}

void EngineInfoContainerCoordinator::openSnippet(size_t idOfRemoteNode) {
  _engineStack.emplace(_engines.size());  // insert next id
  QueryId id = TRI_NewTickServer();
  _engines.emplace_back(id, idOfRemoteNode);
}

QueryId EngineInfoContainerCoordinator::closeSnippet() {
  TRI_ASSERT(!_engines.empty());
  TRI_ASSERT(!_engineStack.empty());

  size_t idx = _engineStack.top();
  QueryId id = _engines[idx].queryId();
  _engineStack.pop();
  return id;
}

ExecutionEngineResult EngineInfoContainerCoordinator::buildEngines(
    Query* query, QueryRegistry* registry, std::string const& dbname,
    std::unordered_set<std::string> const& restrictToShards,
    std::unordered_map<std::string, std::string>& dbServerQueryIds,
    std::unordered_set<ShardID> const* lockedShards) const {
  TRI_ASSERT(_engineStack.size() == 1);
  TRI_ASSERT(_engineStack.top() == 0);

  std::vector<uint64_t> coordinatorQueryIds{};
  auto cleanup = [&]() {
    for (auto const& it : coordinatorQueryIds) {
      registry->destroy(dbname, it, TRI_ERROR_INTERNAL);
    }
  };
  TRI_DEFER(cleanup());

  bool first = true;
  Query* localQuery = query;
  try {
    for (auto const& info : _engines) {
      if (!first) {
        // need a new query instance on the coordinator
        localQuery = query->clone(PART_DEPENDENT, false);
        if (localQuery == nullptr) {
          return {TRI_ERROR_INTERNAL, "cannot clone query"};
        }
      }
      try {
        auto res = info.buildEngine(localQuery, registry, dbname,
                                    restrictToShards, dbServerQueryIds,
                                    coordinatorQueryIds, lockedShards);
        if (!res.ok()) {
          if (!first) {
            // We need to clean up this query; it is not in the registry.
            delete localQuery;
          }
          return {res.errorNumber(), res.errorMessage()};
        }
      } catch (...) {
        // We do not catch any other error here.
        // All errors we throw are handled by the result above.
        if (!first) {
          // We need to clean up this query; it is not in the registry.
          delete localQuery;
        }
        return {TRI_ERROR_INTERNAL};
      }
      first = false;
    }
  } catch (basics::Exception const& ex) {
    return {ex.code(), ex.message()};
  } catch (std::exception const& ex) {
    return {TRI_ERROR_INTERNAL, ex.what()};
  } catch (...) {
    return TRI_ERROR_INTERNAL;
  }

  // This deactivates the deferred cleanup.
  // From here on we rely on the AQL shutdown mechanism.
  coordinatorQueryIds.clear();
  return query->engine();
}
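buildEngines pairs a cleanup lambda with TRI_DEFER so that every coordinator engine already registered gets destroyed if a later snippet fails; clearing coordinatorQueryIds at the end disarms the guard. A minimal standalone sketch of this disarm-able scope-guard pattern; the ScopeGuard type below is invented, and ArangoDB's actual TRI_DEFER macro may differ in detail:

#include <functional>
#include <vector>

// Hypothetical stand-in for the TRI_DEFER idiom: run a cleanup functor when
// the scope unwinds, unless the work list was emptied ("disarmed") on success.
struct ScopeGuard {
  std::function<void()> fn;
  ~ScopeGuard() { if (fn) fn(); }
};

void build_all(std::vector<int>& registered) {
  ScopeGuard guard{[&]() {
    for (int id : registered) { (void)id; /* registry->destroy(id) on failure */ }
  }};

  registered.push_back(1);  // work that may throw later...
  registered.push_back(2);

  // Success: emptying the list makes the guard's loop a no-op,
  // exactly like coordinatorQueryIds.clear() above.
  registered.clear();
}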
@@ -0,0 +1,122 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
/// (same Apache 2.0 license header as above)
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_AQL_ENGINE_INFO_CONTAINER_COORDINATOR_H
#define ARANGOD_AQL_ENGINE_INFO_CONTAINER_COORDINATOR_H 1

#include "Basics/Common.h"

#include "Aql/types.h"
#include "Cluster/ClusterInfo.h"

#include <stack>

namespace arangodb {
namespace aql {

class ExecutionEngine;
class ExecutionEngineResult;
class ExecutionNode;
class Query;
class QueryRegistry;

class EngineInfoContainerCoordinator {
 private:
  struct EngineInfo {
   public:
    EngineInfo(QueryId id, size_t idOfRemoteNode);
    ~EngineInfo();

    EngineInfo(EngineInfo&) = delete;
    EngineInfo(EngineInfo const& other) = delete;
    EngineInfo(EngineInfo const&& other);

    void addNode(ExecutionNode* en);

    Result buildEngine(Query* query, QueryRegistry* queryRegistry,
                       std::string const& dbname,
                       std::unordered_set<std::string> const& restrictToShards,
                       std::unordered_map<std::string, std::string> const& dbServerQueryIds,
                       std::vector<uint64_t>& coordinatorQueryIds,
                       std::unordered_set<ShardID> const* lockedShards) const;

    QueryId queryId() const {
      return _id;
    }

   private:
    // The generated id under which this query part can be found
    // in the coordinator's registry.
    QueryId const _id;

    // The nodes belonging to this plan.
    std::vector<ExecutionNode*> _nodes;

    // id of the remote node this plan collects data from
    size_t _idOfRemoteNode;
  };

 public:
  EngineInfoContainerCoordinator();

  ~EngineInfoContainerCoordinator();

  // Insert a new node into the last engine on the stack
  void addNode(ExecutionNode* node);

  // Open a new snippet, which is connected to the given remoteNode id
  void openSnippet(size_t idOfRemoteNode);

  // Close the currently open snippet.
  // This finalizes the EngineInfo from the given information.
  // It intentionally does NOT insert the engines into the query
  // registry, for easier cleanup.
  // Returns the queryId of the closed snippet.
  QueryId closeSnippet();

  // Build the engines on the coordinator:
  //   * creates the ExecutionBlocks
  //   * injects all parts but the first one into the QueryRegistry
  // Returns the first engine, which is not added to the registry.
  ExecutionEngineResult buildEngines(
      Query* query, QueryRegistry* registry, std::string const& dbname,
      std::unordered_set<std::string> const& restrictToShards,
      std::unordered_map<std::string, std::string>& queryIds,
      std::unordered_set<ShardID> const* lockedShards) const;

 private:
  // @brief List of EngineInfos to distribute across the cluster
  std::vector<EngineInfo> _engines;

  std::stack<size_t> _engineStack;
};

}  // namespace aql
}  // namespace arangodb

#endif
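The container is driven while walking the execution plan: every REMOTE node opens a new snippet, and the matching close hands back the snippet's QueryId so it can be wired to the DBServer side. A hedged sketch of the calling pattern; the walker below is invented for illustration, the real caller lives in the ExecutionEngine instantiation code:

// Hypothetical plan walk illustrating the openSnippet/closeSnippet pairing.
// The container starts with snippet 0 (the top-level coordinator part).
void walkCoordinatorPlan(ExecutionNode* node,
                         EngineInfoContainerCoordinator& coord) {
  coord.addNode(node);  // goes into the snippet currently on top of the stack

  if (node->getType() == ExecutionNode::REMOTE) {
    // everything below a REMOTE node starts a new snippet
    coord.openSnippet(node->id());
  }

  for (ExecutionNode* dep : node->getDependencies()) {
    walkCoordinatorPlan(dep, coord);
  }

  if (node->getType() == ExecutionNode::REMOTE) {
    QueryId id = coord.closeSnippet();  // wire this id to the DBServer side
    (void)id;
  }
}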
@@ -0,0 +1,749 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
/// (same Apache 2.0 license header as above)
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#include "EngineInfoContainerDBServer.h"

#include "Aql/ClusterNodes.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionNode.h"
#include "Aql/GraphNode.h"
#include "Aql/IndexNode.h"
#include "Aql/ModificationNodes.h"
#include "Aql/Query.h"
#include "Basics/Result.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ServerState.h"
#include "Cluster/TraverserEngineRegistry.h"
#include "Graph/BaseOptions.h"
#include "StorageEngine/TransactionState.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ticks.h"

using namespace arangodb;
using namespace arangodb::aql;

static const double SETUP_TIMEOUT = 25.0;

EngineInfoContainerDBServer::EngineInfo::EngineInfo(size_t idOfRemoteNode)
    : _idOfRemoteNode(idOfRemoteNode), _otherId(0), _collection(nullptr) {}

EngineInfoContainerDBServer::EngineInfo::~EngineInfo() {
  // This container is not responsible for the nodes;
  // they are managed by the AST somewhere else.
  // We are also not responsible for the collection.
  TRI_ASSERT(!_nodes.empty());
}

EngineInfoContainerDBServer::EngineInfo::EngineInfo(EngineInfo const&& other)
    : _nodes(std::move(other._nodes)),
      _idOfRemoteNode(other._idOfRemoteNode),
      _otherId(other._otherId),
      _collection(other._collection) {
  TRI_ASSERT(!_nodes.empty());
  TRI_ASSERT(_collection != nullptr);
}

void EngineInfoContainerDBServer::EngineInfo::connectQueryId(QueryId id) {
  _otherId = id;
}

void EngineInfoContainerDBServer::EngineInfo::addNode(ExecutionNode* node) {
  _nodes.emplace_back(node);
}

Collection const* EngineInfoContainerDBServer::EngineInfo::collection() const {
  return _collection;
}

void EngineInfoContainerDBServer::EngineInfo::collection(Collection* col) {
  _collection = col;
}

void EngineInfoContainerDBServer::EngineInfo::serializeSnippet(
    Query* query, ShardID id, VPackBuilder& infoBuilder,
    bool isResponsibleForInit) const {
  // The key is required to build up the queryId mapping later
  infoBuilder.add(VPackValue(
      arangodb::basics::StringUtils::itoa(_idOfRemoteNode) + ":" + id));

  TRI_ASSERT(!_nodes.empty());
  // Copy the relevant fragment of the plan for each shard.
  // Note that in these parts of the query there are no SubqueryNodes,
  // since they are all on the coordinator!
  // Also note: as _collection is set to the correct current shard,
  // this clone does the translation collection => shardId implicitly
  // at the relevant parts of the query.

  _collection->setCurrentShard(id);

  ExecutionPlan plan(query->ast());
  ExecutionNode* previous = nullptr;

  // for (ExecutionNode const* current : _nodes) {
  for (auto enIt = _nodes.rbegin(); enIt != _nodes.rend(); ++enIt) {
    ExecutionNode const* current = *enIt;
    auto clone = current->clone(&plan, false, false);
    // UNNECESSARY, because clone does it: plan.registerNode(clone);

    if (current->getType() == ExecutionNode::REMOTE) {
      auto rem = static_cast<RemoteNode*>(clone);
      // update the remote node with the information about the query
      rem->server("server:" + arangodb::ServerState::instance()->getId());
      rem->ownName(id);
      rem->queryId(_otherId);

      // Only one of the remote blocks is responsible for forwarding the
      // initializeCursor and shutDown requests.
      // For simplicity, we always use the first remote block if we have
      // more than one.

      // Do we still need this???
      rem->isResponsibleForInitializeCursor(isResponsibleForInit);
    }

    if (previous != nullptr) {
      clone->addDependency(previous);
    }

    previous = clone;
  }
  TRI_ASSERT(previous != nullptr);

  plan.root(previous);
  plan.setVarUsageComputed();
  // always verbose
  plan.root()->toVelocyPack(infoBuilder, true);
  _collection->resetCurrentShard();
}

EngineInfoContainerDBServer::EngineInfoContainerDBServer() {}

EngineInfoContainerDBServer::~EngineInfoContainerDBServer() {}

void EngineInfoContainerDBServer::addNode(ExecutionNode* node) {
  TRI_ASSERT(!_engineStack.empty());
  _engineStack.top()->addNode(node);
  switch (node->getType()) {
    case ExecutionNode::ENUMERATE_COLLECTION:
      handleCollection(
          static_cast<EnumerateCollectionNode*>(node)->collection(),
          AccessMode::Type::READ, true);
      break;
    case ExecutionNode::INDEX:
      handleCollection(static_cast<IndexNode*>(node)->collection(),
                       AccessMode::Type::READ, true);
      break;
    case ExecutionNode::INSERT:
    case ExecutionNode::UPDATE:
    case ExecutionNode::REMOVE:
    case ExecutionNode::REPLACE:
    case ExecutionNode::UPSERT:
      handleCollection(static_cast<ModificationNode*>(node)->collection(),
                       AccessMode::Type::WRITE, true);
      break;
    default:
      // do nothing
      break;
  };
}

void EngineInfoContainerDBServer::openSnippet(size_t idOfRemoteNode) {
  _engineStack.emplace(std::make_shared<EngineInfo>(idOfRemoteNode));
}

// Closing a snippet means:
// 1. pop it off the stack
// 2. wire it up with the given coordinator ID
// 3. move it into the Collection => Engine map
void EngineInfoContainerDBServer::closeSnippet(QueryId coordinatorEngineId) {
  TRI_ASSERT(!_engineStack.empty());
  auto e = _engineStack.top();
  _engineStack.pop();

  e->connectQueryId(coordinatorEngineId);
  TRI_ASSERT(e->collection() != nullptr);
  auto& engine = _engines[e->collection()];
  engine.emplace_back(std::move(e));
}

// This first defines the lock required for this collection.
// Then we update the collection pointer of the last engine.
#ifndef USE_ENTERPRISE
void EngineInfoContainerDBServer::handleCollection(
    Collection const* col, AccessMode::Type const& accessType,
    bool updateCollection) {
  auto it = _collections.find(col);
  if (it == _collections.end()) {
    _collections.emplace(col, accessType);
  } else {
    if (it->second < accessType) {
      // we need to upgrade the lock
      it->second = accessType;
    }
  }
  if (updateCollection) {
    TRI_ASSERT(!_engineStack.empty());
    auto e = _engineStack.top();
    // ... const_cast
    e->collection(const_cast<Collection*>(col));
  }
}
#endif

EngineInfoContainerDBServer::DBServerInfo::DBServerInfo() {}
EngineInfoContainerDBServer::DBServerInfo::~DBServerInfo() {}

void EngineInfoContainerDBServer::DBServerInfo::addShardLock(
    AccessMode::Type const& lock, ShardID const& id) {
  _shardLocking[lock].emplace_back(id);
}

void EngineInfoContainerDBServer::DBServerInfo::addEngine(
    std::shared_ptr<EngineInfoContainerDBServer::EngineInfo> info,
    ShardID const& id) {
  _engineInfos[info].emplace_back(id);
}

void EngineInfoContainerDBServer::DBServerInfo::buildMessage(
    Query* query, VPackBuilder& infoBuilder) const {
  TRI_ASSERT(infoBuilder.isEmpty());

  infoBuilder.openObject();
  infoBuilder.add(VPackValue("lockInfo"));
  infoBuilder.openObject();
  for (auto const& shardLocks : _shardLocking) {
    infoBuilder.add(VPackValue(AccessMode::typeString(shardLocks.first)));
    infoBuilder.openArray();
    for (auto const& s : shardLocks.second) {
      infoBuilder.add(VPackValue(s));
    }
    infoBuilder.close();  // the array
  }
  infoBuilder.close();  // lockInfo
  infoBuilder.add(VPackValue("options"));
  injectQueryOptions(query, infoBuilder);
  infoBuilder.add(VPackValue("variables"));
  // this will open and close an Object
  query->ast()->variables()->toVelocyPack(infoBuilder);
  infoBuilder.add(VPackValue("snippets"));
  infoBuilder.openObject();

  for (auto const& it : _engineInfos) {
    bool isResponsibleForInit = true;
    for (auto const& s : it.second) {
      it.first->serializeSnippet(query, s, infoBuilder, isResponsibleForInit);
      isResponsibleForInit = false;
    }
  }
  infoBuilder.close();  // snippets
  injectTraverserEngines(infoBuilder);
  infoBuilder.close();  // object
}
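buildMessage assembles the whole per-DBServer setup payload in a single VelocyPack object. Roughly, the resulting JSON has the following shape; this is a hand-written illustration, not output captured from the commit, and the shard ids and ellipses are placeholders:

// Hypothetical shape of the /_api/aql/setup message built above:
constexpr const char* kExampleSetupMessage = R"(
{
  "lockInfo":  { "read": ["s1000001", "s1000002"], "write": ["s1000003"] },
  "options":   { "...": "query options, serialized by injectQueryOptions()" },
  "variables": [ { "...": "the AST's variables" } ],
  "snippets":  {
    "<idOfRemoteNode>:<shardId>": "serialized plan fragment for that shard"
  }
}
)";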
void EngineInfoContainerDBServer::DBServerInfo::injectTraverserEngines(
    VPackBuilder& infoBuilder) const {
  if (_traverserEngineInfos.empty()) {
    return;
  }
  TRI_ASSERT(infoBuilder.isOpenObject());
  infoBuilder.add(VPackValue("traverserEngines"));
  infoBuilder.openArray();
  for (auto const& it : _traverserEngineInfos) {
    GraphNode* en = it.first;
    auto const& list = it.second;
    infoBuilder.openObject();
    {
      // options
      infoBuilder.add(VPackValue("options"));
      graph::BaseOptions* opts = en->options();
      opts->buildEngineInfo(infoBuilder);
    }
    {
      // variables
      std::vector<aql::Variable const*> vars;
      en->getConditionVariables(vars);
      if (!vars.empty()) {
        infoBuilder.add(VPackValue("variables"));
        infoBuilder.openArray();
        for (auto v : vars) {
          v->toVelocyPack(infoBuilder);
        }
        infoBuilder.close();
      }
    }

    infoBuilder.add(VPackValue("shards"));
    infoBuilder.openObject();
    infoBuilder.add(VPackValue("vertices"));
    infoBuilder.openObject();
    for (auto const& col : list.vertexCollections) {
      infoBuilder.add(VPackValue(col.first));
      infoBuilder.openArray();
      for (auto const& v : col.second) {
        infoBuilder.add(VPackValue(v));
      }
      infoBuilder.close();  // this collection
    }
    infoBuilder.close();  // vertices

    infoBuilder.add(VPackValue("edges"));
    infoBuilder.openArray();
    for (auto const& edgeShards : list.edgeCollections) {
      infoBuilder.openArray();
      for (auto const& e : edgeShards) {
        infoBuilder.add(VPackValue(e));
      }
      infoBuilder.close();
    }
    infoBuilder.close();  // edges
    infoBuilder.close();  // shards

    en->enhanceEngineInfo(infoBuilder);

    infoBuilder.close();  // base
  }

  infoBuilder.close();  // traverserEngines
}

void EngineInfoContainerDBServer::DBServerInfo::combineTraverserEngines(
    ServerID const& serverID, VPackSlice const ids) {
  if (ids.length() != _traverserEngineInfos.size()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CLUSTER_AQL_COMMUNICATION,
                                   "The DBServer was not able to create enough "
                                   "traversal engines. This can happen during "
                                   "failover. Please check: " +
                                       serverID);
  }
  auto idIter = VPackArrayIterator(ids);
  // We need to use the same order of iterating over the
  // traverserEngineInfos to wire the correct GraphNodes
  // to the correct engine ids.
  for (auto const& it : _traverserEngineInfos) {
    it.first->addEngine(
        idIter.value().getNumber<traverser::TraverserEngineID>(), serverID);
    idIter.next();
  }
}

void EngineInfoContainerDBServer::DBServerInfo::addTraverserEngine(
    GraphNode* node, TraverserEngineShardLists&& shards) {
  _traverserEngineInfos.push_back(std::make_pair(node, std::move(shards)));
}

void EngineInfoContainerDBServer::DBServerInfo::injectQueryOptions(
    Query* query, VPackBuilder& infoBuilder) const {
  // toVelocyPack will open & close the "options" object
  query->queryOptions().toVelocyPack(infoBuilder, true);
}

std::map<ServerID, EngineInfoContainerDBServer::DBServerInfo>
EngineInfoContainerDBServer::createDBServerMapping(
    std::unordered_set<std::string> const& restrictToShards,
    std::unordered_set<ShardID>* lockedShards) const {
  auto ci = ClusterInfo::instance();

  std::map<ServerID, DBServerInfo> dbServerMapping;

  for (auto const& it : _collections) {
    // it.first  => Collection const*
    // it.second => lock type
    std::vector<std::shared_ptr<EngineInfo>> const* engines = nullptr;
    if (_engines.find(it.first) != _engines.end()) {
      engines = &_engines.find(it.first)->second;
    }
    auto shardIds = it.first->shardIds(restrictToShards);
    for (auto const& s : *(shardIds.get())) {
      lockedShards->emplace(s);
      auto const servers = ci->getResponsibleServer(s);
      if (servers == nullptr || servers->empty()) {
        THROW_ARANGO_EXCEPTION_MESSAGE(
            TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE,
            "Could not find responsible server for shard " + s);
      }
      auto responsible = servers->at(0);
      auto& mapping = dbServerMapping[responsible];
      mapping.addShardLock(it.second, s);
      if (engines != nullptr) {
        for (auto& e : *engines) {
          mapping.addEngine(e, s);
        }
      }
    }
  }

#ifdef USE_ENTERPRISE
  prepareSatellites(dbServerMapping, restrictToShards);
#endif

  return dbServerMapping;
}

void EngineInfoContainerDBServer::injectGraphNodesToMapping(
    Query* query, std::unordered_set<std::string> const& restrictToShards,
    std::map<ServerID, EngineInfoContainerDBServer::DBServerInfo>&
        dbServerMapping) const {
  if (_graphNodes.empty()) {
    return;
  }

#ifdef USE_ENTERPRISE
  transaction::Methods* trx = query->trx();
  transaction::Options& trxOps = query->trx()->state()->options();
#endif

  auto clusterInfo = arangodb::ClusterInfo::instance();

  /// Typedef for a complicated mapping used in TraverserEngines.
  typedef std::unordered_map<
      ServerID, EngineInfoContainerDBServer::TraverserEngineShardLists>
      Serv2ColMap;

  for (GraphNode* en : _graphNodes) {
    // every node needs its own Serv2ColMap
    Serv2ColMap mappingServerToCollections;
    en->prepareOptions();

    std::vector<std::unique_ptr<arangodb::aql::Collection>> const& edges =
        en->edgeColls();

    // Here we create a mapping
    //   ServerID => responsible shards,
    // where the responsible shards are divided into edge collections and
    // vertex collections.
    // For edge collections the ordering is important for the index access.
    // Also, the same edge collection can be included twice (iff direction is ANY).
    size_t length = edges.size();

    auto findServerLists = [&](ShardID const& shard) -> Serv2ColMap::iterator {
      auto serverList = clusterInfo->getResponsibleServer(shard);
      if (serverList->empty()) {
        THROW_ARANGO_EXCEPTION_MESSAGE(
            TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE,
            "Could not find responsible server for shard " + shard);
      }
      TRI_ASSERT(!serverList->empty());
      auto& leader = (*serverList)[0];
      auto pair = mappingServerToCollections.find(leader);
      if (pair == mappingServerToCollections.end()) {
        mappingServerToCollections.emplace(leader,
                                           TraverserEngineShardLists{length});
        pair = mappingServerToCollections.find(leader);
      }
      return pair;
    };

    for (size_t i = 0; i < length; ++i) {
      auto shardIds = edges[i]->shardIds(restrictToShards);
      for (auto const& shard : *shardIds) {
        auto pair = findServerLists(shard);
        pair->second.edgeCollections[i].emplace_back(shard);
      }
    }

    std::vector<std::unique_ptr<arangodb::aql::Collection>> const& vertices =
        en->vertexColls();
    if (vertices.empty()) {
      std::unordered_set<std::string> knownEdges;
      for (auto const& it : edges) {
        knownEdges.emplace(it->getName());
      }
      // This case indicates we do not have a named graph. We simply use
      // ALL collections known to this query.
      std::map<std::string, Collection*>* cs =
          query->collections()->collections();
      for (auto const& collection : (*cs)) {
        if (knownEdges.find(collection.second->getName()) == knownEdges.end()) {
          // This collection is not one of the edge collections used in this
          // graph.
          auto shardIds = collection.second->shardIds(restrictToShards);
          for (ShardID const& shard : *shardIds) {
            auto pair = findServerLists(shard);
            pair->second.vertexCollections[collection.second->getName()]
                .emplace_back(shard);
#ifdef USE_ENTERPRISE
            if (trx->isInaccessibleCollectionId(
                    collection.second->getPlanId())) {
              TRI_ASSERT(
                  ServerState::instance()->isSingleServerOrCoordinator());
              TRI_ASSERT(trxOps.skipInaccessibleCollections);
              pair->second.inaccessibleShards.insert(shard);
              pair->second.inaccessibleShards.insert(
                  std::to_string(collection.second->getCollection()->id()));
            }
#endif
          }
        }
      }
      // We have to make sure that all engines at least know all vertex
      // collections. Thanks to fanout...
      for (auto const& collection : (*cs)) {
        for (auto& entry : mappingServerToCollections) {
          auto it =
              entry.second.vertexCollections.find(collection.second->getName());
          if (it == entry.second.vertexCollections.end()) {
            entry.second.vertexCollections.emplace(collection.second->getName(),
                                                   std::vector<ShardID>());
          }
        }
      }
    } else {
      // This traversal is started with a GRAPH. It knows all relevant
      // collections.
      for (auto const& it : vertices) {
        auto shardIds = it->shardIds(restrictToShards);
        for (ShardID const& shard : *shardIds) {
          auto pair = findServerLists(shard);
          pair->second.vertexCollections[it->getName()].emplace_back(shard);
#ifdef USE_ENTERPRISE
          if (trx->isInaccessibleCollectionId(it->getPlanId())) {
            TRI_ASSERT(trxOps.skipInaccessibleCollections);
            pair->second.inaccessibleShards.insert(shard);
            pair->second.inaccessibleShards.insert(
                std::to_string(it->getCollection()->id()));
          }
#endif
        }
      }
      // We have to make sure that all engines at least know all vertex
      // collections. Thanks to fanout...
      for (auto const& it : vertices) {
        for (auto& entry : mappingServerToCollections) {
          auto vIt = entry.second.vertexCollections.find(it->getName());
          if (vIt == entry.second.vertexCollections.end()) {
            entry.second.vertexCollections.emplace(it->getName(),
                                                   std::vector<ShardID>());
          }
        }
      }
    }

    // Now we have sorted all collections on the db servers. Hand the engines
    // over to the server builder.

    // NOTE: This is only valid because it is single-threaded and we do not
    // have concurrent access. We move stuff out of this map => memory will
    // get corrupted if we read it again.
    for (auto it : mappingServerToCollections) {
      // This condition is guaranteed because all shards have been prepared
      // for locking in the EngineInfos.
      TRI_ASSERT(dbServerMapping.find(it.first) != dbServerMapping.end());
      dbServerMapping.find(it.first)->second.addTraverserEngine(
          en, std::move(it.second));
    }
  }
}

Result EngineInfoContainerDBServer::buildEngines(
    Query* query, std::unordered_map<std::string, std::string>& queryIds,
    std::unordered_set<std::string> const& restrictToShards,
    std::unordered_set<ShardID>* lockedShards) const {
  TRI_ASSERT(_engineStack.empty());
  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "We have " << _engines.size()
                                          << " DBServer engines";

  // We create a map DBServer => all query snippets executed there.
  auto dbServerMapping = createDBServerMapping(restrictToShards, lockedShards);
  // This mapping does not contain traversal engines.
  //
  // We add traversal engines if necessary.
  injectGraphNodesToMapping(query, restrictToShards, dbServerMapping);

  auto cc = ClusterComm::instance();

  if (cc == nullptr) {
    // nullptr only happens on controlled shutdown
    return {TRI_ERROR_SHUTTING_DOWN};
  }

  std::string const url("/_db/" + arangodb::basics::StringUtils::urlEncode(
                                      query->vocbase()->name()) +
                        "/_api/aql/setup");

  bool needCleanup = true;
  auto cleanup = [&]() {
    if (needCleanup) {
      cleanupEngines(cc, TRI_ERROR_INTERNAL, query->vocbase()->name(),
                     queryIds);
    }
  };
  TRI_DEFER(cleanup());

  std::unordered_map<std::string, std::string> headers;
  // build lookup infos
  VPackBuilder infoBuilder;
  for (auto& it : dbServerMapping) {
    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Building Engine Info for "
                                            << it.first;
    infoBuilder.clear();
    it.second.buildMessage(query, infoBuilder);
    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Sending the Engine info: "
                                            << infoBuilder.toJson();

    // Now we send to the DBServers.
    // We expect a body with {snippets: {id => engineId}, traverserEngines:
    // [engineId]}

    CoordTransactionID coordTransactionID = TRI_NewTickServer();
    auto res = cc->syncRequest("", coordTransactionID, "server:" + it.first,
                               RequestType::POST, url, infoBuilder.toJson(),
                               headers, SETUP_TIMEOUT);

    if (res->getErrorCode() != TRI_ERROR_NO_ERROR) {
      LOG_TOPIC(DEBUG, Logger::AQL) << it.first << " responded with "
                                    << res->getErrorCode() << " -> "
                                    << res->stringifyErrorMessage();
      LOG_TOPIC(TRACE, Logger::AQL) << infoBuilder.toJson();
      return {res->getErrorCode(), res->stringifyErrorMessage()};
    }

    std::shared_ptr<VPackBuilder> builder = res->result->getBodyVelocyPack();
    VPackSlice response = builder->slice();

    if (!response.isObject() || !response.get("result").isObject()) {
      // TODO: could not register all engines. Need to clean up.
      LOG_TOPIC(ERR, Logger::AQL) << "Received error information from "
                                  << it.first << " : " << response.toJson();
      return {TRI_ERROR_CLUSTER_AQL_COMMUNICATION,
              "Unable to deploy query on all required "
              "servers. This can happen during "
              "failover. Please check: " +
                  it.first};
    }

    VPackSlice result = response.get("result");
    VPackSlice snippets = result.get("snippets");

    for (auto const& resEntry : VPackObjectIterator(snippets)) {
      if (!resEntry.value.isString()) {
        return {TRI_ERROR_CLUSTER_AQL_COMMUNICATION,
                "Unable to deploy query on all required "
                "servers. This can happen during "
                "failover. Please check: " +
                    it.first};
      }
      queryIds.emplace(resEntry.key.copyString(), resEntry.value.copyString());
    }

    VPackSlice travEngines = result.get("traverserEngines");
    if (!travEngines.isNone()) {
      if (!travEngines.isArray()) {
        // TODO: could not register all traversal engines. Need to clean up.
        return {TRI_ERROR_CLUSTER_AQL_COMMUNICATION,
                "Unable to deploy query on all required "
                "servers. This can happen during "
                "failover. Please check: " +
                    it.first};
      }

      it.second.combineTraverserEngines(it.first, travEngines);
    }
  }

#ifdef USE_ENTERPRISE
  resetSatellites();
#endif
  needCleanup = false;
  return TRI_ERROR_NO_ERROR;
}

void EngineInfoContainerDBServer::addGraphNode(Query* query, GraphNode* node) {
  // add all edge collections to the transaction; traversals never write
  for (auto const& col : node->edgeColls()) {
    handleCollection(col.get(), AccessMode::Type::READ, false);
  }

  // add all vertex collections to the transaction; traversals never write
  auto& vCols = node->vertexColls();
  if (vCols.empty()) {
    // This case indicates we do not have a named graph. We simply use
    // ALL collections known to this query.
    std::map<std::string, Collection*>* cs =
        query->collections()->collections();
    for (auto const& col : *cs) {
      handleCollection(col.second, AccessMode::Type::READ, false);
    }
  } else {
    for (auto const& col : node->vertexColls()) {
      handleCollection(col.get(), AccessMode::Type::READ, false);
    }
  }

  _graphNodes.emplace_back(node);
}

/**
 * @brief Will send a shutdown to all engines registered in the list of
 *        queryIds.
 *        NOTE: This function will ignore all queryIds whose key is not of
 *        the expected format; they may be leftovers from the coordinator.
 *        Will also clear the list of queryIds after return.
 *
 * @param cc The ClusterComm
 * @param errorCode error code to be sent to the DBServers for logging.
 * @param dbname Name of the database this query is executed in.
 * @param queryIds A map of QueryIds of the format: (remoteNodeId:shardId) ->
 *                 queryid.
 */
void EngineInfoContainerDBServer::cleanupEngines(
    std::shared_ptr<ClusterComm> cc, int errorCode, std::string const& dbname,
    std::unordered_map<std::string, std::string>& queryIds) const {
  // Shutdown query snippets
  std::string url("/_db/" + arangodb::basics::StringUtils::urlEncode(dbname) +
                  "/_api/aql/shutdown/");
  std::vector<ClusterCommRequest> requests;
  auto body = std::make_shared<std::string>("{\"code\":" +
                                            std::to_string(errorCode) + "}");
  for (auto const& it : queryIds) {
    auto pos = it.first.find(':');
    if (pos == it.first.npos) {
      // If we get here, the setup format was not as expected.
      TRI_ASSERT(false);
      continue;
    }
    auto shardId = it.first.substr(pos + 1);
    requests.emplace_back(shardId, rest::RequestType::PUT, url + it.second,
                          body);
  }

  // Shutdown traverser engines
  url = "/_db/" + arangodb::basics::StringUtils::urlEncode(dbname) +
        "/_internal/traverser/";
  std::shared_ptr<std::string> noBody;
  for (auto const& gn : _graphNodes) {
    auto allEngines = gn->engines();
    for (auto const& engine : *allEngines) {
      requests.emplace_back(engine.first, rest::RequestType::DELETE_REQ,
                            url + basics::StringUtils::itoa(engine.second),
                            noBody);
    }
  }

  cc->fireAndForgetRequests(requests);
  queryIds.clear();
}
@ -0,0 +1,245 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_AQL_ENGINE_INFO_CONTAINER_DBSERVER_H
#define ARANGOD_AQL_ENGINE_INFO_CONTAINER_DBSERVER_H 1

#include "Basics/Common.h"
|
||||
|
||||
#include "Aql/types.h"
|
||||
#include "Cluster/ClusterInfo.h"
|
||||
#include "VocBase/AccessMode.h"
|
||||
|
||||
#include <stack>
|
||||
|
||||
namespace arangodb {
|
||||
|
||||
class ClusterComm;
|
||||
class Result;
|
||||
|
||||
namespace aql {
|
||||
|
||||
struct Collection;
|
||||
class ExecutionNode;
|
||||
class GraphNode;
|
||||
class Query;
|
||||
|
||||
class EngineInfoContainerDBServer {
 private:
  // @brief Local struct to create the
  // information required to build traverser engines
  // on DB servers.
  struct TraverserEngineShardLists {
    explicit TraverserEngineShardLists(size_t length) {
      // Make sure they all have a fixed size.
      edgeCollections.resize(length);
    }

    ~TraverserEngineShardLists() {}

    // Mapping for edge collections to shardIds.
    // We have to retain the ordering of edge collections; all
    // vectors of these in one run need to have identical size.
    // This is because the conditions to query those edges have the
    // same ordering.
    std::vector<std::vector<ShardID>> edgeCollections;

    // Mapping for vertexCollections to shardIds.
    std::unordered_map<std::string, std::vector<ShardID>> vertexCollections;

#ifdef USE_ENTERPRISE
    std::set<ShardID> inaccessibleShards;
#endif
  };

  struct EngineInfo {
   public:
    explicit EngineInfo(size_t idOfRemoteNode);
    ~EngineInfo();

    EngineInfo(EngineInfo&) = delete;
    EngineInfo(EngineInfo const& other) = delete;
    EngineInfo(EngineInfo const&& other);

    void connectQueryId(QueryId id);

    void serializeSnippet(Query* query, ShardID id,
                          velocypack::Builder& infoBuilder,
                          bool isResponsibleForInit) const;

    Collection const* collection() const;

    void collection(Collection* col);

    void addNode(ExecutionNode* node);

   private:
    std::vector<ExecutionNode*> _nodes;
    size_t _idOfRemoteNode;   // id of the remote node
    QueryId _otherId;         // Id of query engine before this one
    Collection* _collection;  // The collection used to connect to this engine
  };

  struct DBServerInfo {
   public:
    DBServerInfo();
    ~DBServerInfo();

   public:
    void addShardLock(AccessMode::Type const& lock, ShardID const& id);

    void addEngine(std::shared_ptr<EngineInfo> info, ShardID const& id);

    void buildMessage(Query* query, velocypack::Builder& infoBuilder) const;

    void addTraverserEngine(GraphNode* node,
                            TraverserEngineShardLists&& shards);

    void combineTraverserEngines(ServerID const& serverID,
                                 arangodb::velocypack::Slice const ids);

   private:
    void injectTraverserEngines(VPackBuilder& infoBuilder) const;

    void injectQueryOptions(Query* query,
                            velocypack::Builder& infoBuilder) const;

   private:
    // @brief Map of LockType to ShardId
    std::unordered_map<AccessMode::Type, std::vector<ShardID>> _shardLocking;

    // @brief Map of all EngineInfos with their shards
    std::unordered_map<std::shared_ptr<EngineInfo>, std::vector<ShardID>>
        _engineInfos;

    // @brief List of all information required for traverser engines
    std::vector<std::pair<GraphNode*, TraverserEngineShardLists>>
        _traverserEngineInfos;
  };

 public:
  EngineInfoContainerDBServer();

  ~EngineInfoContainerDBServer();

  // Insert a new node into the last engine on the stack
  // If this Node contains Collections, they will be added into the map
  // for ShardLocking
  void addNode(ExecutionNode* node);

  // Open a new snippet, which is connected to the given remoteNode id
  void openSnippet(size_t idOfRemoteNode);

  // Closes the given snippet and connects it
  // to the given queryid of the coordinator.
  void closeSnippet(QueryId id);

  // Build the Engines for the DBServer
  //   * Creates one Query-Entry for each Snippet per Shard (multiple on the
  //     same DB)
  //   * All snippets know all locking information for the query.
  //   * Only the first snippet is responsible to lock.
  //   * After this step DBServer-Collections are locked!
  //
  //   Error Case: It is guaranteed that for all snippets created during
  //   this method a shutdown request is sent to all DBServers.
  //   In case the network is broken and this shutdown request is lost
  //   the DBServers will clean up their snippets after a TTL.
  Result buildEngines(Query* query,
                      std::unordered_map<std::string, std::string>& queryIds,
                      std::unordered_set<std::string> const& restrictToShards,
                      std::unordered_set<ShardID>* lockedShards) const;

  /**
   * @brief Will send a shutdown to all engines registered in the list of
   *        queryIds.
   *        NOTE: This function will ignore all queryIds whose key is not of
   *        the expected format; they may be leftovers from the Coordinator.
   *        Will also clear the list of queryIds after return.
   *
   * @param cc The ClusterComm
   * @param errorCode error code to be sent to the DBServers for logging.
   * @param dbname Name of the database this query is executed in.
   * @param queryIds A map of QueryIds of the format: (remoteNodeId:shardId) ->
   *                 queryid.
   */
  void cleanupEngines(
      std::shared_ptr<ClusterComm> cc, int errorCode, std::string const& dbname,
      std::unordered_map<std::string, std::string>& queryIds) const;

  // Insert a GraphNode that needs to generate TraverserEngines on
  // the DBServers. The GraphNode itself will remain on the coordinator.
  void addGraphNode(Query* query, GraphNode* node);

 private:
  void handleCollection(Collection const* col,
                        AccessMode::Type const& accessType,
                        bool updateCollection);

  // @brief Helper to create DBServerInfos and sort collections/shards into
  // them
  std::map<ServerID, EngineInfoContainerDBServer::DBServerInfo>
  createDBServerMapping(std::unordered_set<std::string> const& restrictToShards,
                        std::unordered_set<ShardID>* lockedShards) const;

  // @brief Helper to inject the TraverserEngines into the correct infos
  void injectGraphNodesToMapping(
      Query* query, std::unordered_set<std::string> const& restrictToShards,
      std::map<ServerID, EngineInfoContainerDBServer::DBServerInfo>&
          dbServerMapping) const;

#ifdef USE_ENTERPRISE
  void prepareSatellites(
      std::map<ServerID, DBServerInfo>& dbServerMapping,
      std::unordered_set<std::string> const& restrictToShards) const;

  void resetSatellites() const;
#endif

 private:
  // @brief Reference to the last inserted EngineInfo, used for back-linking of
  // QueryIds
  std::stack<std::shared_ptr<EngineInfo>> _engineStack;

  // @brief List of EngineInfos to distribute across the cluster
  std::unordered_map<Collection const*,
                     std::vector<std::shared_ptr<EngineInfo>>>
      _engines;

  // @brief Mapping of used collection names to lock type required
  std::unordered_map<Collection const*, AccessMode::Type> _collections;

#ifdef USE_ENTERPRISE
  // @brief List of all satellite collections
  std::unordered_set<Collection const*> _satellites;
#endif

  // @brief List of all graphNodes that need to create TraverserEngines on
  // DBServers
  std::vector<GraphNode*> _graphNodes;
};

}  // aql
}  // arangodb
#endif

@ -104,7 +104,7 @@ class ExecutionBlock {
  void throwIfKilled();

  /// @brief add a dependency
  void addDependency(ExecutionBlock* ep) {
  TEST_VIRTUAL void addDependency(ExecutionBlock* ep) {
    TRI_ASSERT(ep != nullptr);
    _dependencies.emplace_back(ep);
  }

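Editor's note (assumption, not from this diff): the TEST_VIRTUAL marker that
recurs in the following hunks presumably expands to `virtual` in test builds,
so these methods can be mocked, and to nothing in production builds. A minimal
definition along those lines could look like this; the guard name
ARANGODB_USE_GOOGLE_TESTS is a hypothetical placeholder:

  #ifdef ARANGODB_USE_GOOGLE_TESTS
  #define TEST_VIRTUAL virtual
  #else
  #define TEST_VIRTUAL
  #endif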
File diff suppressed because it is too large
@ -29,11 +29,11 @@
#include "Aql/ExecutionBlock.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/ExecutionStats.h"
#include "Aql/QueryRegistry.h"

namespace arangodb {
namespace aql {
class AqlItemBlock;
class QueryRegistry;

class ExecutionEngine {
 public:
@ -41,27 +41,33 @@ class ExecutionEngine {
  explicit ExecutionEngine(Query* query);

  /// @brief destroy the engine, frees all assigned blocks
  ~ExecutionEngine();
  TEST_VIRTUAL ~ExecutionEngine();

 public:
  // @brief create an execution engine from a plan
  static ExecutionEngine* instantiateFromPlan(QueryRegistry*, Query*,
                                              ExecutionPlan*, bool);

  TEST_VIRTUAL void createBlocks(
      std::vector<ExecutionNode*> const& nodes,
      std::unordered_set<std::string> const& includedShards,
      std::unordered_set<std::string> const& restrictToShards,
      std::unordered_map<std::string, std::string> const& queryIds);

  /// @brief get the root block
  ExecutionBlock* root() const {
  TEST_VIRTUAL ExecutionBlock* root() const {
    TRI_ASSERT(_root != nullptr);
    return _root;
  }

  /// @brief set the root block
  void root(ExecutionBlock* root) {
  TEST_VIRTUAL void root(ExecutionBlock* root) {
    TRI_ASSERT(root != nullptr);
    _root = root;
  }

  /// @brief get the query
  Query* getQuery() const { return _query; }
  TEST_VIRTUAL Query* getQuery() const { return _query; }

  /// @brief initializeCursor, could be called multiple times
  int initializeCursor(AqlItemBlock* items, size_t pos) {
@ -104,7 +110,7 @@ class ExecutionEngine {
  inline int64_t remaining() const { return _root->remaining(); }

  /// @brief add a block to the engine
  void addBlock(ExecutionBlock*);
  TEST_VIRTUAL void addBlock(ExecutionBlock*);

  /// @brief add a block to the engine
  /// @returns added block
@ -119,7 +125,7 @@ class ExecutionEngine {
  RegisterId resultRegister() const { return _resultRegister; }

  /// @brief _lockedShards
  void setLockedShards(std::unordered_set<std::string>* lockedShards) {
  TEST_VIRTUAL void setLockedShards(std::unordered_set<std::string>* lockedShards) {
    _lockedShards = lockedShards;
  }

@ -192,7 +192,7 @@ class ExecutionNode {
  }

  /// @brief get all dependencies
  std::vector<ExecutionNode*> getDependencies() const { return _dependencies; }
  TEST_VIRTUAL std::vector<ExecutionNode*> getDependencies() const { return _dependencies; }

  /// @brief returns the first dependency, or a nullptr if none present
  ExecutionNode* getFirstDependency() const {
@ -28,6 +28,7 @@
#include "Aql/ExecutionPlan.h"
#include "Aql/IndexBlock.h"
#include "Aql/Query.h"
#include "Logger/Logger.h"
#include "Transaction/Methods.h"

#include <velocypack/Iterator.h>

@ -63,7 +64,6 @@ IndexNode::IndexNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base
      _indexes(),
      _condition(nullptr),
      _reverse(base.get("reverse").getBoolean()) {

  TRI_ASSERT(_vocbase != nullptr);
  TRI_ASSERT(_collection != nullptr);

@ -86,12 +86,12 @@ class Query {
      std::shared_ptr<arangodb::velocypack::Builder> const& queryStruct,
      std::shared_ptr<arangodb::velocypack::Builder> const& options, QueryPart);

  virtual ~Query();
  TEST_VIRTUAL ~Query();

  /// @brief clone a query
  /// note: as a side-effect, this will also create and start a transaction for
  /// the query
  Query* clone(QueryPart, bool);
  TEST_VIRTUAL Query* clone(QueryPart, bool);

 public:

@ -107,7 +107,7 @@ class Query {
    return _profile.get();
  }

  QueryOptions const& queryOptions() const { return _queryOptions; }
  TEST_VIRTUAL QueryOptions const& queryOptions() const { return _queryOptions; }

  void increaseMemoryUsage(size_t value) { _resourceMonitor.increaseMemoryUsage(value); }
  void decreaseMemoryUsage(size_t value) { _resourceMonitor.decreaseMemoryUsage(value); }
@ -203,15 +203,15 @@ class Query {
  RegexCache* regexCache() { return &_regexCache; }

  /// @brief return the engine, if prepared
  ExecutionEngine* engine() const { return _engine.get(); }
  TEST_VIRTUAL ExecutionEngine* engine() const { return _engine.get(); }

  /// @brief inject the engine
  void setEngine(ExecutionEngine* engine);
  TEST_VIRTUAL void setEngine(ExecutionEngine* engine);

  void releaseEngine();

  /// @brief return the transaction, if prepared
  virtual transaction::Methods* trx() { return _trx; }
  TEST_VIRTUAL inline transaction::Methods* trx() { return _trx; }

  /// @brief get the plan for the query
  ExecutionPlan* plan() const { return _plan.get(); }

@ -69,6 +69,7 @@ QueryRegistry::~QueryRegistry() {
void QueryRegistry::insert(QueryId id, Query* query, double ttl) {
  TRI_ASSERT(query != nullptr);
  TRI_ASSERT(query->trx() != nullptr);
  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Register query with id " << id << " : " << query->queryString();
  auto vocbase = query->vocbase();

  // create the query info object outside of the lock
@ -111,19 +112,24 @@ void QueryRegistry::insert(QueryId id, Query* query, double ttl) {

/// @brief open
Query* QueryRegistry::open(TRI_vocbase_t* vocbase, QueryId id) {
  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Open query with id " << id;
  // std::cout << "Taking out query with ID " << id << std::endl;
  WRITE_LOCKER(writeLocker, _lock);

  auto m = _queries.find(vocbase->name());
  if (m == _queries.end()) {
    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Found no queries for DB: " << vocbase->name();
    return nullptr;
  }
  auto q = m->second.find(id);
  if (q == m->second.end()) {
    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Query id " << id << " not found in registry";
    return nullptr;
  }
  QueryInfo* qi = q->second;
  if (qi->_isOpen) {
    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Query with id " << id << " is already open";
    THROW_ARANGO_EXCEPTION_MESSAGE(
        TRI_ERROR_INTERNAL, "query with given vocbase and id is already open");
  }
@ -139,12 +145,13 @@ Query* QueryRegistry::open(TRI_vocbase_t* vocbase, QueryId id) {
    }
  }

  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Query with id " << id << " is now in use";
  return qi->_query;
}

/// @brief close
void QueryRegistry::close(TRI_vocbase_t* vocbase, QueryId id, double ttl) {
  // std::cout << "Returning query with ID " << id << std::endl;
  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Returning query with id " << id;
  WRITE_LOCKER(writeLocker, _lock);

  auto m = _queries.find(vocbase->name());
@ -154,11 +161,13 @@ void QueryRegistry::close(TRI_vocbase_t* vocbase, QueryId id, double ttl) {
  }
  auto q = m->second.find(id);
  if (q == m->second.end()) {
    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Query id " << id << " not found in registry";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                   "query with given vocbase and id not found");
  }
  QueryInfo* qi = q->second;
  if (!qi->_isOpen) {
    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Query id " << id << " was not open.";
    THROW_ARANGO_EXCEPTION_MESSAGE(
        TRI_ERROR_INTERNAL, "query with given vocbase and id is not open");
  }
@ -185,6 +194,7 @@ void QueryRegistry::close(TRI_vocbase_t* vocbase, QueryId id, double ttl) {

  qi->_isOpen = false;
  qi->_expires = TRI_microtime() + qi->_timeToLive;
  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Query with id " << id << " is now returned.";
}

/// @brief destroy
@ -244,6 +254,7 @@ void QueryRegistry::destroy(std::string const& vocbase, QueryId id,
    // commit the operation
    queryInfo->_query->trx()->commit();
  }
  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Query with id " << id << " is now destroyed";
}

/// @brief destroy
@ -274,6 +285,7 @@ void QueryRegistry::expireQueries() {

  for (auto& p : toDelete) {
    try {  // just in case
      LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Timeout for query with id " << p.second;
      destroy(p.first, p.second, TRI_ERROR_TRANSACTION_ABORTED);
    } catch (...) {
    }
@ -303,6 +315,7 @@ void QueryRegistry::destroyAll() {
  }
  for (auto& p : allQueries) {
    try {
      LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Timeout for query with id " << p.second << " due to shutdown";
      destroy(p.first, p.second, TRI_ERROR_SHUTTING_DOWN);
    } catch (...) {
      // ignore any errors here

@ -38,14 +38,14 @@ class QueryRegistry {
 public:
  QueryRegistry() {}

  ~QueryRegistry();
  TEST_VIRTUAL ~QueryRegistry();

  /// @brief insert, this inserts the query <query> for the vocbase <vocbase>
  /// and the id <id> into the registry. It is an error if there is already
  /// a query for this <vocbase> and <id> combination and an exception will
  /// be thrown in that case. The time to live <ttl> is in seconds and the
  /// query will be deleted if it is not opened for that amount of time.
  void insert(QueryId id, Query* query, double ttl = 600.0);
  TEST_VIRTUAL void insert(QueryId id, Query* query, double ttl = 600.0);

  /// @brief open, find a query in the registry, if none is found, a nullptr
  /// is returned, otherwise, ownership of the query is transferred to the
@ -69,7 +69,7 @@ class QueryRegistry {
  /// from the same thread that has opened it! Note that if the query is
  /// "open", then this will set the "killed" flag in the query and do no
  /// more.
  void destroy(std::string const& vocbase, QueryId id, int errorCode);
  TEST_VIRTUAL void destroy(std::string const& vocbase, QueryId id, int errorCode);

  void destroy(TRI_vocbase_t* vocbase, QueryId id, int errorCode);

@ -27,23 +27,28 @@
#include "Aql/ExecutionBlock.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Query.h"
#include "Aql/QueryRegistry.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Basics/VPackStringBufferAdapter.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/tri-strings.h"
#include "Cluster/ServerState.h"
#include "Cluster/TraverserEngine.h"
#include "Cluster/TraverserEngineRegistry.h"
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include "Rest/HttpResponse.h"
#include "Scheduler/JobGuard.h"
#include "Scheduler/JobQueue.h"
#include "Scheduler/SchedulerFeature.h"
#include "Transaction/Methods.h"
#include "Transaction/Context.h"
#include "Transaction/Methods.h"
#include "VocBase/ticks.h"

#include <velocypack/Dumper.h>
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb;
@ -54,12 +59,14 @@ using VelocyPackHelper = arangodb::basics::VelocyPackHelper;

RestAqlHandler::RestAqlHandler(GeneralRequest* request,
                               GeneralResponse* response,
                               QueryRegistry* queryRegistry)
                               std::pair<QueryRegistry*, traverser::TraverserEngineRegistry*>* registries)
    : RestVocbaseBaseHandler(request, response),
      _queryRegistry(queryRegistry),
      _queryRegistry(registries->first),
      _traverserRegistry(registries->second),
      _qId(0) {
  TRI_ASSERT(_vocbase != nullptr);
  TRI_ASSERT(_queryRegistry != nullptr);
  TRI_ASSERT(_traverserRegistry != nullptr);
}

// returns the queue name
@ -67,13 +74,329 @@ size_t RestAqlHandler::queue() const { return JobQueue::AQL_QUEUE; }

bool RestAqlHandler::isDirect() const { return false; }

// POST method for /_api/aql/setup (internal)
// Only available on DBServers in the Cluster.
// This route sets up all the query engines required
// for a complete query on this server.
// Furthermore it directly locks all shards for this query,
// so after this route the query is ready to go.
// NOTE: As this route LOCKS the collections, the caller
// is responsible for destroying those engines in a timely
// manner; if the engines are not called for a period
// of time, they will be garbage-collected and unlocked.
// The body is a VelocyPack with the following layout:
// {
//   lockInfo: {
//     NONE: [<collections to not-lock>],
//     READ: [<collections to read-lock>],
//     WRITE: [<collections to write-lock>],
//     EXCLUSIVE: [<collections with exclusive-lock>]
//   },
//   options: { <query options> },
//   snippets: {
//     <queryId: {nodes: [<nodes>]}>
//   },
//   traverserEngines: [ <infos for traverser engines> ],
//   variables: [ <variables> ]
// }

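// Editor's sketch (illustrative, not part of this commit): a minimal setup
// body built with VelocyPack, mirroring the layout documented above. The
// collection name "users", the snippet id "1", and the empty nodes/variables
// arrays are hypothetical placeholders; the real payloads are produced by
// the coordinator's EngineInfoContainerDBServer.
//
//   VPackBuilder body;
//   body.openObject();
//   body.add(VPackValue("lockInfo"));
//   body.openObject();
//   body.add(VPackValue("READ"));
//   body.openArray();
//   body.add(VPackValue("users"));  // collection to read-lock (hypothetical)
//   body.close();                   // READ
//   body.close();                   // lockInfo
//   body.add(VPackValue("options"));
//   body.openObject();              // query options, left empty here
//   body.close();
//   body.add(VPackValue("snippets"));
//   body.openObject();
//   body.add(VPackValue("1"));      // snippet id (hypothetical)
//   body.openObject();
//   body.add(VPackValue("nodes"));
//   body.openArray();               // serialized ExecutionNodes would go here
//   body.close();
//   body.close();
//   body.close();                   // snippets
//   body.add(VPackValue("variables"));
//   body.openArray();               // serialized variables would go here
//   body.close();
//   body.close();                   // body object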
void RestAqlHandler::setupClusterQuery() {
  // We should not intentionally call this method
  // on the wrong server. So fail during maintenance.
  // On user setup reply gracefully.
  TRI_ASSERT(ServerState::instance()->isDBServer());
  if (!ServerState::instance()->isDBServer()) {
    generateError(rest::ResponseCode::METHOD_NOT_ALLOWED,
                  TRI_ERROR_CLUSTER_ONLY_ON_DBSERVER);
    return;
  }

  // ---------------------------------------------------
  // SECTION: body validation
  // ---------------------------------------------------
  std::shared_ptr<VPackBuilder> bodyBuilder = parseVelocyPackBody();
  if (bodyBuilder == nullptr) {
    LOG_TOPIC(ERR, arangodb::Logger::AQL) << "Failed to setup query. Could not "
                                             "parse the transmitted plan. "
                                             "Aborting query.";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN);
    return;
  }

  VPackSlice querySlice = bodyBuilder->slice();
  VPackSlice lockInfoSlice = querySlice.get("lockInfo");

  if (!lockInfoSlice.isObject()) {
    LOG_TOPIC(ERR, arangodb::Logger::AQL)
        << "Invalid VelocyPack: \"lockInfo\" is required but not an object.";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"lockInfo\"");
    return;
  }

  VPackSlice optionsSlice = querySlice.get("options");
  if (!optionsSlice.isObject()) {
    LOG_TOPIC(ERR, arangodb::Logger::AQL)
        << "Invalid VelocyPack: \"options\" attribute missing.";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"options\"");
    return;
  }

  VPackSlice snippetsSlice = querySlice.get("snippets");
  if (!snippetsSlice.isObject()) {
    LOG_TOPIC(ERR, arangodb::Logger::AQL)
        << "Invalid VelocyPack: \"snippets\" attribute missing.";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"snippets\"");
    return;
  }

  VPackSlice traverserSlice = querySlice.get("traverserEngines");
  if (!traverserSlice.isNone() && !traverserSlice.isArray()) {
    LOG_TOPIC(ERR, arangodb::Logger::AQL)
        << "Invalid VelocyPack: \"traverserEngines\" attribute is not an array.";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "if \"traverserEngines\" is set in the body, it has to be an array");
    return;
  }

  VPackSlice variablesSlice = querySlice.get("variables");
  if (!variablesSlice.isArray()) {
    LOG_TOPIC(ERR, arangodb::Logger::AQL)
        << "Invalid VelocyPack: \"variables\" attribute missing.";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"variables\"");
    return;
  }

  // Now we need to create a shared_ptr<VPackBuilder>
  // that contains the old-style cluster snippet, in order
  // to prepare a Query object.
  // This old snippet is created as follows:
  //
  // {
  //   collections: [ { name: "xyz", type: "READ" }, { name: "abc", type: "WRITE" } ],
  //   initialize: false,
  //   nodes: <one of snippets[*].value>,
  //   variables: <variables slice>
  // }

  auto options = std::make_shared<VPackBuilder>(
      VPackBuilder::clone(optionsSlice));

  // Build the collection information
  VPackBuilder collectionBuilder;
  collectionBuilder.openArray();
  for (auto const& lockInf : VPackObjectIterator(lockInfoSlice)) {
    if (!lockInf.value.isArray()) {
      LOG_TOPIC(ERR, arangodb::Logger::AQL)
          << "Invalid VelocyPack: \"lockInfo." << lockInf.key.copyString()
          << "\" is required but not an array.";
      generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                    "body must be an object with attribute: \"lockInfo." + lockInf.key.copyString() + "\" is required but not an array.");
      return;
    }
    for (auto const& col : VPackArrayIterator(lockInf.value)) {
      if (!col.isString()) {
        LOG_TOPIC(ERR, arangodb::Logger::AQL)
            << "Invalid VelocyPack: \"lockInfo." << lockInf.key.copyString()
            << "\" must contain only collection name strings.";
        generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                      "body must be an object with attribute: \"lockInfo." + lockInf.key.copyString() + "\" must contain only collection name strings.");
        return;
      }
      collectionBuilder.openObject();
      collectionBuilder.add("name", col);
      collectionBuilder.add("type", lockInf.key);
      collectionBuilder.close();
    }
  }
  collectionBuilder.close();

  // Now the query is ready to go, store it in the registry and return:
  double ttl = 600.0;
  bool found;
  std::string const& ttlstring = _request->header("ttl", found);

  if (found) {
    ttl = arangodb::basics::StringUtils::doubleDecimal(ttlstring);
  }

  VPackBuilder answerBuilder;
  answerBuilder.openObject();
  bool needToLock = true;
  bool res = false;
  res = registerSnippets(snippetsSlice, collectionBuilder.slice(), variablesSlice,
                         options, ttl, needToLock, answerBuilder);
  if (!res) {
    // TODO: do we need to trigger cleanup here?
    // Registering the snippets failed.
    return;
  }

  if (!traverserSlice.isNone()) {
    res = registerTraverserEngines(traverserSlice, needToLock, ttl, answerBuilder);

    if (!res) {
      // TODO: do we need to trigger cleanup here?
      // Registering the traverser engines failed.
      return;
    }
  }

  answerBuilder.close();

  generateOk(rest::ResponseCode::OK, answerBuilder.slice());
}

bool RestAqlHandler::registerSnippets(
    VPackSlice const snippetsSlice,
    VPackSlice const collectionSlice,
    VPackSlice const variablesSlice,
    std::shared_ptr<VPackBuilder> options,
    double const ttl,
    bool& needToLock,
    VPackBuilder& answerBuilder) {
  TRI_ASSERT(answerBuilder.isOpenObject());
  answerBuilder.add(VPackValue("snippets"));
  answerBuilder.openObject();
  // NOTE: We need to clean up all engines if we bail out during the following
  // loop
  for (auto const& it : VPackObjectIterator(snippetsSlice)) {
    auto planBuilder = std::make_shared<VPackBuilder>();
    planBuilder->openObject();
    planBuilder->add(VPackValue("collections"));
    planBuilder->add(collectionSlice);

    // hard-code initialize: false
    planBuilder->add("initialize", VPackValue(false));

    planBuilder->add(VPackValue("nodes"));
    planBuilder->add(it.value.get("nodes"));

    planBuilder->add(VPackValue("variables"));
    planBuilder->add(variablesSlice);
    planBuilder->close();  // base object

    // All snippets know all collections.
    // The first snippet will provide proper locking.
    auto query = std::make_unique<Query>(false, _vocbase, planBuilder, options,
                                         (needToLock ? PART_MAIN : PART_DEPENDENT));
    try {
      query->prepare(_queryRegistry, 0);
    } catch (std::exception const& ex) {
      LOG_TOPIC(ERR, arangodb::Logger::AQL)
          << "failed to instantiate the query: " << ex.what();
      generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN,
                    ex.what());
      return false;
    } catch (...) {
      LOG_TOPIC(ERR, arangodb::Logger::AQL)
          << "failed to instantiate the query";
      generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN);
      return false;
    }

    try {
      QueryId qId = TRI_NewTickServer();

      if (needToLock) {
        // Directly try to lock; only the first snippet is required to be
        // locked. For all others locking is pointless.
        needToLock = false;

        {
          JobGuard guard(SchedulerFeature::SCHEDULER);
          guard.block();

          try {
            int res = query->trx()->lockCollections();
            if (res != TRI_ERROR_NO_ERROR) {
              generateError(rest::ResponseCode::SERVER_ERROR,
                            res, TRI_errno_string(res));
              return false;
            }
          } catch (basics::Exception const& e) {
            generateError(rest::ResponseCode::SERVER_ERROR,
                          e.code(), e.message());
            return false;
          } catch (...) {
            generateError(rest::ResponseCode::SERVER_ERROR,
                          TRI_ERROR_HTTP_SERVER_ERROR,
                          "Unable to lock all collections.");
            return false;
          }
          // If we get here we successfully locked the collections.
          // If we bail out up to this point nothing is kept alive.
          // No need to cleanup...
        }
      }

      _queryRegistry->insert(qId, query.get(), ttl);
      query.release();
      answerBuilder.add(it.key);
      answerBuilder.add(VPackValue(arangodb::basics::StringUtils::itoa(qId)));
    } catch (...) {
      LOG_TOPIC(ERR, arangodb::Logger::AQL)
          << "could not keep query in registry";
      generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                    "could not keep query in registry");
      return false;
    }
  }
  answerBuilder.close();  // snippets

  return true;
}

bool RestAqlHandler::registerTraverserEngines(VPackSlice const traverserEngines, bool& needToLock, double ttl, VPackBuilder& answerBuilder) {
  TRI_ASSERT(traverserEngines.isArray());

  TRI_ASSERT(answerBuilder.isOpenObject());
  answerBuilder.add(VPackValue("traverserEngines"));
  answerBuilder.openArray();

  for (auto const& te : VPackArrayIterator(traverserEngines)) {
    try {
      traverser::TraverserEngineID id =
          _traverserRegistry->createNew(_vocbase, te, needToLock, ttl);
      needToLock = false;
      TRI_ASSERT(id != 0);
      answerBuilder.add(VPackValue(id));
    } catch (basics::Exception const& e) {
      LOG_TOPIC(ERR, arangodb::Logger::AQL)
          << "Failed to instantiate traverser engines. Reason: " << e.message();
      generateError(rest::ResponseCode::SERVER_ERROR, e.code(), e.message());
      return false;
    } catch (...) {
      LOG_TOPIC(ERR, arangodb::Logger::AQL)
          << "Failed to instantiate traverser engines.";
      generateError(rest::ResponseCode::SERVER_ERROR,
                    TRI_ERROR_HTTP_SERVER_ERROR,
                    "Unable to instantiate traverser engines");
      return false;
    }
  }
  answerBuilder.close();  // traverserEngines
  // Everything went well
  return true;
}

// POST method for /_api/aql/instantiate (internal)
// The body is a VelocyPack with attributes "plan" for the execution plan and
// "options" for the options, all exactly as in AQL_EXECUTEJSON.
void RestAqlHandler::createQueryFromVelocyPack() {
  std::shared_ptr<VPackBuilder> queryBuilder = parseVelocyPackBody();
  if (queryBuilder == nullptr) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "invalid VelocyPack plan in query";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "invalid VelocyPack plan in query";
    return;
  }
  VPackSlice querySlice = queryBuilder->slice();
@ -82,7 +405,8 @@ void RestAqlHandler::createQueryFromVelocyPack() {

  VPackSlice plan = querySlice.get("plan");
  if (plan.isNone()) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Invalid VelocyPack: \"plan\" attribute missing.";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "Invalid VelocyPack: \"plan\" attribute missing.";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"plan\"");
    return;
@ -95,14 +419,17 @@ void RestAqlHandler::createQueryFromVelocyPack() {
      VelocyPackHelper::getStringValue(querySlice, "part", "");

  auto planBuilder = std::make_shared<VPackBuilder>(VPackBuilder::clone(plan));
  auto query = std::make_unique<Query>(false, _vocbase, planBuilder, options,
                                       (part == "main" ? PART_MAIN : PART_DEPENDENT));

  auto query =
      std::make_unique<Query>(false, _vocbase, planBuilder, options,
                              (part == "main" ? PART_MAIN : PART_DEPENDENT));

  try {
    query->prepare(_queryRegistry, 0);
  } catch (std::exception const& ex) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what();
    generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what());
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "failed to instantiate the query: " << ex.what();
    generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN,
                  ex.what());
    return;
  } catch (...) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query";
@ -126,8 +453,10 @@ void RestAqlHandler::createQueryFromVelocyPack() {
    _queryRegistry->insert(_qId, query.get(), ttl);
    query.release();
  } catch (...) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL, "could not keep query in registry");
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "could not keep query in registry";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "could not keep query in registry");
    return;
  }

@ -148,7 +477,8 @@ void RestAqlHandler::createQueryFromVelocyPack() {
    return;
  }

  sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext);
  sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(),
               transactionContext);
}

// POST method for /_api/aql/parse (internal)
@ -159,7 +489,8 @@ void RestAqlHandler::createQueryFromVelocyPack() {
void RestAqlHandler::parseQuery() {
  std::shared_ptr<VPackBuilder> bodyBuilder = parseVelocyPackBody();
  if (bodyBuilder == nullptr) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "invalid VelocyPack plan in query";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "invalid VelocyPack plan in query";
    return;
  }
  VPackSlice querySlice = bodyBuilder->slice();
@ -167,17 +498,20 @@ void RestAqlHandler::parseQuery() {
  std::string const queryString =
      VelocyPackHelper::getStringValue(querySlice, "query", "");
  if (queryString.empty()) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "body must be an object with attribute \"query\"";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "body must be an object with attribute \"query\"";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"query\"");
    return;
  }

  auto query = std::make_unique<Query>(false, _vocbase, QueryString(queryString),
                                       std::shared_ptr<VPackBuilder>(), nullptr, PART_MAIN);
  auto query = std::make_unique<Query>(
      false, _vocbase, QueryString(queryString),
      std::shared_ptr<VPackBuilder>(), nullptr, PART_MAIN);
  QueryResult res = query->parse();
  if (res.code != TRI_ERROR_NO_ERROR) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the Query: " << res.details;
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "failed to instantiate the Query: " << res.details;
    generateError(rest::ResponseCode::BAD, res.code, res.details);
    return;
  }

@ -235,7 +569,8 @@ void RestAqlHandler::explainQuery() {
  std::string queryString =
      VelocyPackHelper::getStringValue(querySlice, "query", "");
  if (queryString.empty()) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "body must be an object with attribute \"query\"";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "body must be an object with attribute \"query\"";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"query\"");
    return;
@ -247,12 +582,12 @@ void RestAqlHandler::explainQuery() {
  auto options = std::make_shared<VPackBuilder>(
      VPackBuilder::clone(querySlice.get("options")));

  auto query =
      std::make_unique<Query>(false, _vocbase, QueryString(queryString),
                              bindVars, options, PART_MAIN);
  auto query = std::make_unique<Query>(
      false, _vocbase, QueryString(queryString), bindVars, options, PART_MAIN);
  QueryResult res = query->explain();
  if (res.code != TRI_ERROR_NO_ERROR) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the Query: " << res.details;
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "failed to instantiate the Query: " << res.details;
    generateError(rest::ResponseCode::BAD, res.code, res.details);
    return;
  }

@ -300,7 +635,8 @@ void RestAqlHandler::createQueryFromString() {
  std::string const queryString =
      VelocyPackHelper::getStringValue(querySlice, "query", "");
  if (queryString.empty()) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "body must be an object with attribute \"query\"";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "body must be an object with attribute \"query\"";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"query\"");
    return;
@ -309,7 +645,8 @@ void RestAqlHandler::createQueryFromString() {
  std::string const part =
      VelocyPackHelper::getStringValue(querySlice, "part", "");
  if (part.empty()) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "body must be an object with attribute \"part\"";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "body must be an object with attribute \"part\"";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "body must be an object with attribute \"part\"");
    return;
@ -320,18 +657,21 @@ void RestAqlHandler::createQueryFromString() {
  auto options = std::make_shared<VPackBuilder>(
      VPackBuilder::clone(querySlice.get("options")));

  auto query = std::make_unique<Query>(false, _vocbase, QueryString(queryString),
                                       bindVars, options,
                                       (part == "main" ? PART_MAIN : PART_DEPENDENT));

  auto query = std::make_unique<Query>(
      false, _vocbase, QueryString(queryString), bindVars, options,
      (part == "main" ? PART_MAIN : PART_DEPENDENT));

  try {
    query->prepare(_queryRegistry, 0);
  } catch (std::exception const& ex) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what();
    generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what());
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "failed to instantiate the query: " << ex.what();
    generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN,
                  ex.what());
    return;
  } catch (...) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "failed to instantiate the query";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN);
    return;
  }

@ -352,7 +692,8 @@ void RestAqlHandler::createQueryFromString() {
    _queryRegistry->insert(_qId, query.get(), ttl);
    query.release();
  } catch (...) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "could not keep query in registry";
    generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
                  "could not keep query in registry");
    return;
@ -376,7 +717,8 @@ void RestAqlHandler::createQueryFromString() {
    return;
  }

  sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext);
  sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(),
               transactionContext);
}

// PUT method for /_api/aql/<operation>/<queryId> (internal)
@ -460,15 +802,18 @@ void RestAqlHandler::useQuery(std::string const& operation,
  } catch (std::exception const& ex) {
    _queryRegistry->close(_vocbase, _qId);

    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed during use of Query: " << ex.what();
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed during use of Query: "
                                            << ex.what();

    generateError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR, ex.what());
    generateError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
                  ex.what());
  } catch (...) {
    _queryRegistry->close(_vocbase, _qId);
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed during use of Query: Unknown exception occurred";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "failed during use of Query: Unknown exception occurred";

    generateError(rest::ResponseCode::SERVER_ERROR,
                  TRI_ERROR_HTTP_SERVER_ERROR, "an unknown exception occurred");
    generateError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
                  "an unknown exception occurred");
  }
}

||||
|
@ -523,7 +868,8 @@ void RestAqlHandler::getInfoQuery(std::string const& operation,
|
|||
auto block = static_cast<BlockWithClients*>(query->engine()->root());
|
||||
if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
|
||||
block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
|
||||
"unexpected node type");
|
||||
}
|
||||
number = block->remainingForShard(shardId);
|
||||
}
|
||||
|
@ -540,7 +886,8 @@ void RestAqlHandler::getInfoQuery(std::string const& operation,
    auto block = static_cast<BlockWithClients*>(query->engine()->root());
    if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
        block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                     "unexpected node type");
    }
    hasMore = block->hasMoreForShard(shardId);
  }
@ -549,8 +896,7 @@ void RestAqlHandler::getInfoQuery(std::string const& operation,
  } else {
    _queryRegistry->close(_vocbase, _qId);
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "referenced query not found";
    generateError(rest::ResponseCode::NOT_FOUND,
                  TRI_ERROR_HTTP_NOT_FOUND);
    generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
    return;
  }

@ -562,16 +908,19 @@ void RestAqlHandler::getInfoQuery(std::string const& operation,
  } catch (std::exception const& ex) {
    _queryRegistry->close(_vocbase, _qId);

    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed during use of query: " << ex.what();
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed during use of query: "
                                            << ex.what();

    generateError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR, ex.what());
    generateError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
                  ex.what());
  } catch (...) {
    _queryRegistry->close(_vocbase, _qId);

    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed during use of query: Unknown exception occurred";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME)
        << "failed during use of query: Unknown exception occurred";

    generateError(rest::ResponseCode::SERVER_ERROR,
                  TRI_ERROR_HTTP_SERVER_ERROR, "an unknown exception occurred");
    generateError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
                  "an unknown exception occurred");
  }

  _queryRegistry->close(_vocbase, _qId);
@ -595,8 +944,7 @@ RestStatus RestAqlHandler::execute() {
  switch (type) {
    case rest::RequestType::POST: {
      if (suffixes.size() != 1) {
        generateError(rest::ResponseCode::NOT_FOUND,
                      TRI_ERROR_HTTP_NOT_FOUND);
        generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
      } else if (suffixes[0] == "instantiate") {
        createQueryFromVelocyPack();
      } else if (suffixes[0] == "parse") {

@ -605,6 +953,8 @@ RestStatus RestAqlHandler::execute() {
        explainQuery();
      } else if (suffixes[0] == "query") {
        createQueryFromString();
      } else if (suffixes[0] == "setup") {
        setupClusterQuery();
      } else {
        LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Unknown API";
        generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND, "Unknown API");
@ -679,8 +1029,7 @@ bool RestAqlHandler::findQuery(std::string const& idString, Query*& query) {

  if (query == nullptr) {
    _qId = 0;
    generateError(rest::ResponseCode::NOT_FOUND,
                  TRI_ERROR_QUERY_NOT_FOUND);
    generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_QUERY_NOT_FOUND);
    return true;
  }

@ -742,7 +1091,8 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
    auto block = static_cast<BlockWithClients*>(query->engine()->root());
    if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
        block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                     "unexpected node type");
    }
    items.reset(block->getSomeForShard(atLeast, atMost, shardId));
  }
@ -773,7 +1123,8 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
        static_cast<BlockWithClients*>(query->engine()->root());
    if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
        block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                     "unexpected node type");
    }
    skipped = block->skipSomeForShard(atLeast, atMost, shardId);
  }
@ -803,7 +1154,8 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
        static_cast<BlockWithClients*>(query->engine()->root());
    if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
        block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                     "unexpected node type");
    }
    exhausted = block->skipForShard(number, shardId);
  }
@ -848,7 +1200,8 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
                             true)) {
      res = query->engine()->initializeCursor(nullptr, 0);
    } else {
      items.reset(new AqlItemBlock(query->resourceMonitor(), querySlice.get("items")));
      items.reset(new AqlItemBlock(query->resourceMonitor(),
                                   querySlice.get("items")));
      res = query->engine()->initializeCursor(items.get(), pos);
    }
  } catch (arangodb::basics::Exception const& ex) {
@ -901,11 +1254,11 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
      answerBuilder.add(StaticStrings::Error, VPackValue(res != TRI_ERROR_NO_ERROR));
      answerBuilder.add(StaticStrings::Code, VPackValue(res));
    } else {
      generateError(rest::ResponseCode::NOT_FOUND,
                    TRI_ERROR_HTTP_NOT_FOUND);
      generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
      return;
    }
  }

  sendResponse(rest::ResponseCode::OK, answerBuilder.slice(),
               transactionContext.get());
} catch (arangodb::basics::Exception const& ex) {
@ -923,8 +1276,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
// extract the VelocyPack from the request
std::shared_ptr<VPackBuilder> RestAqlHandler::parseVelocyPackBody() {
  try {
    std::shared_ptr<VPackBuilder> body =
        _request->toVelocyPackBuilderPtr();
    std::shared_ptr<VPackBuilder> body = _request->toVelocyPackBuilderPtr();
    if (body == nullptr) {
      LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot parse json object";
      generateError(rest::ResponseCode::BAD,

@ -934,25 +1286,25 @@ std::shared_ptr<VPackBuilder> RestAqlHandler::parseVelocyPackBody() {
    VPackSlice tmp = body->slice();
    if (!tmp.isObject()) {
      // Validate the input has correct format.
      LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "body of request must be a VelocyPack object";
      generateError(rest::ResponseCode::BAD,
                    TRI_ERROR_HTTP_BAD_PARAMETER,
      LOG_TOPIC(ERR, arangodb::Logger::FIXME)
          << "body of request must be a VelocyPack object";
      generateError(rest::ResponseCode::BAD, TRI_ERROR_HTTP_BAD_PARAMETER,
                    "body of request must be a VelocyPack object");
      return nullptr;
    }
    return body;
  } catch (...) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot parse json object";
    generateError(rest::ResponseCode::BAD,
                  TRI_ERROR_HTTP_CORRUPTED_JSON, "cannot parse json object");
    generateError(rest::ResponseCode::BAD, TRI_ERROR_HTTP_CORRUPTED_JSON,
                  "cannot parse json object");
  }
  return nullptr;
}

// Send slice as result with the given response type.
void RestAqlHandler::sendResponse(
    rest::ResponseCode code, VPackSlice const slice,
    transaction::Context* transactionContext) {
void RestAqlHandler::sendResponse(rest::ResponseCode code,
                                  VPackSlice const slice,
                                  transaction::Context* transactionContext) {
  resetResponse(code);
  writeResult(slice, *(transactionContext->getVPackOptionsForDump()));
}

@ -25,7 +25,6 @@
#define ARANGOD_AQL_REST_AQL_HANDLER_H 1

#include "Basics/Common.h"
#include "Aql/QueryRegistry.h"
#include "Aql/types.h"
#include "RestHandler/RestVocbaseBaseHandler.h"
#include "RestServer/VocbaseContext.h"

@ -33,12 +32,20 @@
struct TRI_vocbase_t;

namespace arangodb {

namespace traverser {
class TraverserEngineRegistry;
}

namespace aql {
class Query;
class QueryRegistry;

/// @brief shard control request handler
class RestAqlHandler : public RestVocbaseBaseHandler {
 public:
  RestAqlHandler(GeneralRequest*, GeneralResponse*, QueryRegistry*);
  RestAqlHandler(GeneralRequest*, GeneralResponse*,
                 std::pair<QueryRegistry*, traverser::TraverserEngineRegistry*>*);

 public:
  char const* name() const override final { return "RestAqlHandler"; }
@ -100,6 +107,45 @@ class RestAqlHandler : public RestVocbaseBaseHandler {
|
|||
void getInfoQuery(std::string const& operation, std::string const& idString);
|
||||
|
||||
private:
|
||||
|
||||
// POST method for /_api/aql/setup (internal)
|
||||
// Only available on DBServers in the Cluster.
|
||||
// This route sets-up all the query engines required
|
||||
// for a complete query on this server.
|
||||
// Furthermore it directly locks all shards for this query.
|
||||
// So after this route the query is ready to go.
|
||||
// NOTE: As this Route LOCKS the collections, the caller
|
||||
// is responsible to destroy those engines in a timely
|
||||
// manner, if the engines are not called for a period
|
||||
// of time, they will be garbage-collected and unlocked.
|
||||
// The body is a VelocyPack with the following layout:
|
||||
// {
|
||||
// lockInfo: {
|
||||
// READ: [<collections to read-lock],
|
||||
// WRITE: [<collections to write-lock]
|
||||
// },
|
||||
// options: { < query options > },
|
||||
// snippets: {
|
||||
// <queryId: {nodes: [ <nodes>]}>
|
||||
// },
|
||||
// variables: [ <variables> ]
|
||||
// }
|
||||
|
||||
void setupClusterQuery();
|
||||
|
||||
bool registerSnippets(arangodb::velocypack::Slice const snippets,
|
||||
arangodb::velocypack::Slice const collections,
|
||||
arangodb::velocypack::Slice const variables,
|
||||
std::shared_ptr<arangodb::velocypack::Builder> options,
|
||||
double const ttl,
|
||||
bool& needToLock,
|
||||
arangodb::velocypack::Builder& answer);
|
||||
|
||||
bool registerTraverserEngines(arangodb::velocypack::Slice const traversers,
|
||||
bool& needToLock,
|
||||
double const ttl,
|
||||
arangodb::velocypack::Builder& answer);
|
||||
|
||||
// Send slice as result with the given response type.
|
||||
void sendResponse(rest::ResponseCode,
|
||||
arangodb::velocypack::Slice const, transaction::Context*);
|
||||
|
@ -123,6 +169,9 @@ class RestAqlHandler : public RestVocbaseBaseHandler {
|
|||
// our query registry
|
||||
QueryRegistry* _queryRegistry;
|
||||
|
||||
// our traversal engine registry
|
||||
traverser::TraverserEngineRegistry* _traverserRegistry;
|
||||
|
||||
// id of current query
|
||||
QueryId _qId;
|
||||
};
|
||||
|
|
|
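As an aside, a minimal sketch of how a coordinator could assemble a setup body matching the layout documented above, using the VelocyPack builder API. The collection names are placeholders, not values taken from the commit:

#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

// Hedged sketch only: builds a body in the documented /_api/aql/setup shape.
VPackBuilder body;
body.openObject();
body.add("lockInfo", VPackValue(VPackValueType::Object));
body.add("READ", VPackValue(VPackValueType::Array));
body.add(VPackValue("someReadCollection"));   // placeholder name
body.close();  // READ
body.add("WRITE", VPackValue(VPackValueType::Array));
body.add(VPackValue("someWriteCollection"));  // placeholder name
body.close();  // WRITE
body.close();  // lockInfo
body.add("options", VPackValue(VPackValueType::Object));
body.close();  // options (left empty in this sketch)
body.add("snippets", VPackValue(VPackValueType::Object));
body.close();  // snippets (left empty in this sketch)
body.add("variables", VPackValue(VPackValueType::Array));
body.close();  // variables
body.close();  // top-level object
// body.slice() would then be POSTed to /_api/aql/setup.
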
@@ -169,6 +169,7 @@ SET(ARANGOD_SOURCES
  Aql/AqlFunctionFeature.cpp
  Aql/AqlItemBlock.cpp
  Aql/AqlItemBlockManager.cpp
+  Aql/AqlResult.cpp
  Aql/AqlTransaction.cpp
  Aql/AqlValue.cpp
  Aql/Ast.cpp

@@ -190,6 +191,8 @@ SET(ARANGOD_SOURCES
  Aql/ConditionFinder.cpp
  Aql/DocumentProducingBlock.cpp
  Aql/DocumentProducingNode.cpp
+  Aql/EngineInfoContainerCoordinator.cpp
+  Aql/EngineInfoContainerDBServer.cpp
  Aql/EnumerateCollectionBlock.cpp
  Aql/EnumerateListBlock.cpp
  Aql/ExecutionBlock.cpp

@@ -1058,6 +1058,37 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
  return nrGood;
}

+////////////////////////////////////////////////////////////////////////////////
+/// @brief this method performs the given requests described by the vector
+/// of ClusterCommRequest structs in the following way:
+/// Each request is done with asyncRequest.
+/// After each request is successfully sent out, we drop all requests.
+/// Hence it is guaranteed that all requests are sent, but
+/// we will not wait for the answers to those requests.
+/// Also all reporting for the responses is lost, because we do not care.
+/// NOTE: The requests can be in any communication state after this function
+/// and you should not read them. If you care about the response, use
+/// performRequests instead.
+////////////////////////////////////////////////////////////////////////////////
+
+void ClusterComm::fireAndForgetRequests(std::vector<ClusterCommRequest>& requests) {
+  if (requests.size() == 0) {
+    return;
+  }
+
+  CoordTransactionID coordinatorTransactionID = TRI_NewTickServer();
+
+  double const shortTimeout = 10.0;  // Picked arbitrarily
+  for (auto& req : requests) {
+    asyncRequest("", coordinatorTransactionID, req.destination, req.requestType,
+                 req.path, req.body, req.headerFields, nullptr, shortTimeout, false,
+                 2.0);
+  }
+  // Forget about it
+  drop("", coordinatorTransactionID, 0, "");
+}
+
//////////////////////////////////////////////////////////////////////////////
/// @brief this is the fast path method for performRequests for the case
/// of only a single request in the vector. In this case we can use a single

@@ -363,6 +363,19 @@ struct ClusterCommRequest {
        body(body),
        done(false) {}

+  ClusterCommRequest(std::string const& dest, rest::RequestType type,
+                     std::string const& path,
+                     std::shared_ptr<std::string const> body,
+                     std::unique_ptr<std::unordered_map<std::string, std::string>>& headers)
+      : destination(dest),
+        requestType(type),
+        path(path),
+        body(body),
+        done(false) {
+    headerFields = std::move(headers);
+  }
+
  void setHeaders(
      std::unique_ptr<std::unordered_map<std::string, std::string>>& headers) {
    headerFields = std::move(headers);

@@ -523,6 +536,20 @@ class ClusterComm {
      arangodb::LogTopic const& logTopic,
      bool retryOnCollNotFound);

+  ////////////////////////////////////////////////////////////////////////////
+  /// @brief this method performs the given requests described by the vector
+  /// of ClusterCommRequest structs in the following way:
+  /// Each request is done with asyncRequest.
+  /// After each request is successfully sent out, we drop all requests.
+  /// Hence it is guaranteed that all requests are sent, but
+  /// we will not wait for the answers to those requests.
+  /// Also all reporting for the responses is lost, because we do not care.
+  /// NOTE: The requests can be in any communication state after this function
+  /// and you should not read them. If you care about the response, use
+  /// performRequests instead.
+  ////////////////////////////////////////////////////////////////////////////
+  void fireAndForgetRequests(std::vector<ClusterCommRequest>& requests);
+
  std::shared_ptr<communicator::Communicator> communicator() {
    return _communicator;
  }

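A minimal usage sketch of the new fire-and-forget path, assuming a running cluster with a reachable ClusterComm instance. The destination string and path here are purely illustrative:

// Sketch only: send one POST per target and ignore all responses.
std::vector<ClusterCommRequest> requests;
auto body = std::make_shared<std::string>("{\"foo\":\"bar\"}");
requests.emplace_back("server:DBServer0001",  // hypothetical destination
                      arangodb::rest::RequestType::POST,
                      "/_api/some-internal-endpoint",  // hypothetical path
                      body);
auto cc = ClusterComm::instance();
cc->fireAndForgetRequests(requests);  // returns immediately; replies are dropped
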
@@ -888,7 +888,8 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
////////////////////////////////////////////////////////////////////////////////

int countOnCoordinator(std::string const& dbname, std::string const& collname,
-                       std::vector<std::pair<std::string, uint64_t>>& result) {
+                       std::vector<std::pair<std::string, uint64_t>>& result,
+                       bool sendNoLockHeader) {
  // Set a few variables needed for our work:
  ClusterInfo* ci = ClusterInfo::instance();
  auto cc = ClusterComm::instance();

@@ -911,12 +912,24 @@ int countOnCoordinator(std::string const& dbname, std::string const& collname,
  auto shards = collinfo->shardIds();
  std::vector<ClusterCommRequest> requests;
  auto body = std::make_shared<std::string>();
-  for (auto const& p : *shards) {
-    requests.emplace_back("shard:" + p.first,
-                          arangodb::rest::RequestType::GET,
-                          "/_db/" + StringUtils::urlEncode(dbname) +
-                              "/_api/collection/" +
-                              StringUtils::urlEncode(p.first) + "/count", body);
-  }
+  if (sendNoLockHeader) {
+    for (auto const& p : *shards) {
+      auto headers = std::make_unique<std::unordered_map<std::string, std::string>>();
+      headers->emplace("x-arango-nolock", p.first);
+      requests.emplace_back(
+          "shard:" + p.first, arangodb::rest::RequestType::GET,
+          "/_db/" + StringUtils::urlEncode(dbname) + "/_api/collection/" +
+              StringUtils::urlEncode(p.first) + "/count",
+          body, headers);
+    }
+  } else {
+    for (auto const& p : *shards) {
+      requests.emplace_back("shard:" + p.first,
+                            arangodb::rest::RequestType::GET,
+                            "/_db/" + StringUtils::urlEncode(dbname) +
+                                "/_api/collection/" +
+                                StringUtils::urlEncode(p.first) + "/count", body);
+    }
+  }
  size_t nrDone = 0;
  cc->performRequests(requests, CL_DEFAULT_TIMEOUT, nrDone, Logger::QUERIES, true);

@@ -87,7 +87,8 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
////////////////////////////////////////////////////////////////////////////////

int countOnCoordinator(std::string const& dbname, std::string const& collname,
-                       std::vector<std::pair<std::string, uint64_t>>& result);
+                       std::vector<std::pair<std::string, uint64_t>>& result,
+                       bool sendNoLockHeader);

int selectivityEstimatesOnCoordinator(std::string const& dbname, std::string const& collname,
                                      std::unordered_map<std::string, double>& result);

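For illustration, a caller-side sketch of the changed signature; the database and collection names are placeholders. The function fills one (shard, count) pair per shard, which the caller can aggregate:

std::vector<std::pair<std::string, uint64_t>> counts;
int res = countOnCoordinator("mydb", "mycol", counts, /*sendNoLockHeader*/ true);
if (res == TRI_ERROR_NO_ERROR) {
  uint64_t total = 0;
  for (auto const& shardCount : counts) {
    total += shardCount.second;  // .first is the shard name
  }
}
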
@@ -27,6 +27,7 @@
#include "Aql/Query.h"
#include "Aql/QueryString.h"
#include "Basics/Exceptions.h"
+#include "Basics/Result.h"
#include "Graph/EdgeCursor.h"
#include "Graph/ShortestPathOptions.h"
#include "Graph/TraverserCache.h"

@@ -51,7 +52,28 @@ static const std::string TYPE = "type";
static const std::string VARIABLES = "variables";
static const std::string VERTICES = "vertices";

+#ifndef USE_ENTERPRISE
+std::unique_ptr<BaseEngine> BaseEngine::BuildEngine(TRI_vocbase_t* vocbase,
+                                                    VPackSlice info,
+                                                    bool needToLock) {
+  VPackSlice type = info.get(std::vector<std::string>({"options", "type"}));
+  if (!type.isString()) {
+    THROW_ARANGO_EXCEPTION_MESSAGE(
+        TRI_ERROR_BAD_PARAMETER,
+        "The body requires an 'options.type' attribute.");
+  }
+  if (type.isEqualString("traversal")) {
+    return std::make_unique<TraverserEngine>(vocbase, info, needToLock);
+  } else if (type.isEqualString("shortestPath")) {
+    return std::make_unique<ShortestPathEngine>(vocbase, info, needToLock);
+  }
+  THROW_ARANGO_EXCEPTION_MESSAGE(
+      TRI_ERROR_BAD_PARAMETER,
+      "The 'options.type' attribute either has to be traversal or shortestPath");
+}
+#endif
+
-BaseEngine::BaseEngine(TRI_vocbase_t* vocbase, VPackSlice info)
+BaseEngine::BaseEngine(TRI_vocbase_t* vocbase, VPackSlice info, bool needToLock)
    : _query(nullptr), _trx(nullptr), _collections(vocbase) {
  VPackSlice shardsSlice = info.get(SHARDS);
  if (shardsSlice.isNone() || !shardsSlice.isObject()) {

@@ -121,6 +143,9 @@ BaseEngine::BaseEngine(TRI_vocbase_t* vocbase, VPackSlice info)
      trxOpts, true);
#endif

+  if (!needToLock) {
+    _trx->addHint(transaction::Hints::Hint::LOCK_NEVER);
+  }
  // true here as last argument is crucial: it leads to the fact that the
  // created transaction is considered a "MAIN" part and will not switch
  // off collection locking completely!

@@ -179,6 +204,10 @@ bool BaseEngine::lockCollection(std::string const& shard) {
  return true;
}

+Result BaseEngine::lockAll() {
+  return _trx->lockCollections();
+}
+
std::shared_ptr<transaction::Context> BaseEngine::context() const {
  return _trx->transactionContext();
}

@@ -236,8 +265,9 @@ void BaseEngine::getVertexData(VPackSlice vertex, VPackBuilder& builder) {
}

BaseTraverserEngine::BaseTraverserEngine(TRI_vocbase_t* vocbase,
-                                         VPackSlice info)
-    : BaseEngine(vocbase, info), _opts(nullptr) {}
+                                         VPackSlice info,
+                                         bool needToLock)
+    : BaseEngine(vocbase, info, needToLock), _opts(nullptr) {}

BaseTraverserEngine::~BaseTraverserEngine() {}

@@ -368,8 +398,9 @@ void BaseTraverserEngine::getVertexData(VPackSlice vertex, size_t depth,
}

ShortestPathEngine::ShortestPathEngine(TRI_vocbase_t* vocbase,
-                                       arangodb::velocypack::Slice info)
-    : BaseEngine(vocbase, info) {
+                                       arangodb::velocypack::Slice info,
+                                       bool needToLock)
+    : BaseEngine(vocbase, info, needToLock) {
  VPackSlice optsSlice = info.get(OPTIONS);
  if (optsSlice.isNone() || !optsSlice.isObject()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(

@@ -465,8 +496,9 @@ void ShortestPathEngine::getEdges(VPackSlice vertex, bool backward,
}

TraverserEngine::TraverserEngine(TRI_vocbase_t* vocbase,
-                                 arangodb::velocypack::Slice info)
-    : BaseTraverserEngine(vocbase, info) {
+                                 arangodb::velocypack::Slice info, bool needToLock)
+    : BaseTraverserEngine(vocbase, info, needToLock) {

  VPackSlice optsSlice = info.get(OPTIONS);
  if (!optsSlice.isObject()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(

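For illustration, a sketch of the minimal info slice the BuildEngine factory above would dispatch on. Only the options.type attribute is shown; the real payload also carries the shards information that the BaseEngine constructor reads, so this is not a complete engine description:

#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

VPackBuilder info;
info.openObject();
info.add("options", VPackValue(VPackValueType::Object));
info.add("type", VPackValue("traversal"));  // or "shortestPath"
info.close();  // options
info.close();  // top-level object
// BaseEngine::BuildEngine(vocbase, info.slice(), /*needToLock*/ true)
// would dispatch this to a TraverserEngine.
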
@@ -30,7 +30,9 @@
struct TRI_vocbase_t;

namespace arangodb {

+class Result;
+
namespace transaction {
class Methods;
class Context;

@@ -68,9 +70,10 @@ class BaseEngine {
  // does not get informed properly

  static std::unique_ptr<BaseEngine> BuildEngine(TRI_vocbase_t* vocbase,
-                                                 arangodb::velocypack::Slice);
+                                                 arangodb::velocypack::Slice info,
+                                                 bool needToLock);

-  BaseEngine(TRI_vocbase_t*, arangodb::velocypack::Slice);
+  BaseEngine(TRI_vocbase_t* vocbase, arangodb::velocypack::Slice info, bool needToLock);

 public:
  virtual ~BaseEngine();

@@ -83,6 +86,8 @@ class BaseEngine {

  bool lockCollection(std::string const&);

+  Result lockAll();
+
  std::shared_ptr<transaction::Context> context() const;

  virtual EngineType getType() const = 0;

@@ -102,7 +107,7 @@ class BaseTraverserEngine : public BaseEngine {
  // deletes an engine but the registry
  // does not get informed properly

-  BaseTraverserEngine(TRI_vocbase_t*, arangodb::velocypack::Slice);
+  BaseTraverserEngine(TRI_vocbase_t*, arangodb::velocypack::Slice, bool needToLock);

  virtual ~BaseTraverserEngine();

@@ -133,7 +138,7 @@ class ShortestPathEngine : public BaseEngine {
  // deletes an engine but the registry
  // does not get informed properly

-  ShortestPathEngine(TRI_vocbase_t*, arangodb::velocypack::Slice);
+  ShortestPathEngine(TRI_vocbase_t*, arangodb::velocypack::Slice, bool needToLock);

  virtual ~ShortestPathEngine();

@@ -155,7 +160,7 @@ class TraverserEngine : public BaseTraverserEngine {
  // deletes an engine but the registry
  // does not get informed properly

-  TraverserEngine(TRI_vocbase_t*, arangodb::velocypack::Slice);
+  TraverserEngine(TRI_vocbase_t*, arangodb::velocypack::Slice, bool needToLock);

  ~TraverserEngine();

@@ -28,6 +28,7 @@
#include "Basics/ReadLocker.h"
#include "Basics/WriteLocker.h"
#include "Cluster/TraverserEngine.h"
+#include "Logger/Logger.h"
#include "VocBase/ticks.h"

#include <velocypack/Slice.h>

@@ -35,31 +36,12 @@

using namespace arangodb::traverser;

-#ifndef USE_ENTERPRISE
-std::unique_ptr<BaseEngine> BaseEngine::BuildEngine(TRI_vocbase_t* vocbase,
-                                                    VPackSlice info) {
-  VPackSlice type = info.get(std::vector<std::string>({"options", "type"}));
-  if (!type.isString()) {
-    THROW_ARANGO_EXCEPTION_MESSAGE(
-        TRI_ERROR_BAD_PARAMETER,
-        "The body requires an 'options.type' attribute.");
-  }
-  if (type.isEqualString("traversal")) {
-    return std::make_unique<TraverserEngine>(vocbase, info);
-  } else if (type.isEqualString("shortestPath")) {
-    return std::make_unique<ShortestPathEngine>(vocbase, info);
-  }
-  THROW_ARANGO_EXCEPTION_MESSAGE(
-      TRI_ERROR_BAD_PARAMETER,
-      "The 'options.type' attribute either has to be traversal or shortestPath");
-}
-#endif
-
TraverserEngineRegistry::EngineInfo::EngineInfo(TRI_vocbase_t* vocbase,
-                                                VPackSlice info)
+                                                VPackSlice info,
+                                                bool needToLock)
    : _isInUse(false),
      _toBeDeleted(false),
-      _engine(BaseEngine::BuildEngine(vocbase, info)),
+      _engine(BaseEngine::BuildEngine(vocbase, info, needToLock)),
      _timeToLive(0),
      _expires(0) {}

@@ -75,10 +57,12 @@ TraverserEngineRegistry::~TraverserEngineRegistry() {
/// @brief Create a new Engine and return its id
TraverserEngineID TraverserEngineRegistry::createNew(TRI_vocbase_t* vocbase,
                                                     VPackSlice engineInfo,
+                                                     bool needToLock,
                                                     double ttl) {
  TraverserEngineID id = TRI_NewTickServer();
+  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Register TraverserEngine with id " << id;
  TRI_ASSERT(id != 0);
-  auto info = std::make_unique<EngineInfo>(vocbase, engineInfo);
+  auto info = std::make_unique<EngineInfo>(vocbase, engineInfo, needToLock);
  info->_timeToLive = ttl;
  info->_expires = TRI_microtime() + ttl;

@@ -96,11 +80,13 @@ void TraverserEngineRegistry::destroy(TraverserEngineID id) {

/// @brief Get the engine with the given id
BaseEngine* TraverserEngineRegistry::get(TraverserEngineID id) {
+  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Load TraverserEngine with id " << id;
  while (true) {
    {
      WRITE_LOCKER(writeLocker, _lock);
      auto e = _engines.find(id);
      if (e == _engines.end()) {
+        LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "TraverserEngine with id " << id << " not found";
        // Nothing to hand out
        // TODO: Should we throw an error instead?
        return nullptr;

@@ -108,6 +94,7 @@ BaseEngine* TraverserEngineRegistry::get(TraverserEngineID id) {
      if (!e->second->_isInUse) {
        // We capture the engine
        e->second->_isInUse = true;
+        LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "TraverserEngine with id " << id << " is now in use";
        return e->second->_engine.get();
      }
      // Free write lock

@@ -123,10 +110,12 @@ BaseEngine* TraverserEngineRegistry::get(TraverserEngineID id) {

/// @brief Returns the engine to the registry. Someone else can now use it.
void TraverserEngineRegistry::returnEngine(TraverserEngineID id, double ttl) {
+  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Returning TraverserEngine with id " << id;
  WRITE_LOCKER(writeLocker, _lock);
  auto e = _engines.find(id);
  if (e == _engines.end()) {
    // Nothing to return
+    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "TraverserEngine with id " << id << " not found";
    return;
  }
  if (e->second->_isInUse) {

@@ -135,11 +124,14 @@ void TraverserEngineRegistry::returnEngine(TraverserEngineID id, double ttl) {
    auto engine = e->second;
    _engines.erase(e);
    delete engine;
+    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "TraverserEngine with id " << id << " is now deleted";
  } else {
+    if (ttl >= 0.0) {
+      e->second->_timeToLive = ttl;
+    }
    e->second->_expires = TRI_microtime() + e->second->_timeToLive;

+    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "TraverserEngine with id " << id << " is now free";
  }
  // Lock guard: send a signal on the condition variable
  CONDITION_LOCKER(condLocker, _cv);

@@ -149,6 +141,8 @@ void TraverserEngineRegistry::returnEngine(TraverserEngineID id, double ttl) {

/// @brief Destroy the engine with the given id, worker function
void TraverserEngineRegistry::destroy(TraverserEngineID id, bool doLock) {
+  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Destroying TraverserEngine with id " << id;
  EngineInfo* engine = nullptr;

  {

@@ -161,6 +155,7 @@ void TraverserEngineRegistry::destroy(TraverserEngineID id, bool doLock) {
    // TODO what about shard locking?
    // TODO what about multiple dbs?
    if (e->second->_isInUse) {
+      LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "TraverserEngine with id " << id << " still in use, sending kill";
      // Someone is still working with this engine. Mark it as to be deleted
      e->second->_toBeDeleted = true;
      return;

@@ -169,6 +164,7 @@ void TraverserEngineRegistry::destroy(TraverserEngineID id, bool doLock) {
    engine = e->second;
    _engines.erase(id);
  }
+  LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "TraverserEngine with id " << id << " is now destroyed";

  delete engine;
}

@@ -192,6 +188,7 @@ void TraverserEngineRegistry::expireEngines() {

  for (auto& p : toDelete) {
    try {  // just in case
+      LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Destroy TraverserEngine with id " << p << " because of timeout";
      destroy(p, true);
    } catch (...) {
    }

@@ -214,6 +211,7 @@ void TraverserEngineRegistry::destroyAll() {
    }
  }
  for (auto& i : engines) {
+    LOG_TOPIC(DEBUG, arangodb::Logger::AQL) << "Destroy TraverserEngine with id " << i << " due to shutdown";
    destroy(i, true);
  }
}

@@ -43,14 +43,15 @@ class TraverserEngineRegistry {
 public:
  TraverserEngineRegistry() {}

-  ~TraverserEngineRegistry();
+  TEST_VIRTUAL ~TraverserEngineRegistry();

  /// @brief Create a new Engine in the registry.
  /// It can be referred to by the returned
  /// ID. If the returned ID is 0 something
  /// internally went wrong.
-  TraverserEngineID createNew(TRI_vocbase_t*, arangodb::velocypack::Slice,
-                              double ttl = 600.0);
+  TEST_VIRTUAL TraverserEngineID createNew(TRI_vocbase_t*, arangodb::velocypack::Slice,
+                                           bool needToLock,
+                                           double ttl = 600.0);

  /// @brief Get the engine with the given ID.
  /// TODO Test what happens if this pointer

@@ -89,7 +90,7 @@ class TraverserEngineRegistry {
    double _timeToLive;  // in seconds
    double _expires;     // UNIX UTC timestamp for expiration

-    EngineInfo(TRI_vocbase_t*, arangodb::velocypack::Slice);
+    EngineInfo(TRI_vocbase_t*, arangodb::velocypack::Slice, bool needToLock);
    ~EngineInfo();
  };

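A sketch of the registry lifecycle as changed by this commit, assuming a vocbase and an engineInfoSlice are in scope (both placeholders here):

TraverserEngineRegistry registry;
TraverserEngineID id = registry.createNew(vocbase, engineInfoSlice,
                                          /*needToLock*/ true, /*ttl*/ 600.0);
BaseEngine* engine = registry.get(id);      // marks the engine as in use
if (engine != nullptr) {
  // ... run traversal work against the engine ...
  registry.returnEngine(id, /*ttl*/ -1.0);  // negative ttl keeps the old value
}
registry.destroy(id);  // or let expireEngines() collect it after the ttl
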
@@ -334,6 +334,11 @@ void GeneralServerFeature::defineHandlers() {
  auto queryRegistry = QueryRegistryFeature::QUERY_REGISTRY;
  auto traverserEngineRegistry =
      TraverserEngineRegistryFeature::TRAVERSER_ENGINE_REGISTRY;
+  if (_combinedRegistries == nullptr) {
+    _combinedRegistries = std::make_unique<std::pair<aql::QueryRegistry*, traverser::TraverserEngineRegistry*>>(queryRegistry, traverserEngineRegistry);
+  } else {
+    TRI_ASSERT(false);
+  }

  // ...........................................................................
  // /_msg

@@ -421,11 +426,15 @@ void GeneralServerFeature::defineHandlers() {
  _handlerFactory->addPrefixHandler(
      RestVocbaseBaseHandler::VIEW_PATH,
      RestHandlerCreator<RestViewHandler>::createNoData);

+  // This is the only handler where we need to inject
+  // more than one data object, so we created the combinedRegistries
+  // for it.
  _handlerFactory->addPrefixHandler(
      "/_api/aql",
-      RestHandlerCreator<aql::RestAqlHandler>::createData<aql::QueryRegistry*>,
-      queryRegistry);
+      RestHandlerCreator<aql::RestAqlHandler>::createData<
+          std::pair<aql::QueryRegistry*, traverser::TraverserEngineRegistry*>*>,
+      _combinedRegistries.get());

  _handlerFactory->addPrefixHandler(
      "/_api/aql-builtin",

@@ -28,6 +28,15 @@
#include "Basics/asio-helper.h"

namespace arangodb {

+namespace aql {
+class QueryRegistry;
+}
+
+namespace traverser {
+class TraverserEngineRegistry;
+}
+
namespace rest {
class AsyncJobManager;
class RestHandlerFactory;

@@ -111,6 +120,7 @@ class GeneralServerFeature final
 private:
  std::unique_ptr<rest::RestHandlerFactory> _handlerFactory;
  std::unique_ptr<rest::AsyncJobManager> _jobManager;
+  std::unique_ptr<std::pair<aql::QueryRegistry*, traverser::TraverserEngineRegistry*>> _combinedRegistries;
  std::vector<rest::GeneralServer*> _servers;
};
}

@@ -96,7 +96,7 @@ void InternalRestTraverserHandler::createEngine() {
        "Expected an object with traverser information as body parameter");
    return;
  }
-  TraverserEngineID id = _registry->createNew(_vocbase, parsedBody->slice());
+  TraverserEngineID id = _registry->createNew(_vocbase, parsedBody->slice(), true);
  TRI_ASSERT(id != 0);
  VPackBuilder resultBuilder;
  resultBuilder.add(VPackValue(id));

@@ -697,6 +697,7 @@ Result Syncer::createIndex(VPackSlice const& slice) {
      col->id(),
      AccessMode::Type::WRITE
  );

  Result res = trx.begin();

  if (!res.ok()) {

@@ -26,6 +26,7 @@
#include "ApplicationFeatures/ApplicationServer.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
+#include "Cluster/CollectionLockState.h"
#include "Cluster/ServerState.h"
#include "Rest/HttpRequest.h"
#include "StorageEngine/EngineSelectorFeature.h"

@@ -35,8 +35,9 @@
struct TRI_vocbase_t;

namespace arangodb {

/// @brief just also stores the context
-class VocbaseContext final : public arangodb::ExecContext {
+class VocbaseContext : public arangodb::ExecContext {
 private:
  VocbaseContext(VocbaseContext const&) = delete;
  VocbaseContext& operator=(VocbaseContext const&) = delete;

@@ -45,13 +46,14 @@ class VocbaseContext : public arangodb::ExecContext {

 public:
  static double ServerSessionTtl;
-  ~VocbaseContext();
+
+  TEST_VIRTUAL ~VocbaseContext();

 public:

  static VocbaseContext* create(GeneralRequest*, TRI_vocbase_t*);

-  TRI_vocbase_t* vocbase() const { return _vocbase; }
+  TEST_VIRTUAL TRI_vocbase_t* vocbase() const { return _vocbase; }

  /// @brief upgrade to internal superuser
  void forceSuperuser();

@@ -866,7 +866,7 @@ OperationResult transaction::Methods::anyLocal(
  if (cid == 0) {
    throwCollectionNotFound(collectionName.c_str());
  }

  pinData(cid);  // will throw when it fails

  VPackBuilder resultBuilder;

@@ -892,7 +892,7 @@ OperationResult transaction::Methods::anyLocal(
      return OperationResult(res);
    }
  }

  resultBuilder.close();

  return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler(), false);

@@ -1003,7 +1003,7 @@ void transaction::Methods::invokeOnAllElements(
  if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
    THROW_ARANGO_EXCEPTION(lockResult);
  }

  TRI_ASSERT(isLocked(collection, AccessMode::Type::READ));

  collection->invokeOnAllElements(this, callback);

@@ -1778,7 +1778,7 @@ OperationResult transaction::Methods::modifyLocal(
  if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
    return OperationResult(lockResult);
  }

  VPackBuilder resultBuilder;  // building the complete result
  TRI_voc_tick_t maxTick = 0;

@@ -2299,7 +2299,7 @@ OperationResult transaction::Methods::allLocal(
  TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName);

  pinData(cid);  // will throw when it fails

  VPackBuilder resultBuilder;
  resultBuilder.openArray();

@@ -2328,7 +2328,7 @@ OperationResult transaction::Methods::allLocal(
      return OperationResult(res);
    }
  }

  resultBuilder.close();

  return OperationResult(Result(), resultBuilder.steal(), _transactionContextPtr->orderCustomTypeHandler(), false);

@@ -2394,7 +2394,7 @@ OperationResult transaction::Methods::truncateLocal(
  if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
    return OperationResult(lockResult);
  }

  TRI_ASSERT(isLocked(collection, AccessMode::Type::WRITE));

  try {

@@ -2534,7 +2534,7 @@ OperationResult transaction::Methods::count(std::string const& collectionName,
  TRI_ASSERT(_state->status() == transaction::Status::RUNNING);

  if (_state->isCoordinator()) {
-    return countCoordinator(collectionName, aggregate);
+    return countCoordinator(collectionName, aggregate, true);
  }

  return countLocal(collectionName);

@@ -2543,9 +2543,10 @@ OperationResult transaction::Methods::count(std::string const& collectionName,
/// @brief count the number of documents in a collection
#ifndef USE_ENTERPRISE
OperationResult transaction::Methods::countCoordinator(
-    std::string const& collectionName, bool aggregate) {
+    std::string const& collectionName, bool aggregate, bool sendNoLockHeader) {
  std::vector<std::pair<std::string, uint64_t>> count;
-  int res = arangodb::countOnCoordinator(databaseName(), collectionName, count);
+  int res = arangodb::countOnCoordinator(databaseName(), collectionName, count,
+                                         sendNoLockHeader);

  if (res != TRI_ERROR_NO_ERROR) {
    return OperationResult(res);

@@ -2566,7 +2567,7 @@ OperationResult transaction::Methods::countLocal(
  if (!lockResult.ok() && !lockResult.is(TRI_ERROR_LOCKED)) {
    return OperationResult(lockResult);
  }

  TRI_ASSERT(isLocked(collection, AccessMode::Type::READ));

  uint64_t num = collection->numberDocuments(this);

@@ -2963,6 +2964,13 @@ bool transaction::Methods::isLocked(LogicalCollection* document,
  if (_state == nullptr || _state->status() != transaction::Status::RUNNING) {
    return false;
  }
+  if (_state->hasHint(Hints::Hint::LOCK_NEVER)) {
+    // In the lock-never case we have made sure that
+    // some other process holds this lock.
+    // So we can lie here and report that it actually
+    // is locked!
+    return true;
+  }

  TransactionCollection* trxColl = trxCollection(document->id(), type);
  TRI_ASSERT(trxColl != nullptr);

@@ -3211,4 +3219,4 @@ Result transaction::Methods::resolveId(char const* handle, size_t length,

// -----------------------------------------------------------------------------
// --SECTION--                                                       END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

@@ -41,9 +41,10 @@
#ifdef USE_ENTERPRISE
#define ENTERPRISE_VIRT virtual
#else
-#define ENTERPRISE_VIRT
+#define ENTERPRISE_VIRT TEST_VIRTUAL
#endif


namespace arangodb {

namespace basics {

@@ -308,7 +309,7 @@ class Methods {
      OperationOptions const& options);

  /// @brief count the number of documents in a collection
-  ENTERPRISE_VIRT OperationResult count(std::string const& collectionName, bool aggregate);
+  virtual OperationResult count(std::string const& collectionName, bool aggregate);

  /// @brief Gets the best fitting index for an AQL condition.
  /// note: the caller must have read-locked the underlying collection when

@@ -471,11 +472,13 @@ class Methods {
      OperationOptions const& options);


 protected:

-  OperationResult countCoordinator(std::string const& collectionName, bool aggregate);
+  OperationResult countCoordinator(std::string const& collectionName,
+                                   bool aggregate, bool sendNoLockHeader);
+
  OperationResult countLocal(std::string const& collectionName);

 protected:
  /// @brief return the transaction collection for a document collection
  ENTERPRISE_VIRT TransactionCollection* trxCollection(TRI_voc_cid_t cid,
      AccessMode::Type type = AccessMode::Type::READ) const;

@@ -37,6 +37,7 @@
#include "Basics/conversions.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ClusterMethods.h"
+#include "Cluster/CollectionLockState.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "Indexes/Index.h"
#include "Cluster/FollowerInfo.h"

@@ -951,7 +952,7 @@ static void JS_DropVocbaseCol(v8::FunctionCallbackInfo<v8::Value> const& args) {
      allowDropSystem = TRI_ObjectToBoolean(args[0]);
    }
  }

  Result res = methods::Collections::drop(vocbase, collection,
                                          allowDropSystem, timeout);
  if (res.fail()) {

@@ -2812,6 +2813,11 @@ static void JS_CountVocbaseCol(

  SingleCollectionTransaction trx(transaction::V8Context::Create(vocbase, true), collectionName, AccessMode::Type::READ);

+  if (CollectionLockState::_noLockHeaders != nullptr) {
+    if (CollectionLockState::_noLockHeaders->find(collectionName) != CollectionLockState::_noLockHeaders->end()) {
+      trx.addHint(transaction::Hints::Hint::LOCK_NEVER);
+    }
+  }
  Result res = trx.begin();

  if (!res.ok()) {

@@ -98,4 +98,12 @@ struct AccessMode {

}

+namespace std {
+template <>
+struct hash<arangodb::AccessMode::Type> {
+  size_t operator()(arangodb::AccessMode::Type const& value) const noexcept {
+    return static_cast<size_t>(value);
+  }
+};
+}
#endif

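The specialization above is what lets AccessMode::Type serve directly as a key in the standard unordered containers. An illustrative (not from the commit) usage:

#include <string>
#include <unordered_map>

// Without the std::hash specialization, this would not compile on standard
// libraries that do not provide enum hashing via the underlying type.
std::unordered_map<arangodb::AccessMode::Type, std::string> names = {
    {arangodb::AccessMode::Type::READ, "read"},
    {arangodb::AccessMode::Type::WRITE, "write"}};
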
@@ -133,7 +133,7 @@ struct TRI_vocbase_t {

  TRI_vocbase_t(TRI_vocbase_type_e type, TRI_voc_tick_t id,
                std::string const& name);
-  ~TRI_vocbase_t();
+  TEST_VIRTUAL ~TRI_vocbase_t();

 private:
  /// @brief sleep interval used when polling for a loading collection's status

@@ -624,10 +624,12 @@ function ahuacatlCollectionCountTestSuite () {
    setUp : function () {
      db._drop(cn);
      c = db._create(cn, { numberOfShards: 4 });
+      let docs = [];

      for (var i = 1; i <= 1000; ++i) {
-        c.insert({ _key: "test" + i });
+        docs.push({ _key: "test" + i });
      }
+      c.insert(docs);
    },

    tearDown : function () {

@@ -0,0 +1,381 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertTrue, assertFalse */

////////////////////////////////////////////////////////////////////////////////
/// @brief tests for query language, subqueries in cluster
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2018-2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
/// @author Copyright 2018, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////


const jsunity = require("jsunity");
const helper = require("@arangodb/aql-helper");
const getQueryResults = helper.getQueryResults;
const db = require("internal").db;
const c1 = "UnitTestSubQuery1";
const c2 = "UnitTestSubQuery2";
const c3 = "UnitTestSubQuery3";
const c4 = "UnitTestSubQuery4";
const c5 = "UnitTestSubQuery5";
const c6 = "UnitTestSubQuery6";
const c7 = "UnitTestSubQuery7";
const c8 = "UnitTestSubQuery8";

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////


/**
 * @brief This suite is supposed to test subquery execution
 * in the cluster case. Planning for subqueries tends to be
 * complicated and thereby error prone.
 *
 * NOTE: Please do not take the following AQL queries as well designed
 * production queries or best practices. They are made artificially
 * complex to exercise the internals.
 *
 * @return The TestSuite
 */
function clusterSubqueriesTestSuite () {
  const clean = () => {
    db._drop(c1);
    db._drop(c2);
    db._drop(c3);
    db._drop(c4);
    db._drop(c5);
    db._drop(c6);
    db._drop(c7);
    db._drop(c8);
  };

  return {

    setUp: function () {
      clean();
      db._create(c1, {numberOfShards: 4});
      db._create(c2, {numberOfShards: 4});
      db._create(c3, {numberOfShards: 4});
      db._create(c4, {numberOfShards: 4});
      db._create(c5, {numberOfShards: 4});
      db._create(c6, {numberOfShards: 4});
      db._create(c7, {numberOfShards: 4});
      db._create(c8, {numberOfShards: 4});
    },

    tearDown: clean,

    // This test validates that all input values `x` are
    // transported to the input register of the subquery.
    // This is done via successful initializeCursor calls.
    testSimpleInitialzeCursor: function () {
      let docs = [];
      // We add 5 times each of the values 0 -> 19.
      // In the query we will just use 1 -> 10.
      for (let i = 0; i < 20; ++i) {
        for (let j = 0; j < 5; ++j) {
          docs.push({foo: `bar${i}`});
        }
      }
      db[c1].save(docs);
      let q = `
        FOR x IN 1..10
          LET sub = (
            FOR y IN ${c1}
              FILTER y.foo == CONCAT("bar", TO_STRING(x))
              RETURN y
          )
          RETURN sub[*].foo
      `;
      let c = db._query(q).toArray();
      assertEqual(c.length, 10); // we have 10 values for x
      let seen = new Set();
      for (let d of c) {
        assertEqual(d.length, 5); // we have 5 values for each x
        let first = d[0];
        seen.add(first);
        for (let q of d) {
          assertEqual(q, first);
        }
      }
      assertEqual(seen.size, 10); // We have 10 different values
    },

    // Tests if all snippets are created correctly
    // in the subquery case if both root nodes
    // end on DBServers
    testSnippetsRootDB: function () {
      let docsA = [];
      let docsB = [];
      let docsC = [];
      let fooVals = new Map();
      // We add 5 times each of the values 1 -> 10
      for (let i = 1; i < 11; ++i) {
        fooVals.set(`${i}`, 0);
        docsA.push({foo: `${i}`});
        for (let j = 0; j < 5; ++j) {
          docsB.push({foo: `${i}`});
          docsC.push({foo: `${i}`});
        }
      }
      db[c1].save(docsA);
      db[c2].save(docsB);
      db[c3].save(docsC);

      let q = `
        FOR a IN ${c1}
          LET sub = (
            FOR b IN ${c2}
              FILTER b.foo == a.foo
              SORT b._key // For easier check in result
              RETURN b
          )
          FOR c IN ${c3}
            FILTER c.foo == a.foo
            FILTER c.foo == sub[0].foo
            RETURN {
              a: a._key,
              b: sub[*]._key,
              c: c._key,
              foos: {
                a: a.foo,
                c: c.foo,
                b: sub[*].foo
              }
            }
      `;
      db._explain(q);
      let c = db._query(q).toArray();
      assertEqual(c.length, 10 * 5);
      // For 10 A values we find 5 C values. Each sharing identical 5 B values.
      let seen = new Set();
      for (let d of c) {
        // We found all 5 elements in the subquery
        assertEqual(d.b.length, 5);
        assertEqual(d.foos.b.length, 5);
        // Check that all a,b,c combinations are unique
        let key = d.a + d.b + d.c;
        assertFalse(seen.has(key)); // Every combination is unique
        seen.add(key);
        // Check that all foo values are correct
        assertEqual(d.foos.a, d.foos.c);
        for (let f of d.foos.b) {
          assertEqual(d.foos.a, f);
        }
        // Record that we found this specific value.
        // We need to find exactly 5 of each.
        fooVals.set(d.foos.a, fooVals.get(d.foos.a) + 1);
      }
      assertEqual(fooVals.size, 10); // we have 10 different foos
      for (let [key, value] of fooVals) {
        assertEqual(value, 5, `Found too few of foo: ${key}`);
      }
    },

    // Tests if all snippets are created correctly
    // in the subquery case if both root nodes
    // end on the Coordinator
    testSnippetsRootCoordinator: function () {
      let docsA = [];
      let docsB = [];
      let docsC = [];
      let fooVals = new Map();
      // We add 5 times each of the values 1 -> 10
      for (let i = 1; i < 11; ++i) {
        fooVals.set(`${i}`, 0);
        docsA.push({foo: `${i}`, val: "baz"});
        for (let j = 0; j < 5; ++j) {
          docsB.push({foo: `${i}`, val: "bar"});
          docsC.push({foo: `${i}`});
        }
      }
      db[c1].save(docsA);
      db[c2].save(docsB);
      db[c3].save(docsC);
      db[c4].save([
        {_key: "a", val: "baz"},
        {_key: "b", val: "bar"}
      ]);

      let q = `
        LET src = DOCUMENT("${c4}/a")
        FOR a IN ${c1}
          FILTER a.val == src.val
          LET sub = (
            LET src2 = DOCUMENT("${c4}/b")
            FOR b IN ${c2}
              FILTER b.foo == a.foo
              FILTER b.val == src2.val
              RETURN b
          )
          FOR c IN ${c3}
            FILTER c.foo == a.foo
            FILTER c.foo == sub[0].foo
            RETURN {
              a: a._key,
              b: sub[*]._key,
              c: c._key,
              foos: {
                a: a.foo,
                c: c.foo,
                b: sub[*].foo
              }
            }
      `;
      let c = db._query(q).toArray();
      assertEqual(c.length, 10 * 5);
      // For 10 A values we find 5 C values. Each sharing identical 5 B values.
      let seen = new Set();
      for (let d of c) {
        // We found all 5 elements in the subquery
        assertEqual(d.b.length, 5);
        assertEqual(d.foos.b.length, 5);
        // Check that all a,b,c combinations are unique
        let key = d.a + d.b + d.c;
        assertFalse(seen.has(key)); // Every combination is unique
        seen.add(key);
        // Check that all foo values are correct
        assertEqual(d.foos.a, d.foos.c);
        for (let f of d.foos.b) {
          assertEqual(d.foos.a, f);
        }
        // Record that we found this specific value.
        // We need to find exactly 5 of each.
        fooVals.set(d.foos.a, fooVals.get(d.foos.a) + 1);
      }
      assertEqual(fooVals.size, 10); // we have 10 different foos
      for (let [key, value] of fooVals) {
        assertEqual(value, 5, `Found too few of foo: ${key}`);
      }
    },

    // Tests a weird combination of a subquery in a subquery
    // while switching from Coordinator to DBServer
    // all the time.
    //
    // General IDEA:
    // We have a subquery node,
    // where the "main query" contains a subquery
    // and its "subquery" contains a subquery as well.
    // Filters use the allowed amount of objects within
    // the different scopes (e.g. in every subquery we
    // validate that we still have access to the main
    // query variables)
    testSubqueryInception: function () {
      let docsA = [];
      let docsB = [];
      let docsC = [];
      let docsD = [];
      let docsE = [];
      let docsF = [];
      let docsG = [];
      let numDocs = 200;

      for (let i = 0; i < numDocs; ++i) {
        docsA.push({val: `A${i}`});
        docsB.push({val: `B${i}`, valA: `A${i}`});
        docsC.push({val: `C${i}`, valA: `A${i}`, valB: `B${i}`});
        docsD.push({val: `D${i}`, valA: `A${i}`, valB: `B${i}`, valC: `C${i}`});
        docsE.push({val: `E${i}`, valA: `A${i}`, valB: `B${i}`, valC: `C${i}`, valD: `D${i}`});
        docsF.push({val: `F${i}`, valA: `A${i}`, valB: `B${i}`, valC: `C${i}`, valD: `D${i}`, valE: `E${i}`});
        docsG.push({val: `G${i}`, valA: `A${i}`, valB: `B${i}`, valC: `C${i}`, valD: `D${i}`, valE: `E${i}`, valF: `F${i}`});
      }

      db[c1].save(docsA);
      db[c2].ensureHashIndex("valA");
      db[c2].save(docsB);
      db[c3].ensureHashIndex("valA", "valB");
      db[c3].save(docsC);
      db[c4].ensureHashIndex("valA", "valB", "valC");
      db[c4].save(docsD);
      db[c5].ensureHashIndex("valA", "valB", "valC", "valD");
      db[c5].save(docsE);
      db[c6].ensureHashIndex("valA", "valB", "valC", "valD", "valE");
      db[c6].save(docsF);
      db[c7].ensureHashIndex("valA", "valB", "valC", "valF");
      db[c7].save(docsG);

      let q = `
        FOR a IN ${c1}
          LET s1 = (
            FOR b IN ${c2}
              FILTER b.valA == a.val
              RETURN b
          )
          FOR c IN ${c3}
            FILTER c.valB == s1[0].val
            FILTER c.valA == a.val
            LET s2 = (
              FOR d IN ${c4}
                FILTER d.valA == a.val
                FILTER d.valB == s1[0].val
                FILTER d.valC == c.val
                LET s3 = (
                  FOR e IN ${c5}
                    FILTER e.valA == a.val
                    FILTER e.valB == s1[0].val
                    FILTER e.valC == c.val
                    FILTER e.valD == d.val
                    RETURN e
                )
                FOR f IN ${c6}
                  FILTER f.valA == a.val
                  FILTER f.valB == s1[0].val
                  FILTER f.valC == c.val
                  FILTER f.valD == d.val
                  FILTER f.valE == s3[0].val
                  RETURN f
            )
            FOR g IN ${c7}
              FILTER g.valA == a.val
              FILTER g.valB == s1[0].val
              FILTER g.valC == c.val
              FILTER g.valF == s2[0].val
              RETURN g
      `;

      let c = db._query(q).toArray();
      // We expect numDocs result documents
      assertEqual(c.length, numDocs);
      let seen = new Set();
      for (let d of c) {
        seen.add(d.val);
      }
      // Check that we have numDocs distinct ones
      assertEqual(seen.size, numDocs);
      for (let i = 0; i < numDocs; ++i) {
        assertTrue(seen.has(`G${i}`));
      }
    }
  };
};

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(clusterSubqueriesTestSuite);

return jsunity.done();

@@ -103,6 +103,7 @@ class Topics {

LogTopic Logger::AGENCY("agency", LogLevel::INFO);
+LogTopic Logger::AGENCYCOMM("agencycomm", LogLevel::INFO);
LogTopic Logger::AQL("aql", LogLevel::INFO);
LogTopic Logger::AUTHENTICATION("authentication");
LogTopic Logger::AUTHORIZATION("authorization");
LogTopic Logger::CACHE("cache", LogLevel::INFO);

@@ -128,6 +128,7 @@ class Logger {
 public:
  static LogTopic AGENCY;
+  static LogTopic AGENCYCOMM;
  static LogTopic AQL;
  static LogTopic AUTHENTICATION;
  static LogTopic AUTHORIZATION;
  static LogTopic CACHE;

@@ -37,6 +37,9 @@
#include <velocypack/velocypack-aliases.h>

namespace arangodb {

+class ExecContext;
+
namespace velocypack {
class Builder;
struct Options;

@@ -100,7 +103,7 @@ class GeneralRequest {
  void setClientTaskId(uint64_t clientTaskId) { _clientTaskId = clientTaskId; }

  /// Database used for this request, _system by default
-  std::string const& databaseName() const { return _databaseName; }
+  TEST_VIRTUAL std::string const& databaseName() const { return _databaseName; }
  void setDatabaseName(std::string const& databaseName) {
    _databaseName = databaseName;
  }

@@ -112,17 +115,19 @@ class GeneralRequest {
  void setAuthenticated(bool a) { _authenticated = a; }

  // @brief User sending this request
-  std::string const& user() const { return _user; }
+  TEST_VIRTUAL std::string const& user() const { return _user; }
  void setUser(std::string const& user) { _user = user; }
  void setUser(std::string&& user) { _user = std::move(user); }

  /// @brief the request context depends on the application
-  RequestContext* requestContext() const { return _requestContext; }
+  TEST_VIRTUAL RequestContext* requestContext() const { return _requestContext; }

  /// @brief set request context and whether this request is allowed
  /// to delete it
-  void setRequestContext(RequestContext*, bool isOwner);
+  void setRequestContext(RequestContext*, bool);

-  RequestType requestType() const { return _type; }
+  TEST_VIRTUAL RequestType requestType() const { return _type; }
  void setRequestType(RequestType type) { _type = type; }

  std::string const& fullUrl() const { return _fullUrl; }

@@ -149,7 +154,7 @@ class GeneralRequest {
  void setPrefix(std::string&& prefix) { _prefix = std::move(prefix); }

  // Returns the request path suffixes in non-URL-decoded form
-  std::vector<std::string> const& suffixes() const { return _suffixes; }
+  TEST_VIRTUAL std::vector<std::string> const& suffixes() const { return _suffixes; }

  // Returns the request path suffixes in URL-decoded form. Note: this will
  // re-compute the suffix list on every call!

@@ -186,7 +191,7 @@ class GeneralRequest {
  virtual VPackSlice payload(arangodb::velocypack::Options const* options =
                                 &VPackOptions::Defaults) = 0;

-  std::shared_ptr<VPackBuilder> toVelocyPackBuilderPtr() {
+  TEST_VIRTUAL std::shared_ptr<VPackBuilder> toVelocyPackBuilderPtr() {
    VPackOptions optionsWithUniquenessCheck = VPackOptions::Defaults;
    optionsWithUniquenessCheck.checkAttributeUniqueness = true;
    return std::make_shared<VPackBuilder>(payload(&optionsWithUniquenessCheck), &optionsWithUniquenessCheck);

@ -0,0 +1,803 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief test case for EngineInfoContainerCoordinator
|
||||
///
|
||||
/// @file
|
||||
///
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Michael Hackstein
|
||||
/// @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "catch.hpp"
|
||||
#include "fakeit.hpp"
|
||||
|
||||
#include "Aql/AqlResult.h"
|
||||
#include "Aql/EngineInfoContainerCoordinator.h"
|
||||
#include "Aql/ExecutionEngine.h"
|
||||
#include "Aql/ExecutionNode.h"
|
||||
#include "Aql/Query.h"
|
||||
#include "Aql/QueryRegistry.h"
|
||||
|
||||
using namespace arangodb;
|
||||
using namespace arangodb::aql;
|
||||
using namespace fakeit;
|
||||
|
||||
namespace arangodb {
|
||||
namespace tests {
|
||||
namespace engine_info_container_coordinator_test {
|
||||
|
||||
TEST_CASE("EngineInfoContainerCoordinator", "[aql][cluster][coordinator]") {
|
||||
|
||||
SECTION("it should always start with an open snippet, with queryID 0") {
|
||||
EngineInfoContainerCoordinator testee;
|
||||
QueryId res = testee.closeSnippet();
|
||||
REQUIRE(res == 0);
|
||||
}
|
||||
|
||||
SECTION("it should be able to add more snippets, all giving a different id") {
|
||||
EngineInfoContainerCoordinator testee;
|
||||
|
||||
size_t remote = 1;
|
||||
testee.openSnippet(remote);
|
||||
testee.openSnippet(remote);
|
||||
|
||||
QueryId res1 = testee.closeSnippet();
|
||||
REQUIRE(res1 != 0);
|
||||
|
||||
QueryId res2 = testee.closeSnippet();
|
||||
REQUIRE(res2 != res1);
|
||||
REQUIRE(res2 != 0);
|
||||
|
||||
QueryId res3 = testee.closeSnippet();
|
||||
REQUIRE(res3 == 0);
|
||||
}
|
||||
|
||||
///////////////////////////////////////////
|
||||
// SECTION buildEngines
|
||||
///////////////////////////////////////////
|
||||
|
||||
// Flow:
|
||||
// 1. Clone the query for every snippet but the first
|
||||
// 2. For every snippet:
|
||||
// 1. create new Engine (e)
|
||||
// 2. query->setEngine(e)
|
||||
// 3. query->engine() -> e
|
||||
// 4. engine->setLockedShards()
|
||||
// 5. engine->createBlocks()
|
||||
// 6. Assert (engine->root() != nullptr)
|
||||
// 7. For all but the first:
|
||||
// 1. queryRegistry->insert(_id, query, 600.0);
|
||||
// 2. queryIds.emplace(idOfRemote/dbname, _id);
|
||||
// 3. query->engine();
|
||||
|
||||
SECTION("it should create an ExecutionEngine for the first snippet") {
|
||||
|
||||
std::unordered_set<std::string> const restrictToShards;
|
||||
std::unordered_map<std::string, std::string> queryIds;
|
||||
auto lockedShards = std::make_unique<std::unordered_set<ShardID> const>();
|
||||
std::string const dbname = "TestDB";
|
||||
|
||||
// ------------------------------
|
||||
// Section: Create Mock Instances
|
||||
// ------------------------------
|
||||
Mock<ExecutionNode> singletonMock;
|
||||
ExecutionNode& sNode = singletonMock.get();
|
||||
When(Method(singletonMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);
|
||||
|
||||
Mock<ExecutionEngine> mockEngine;
|
||||
ExecutionEngine& myEngine = mockEngine.get();
|
||||
|
||||
Mock<ExecutionBlock> rootBlockMock;
|
||||
ExecutionBlock& rootBlock = rootBlockMock.get();
|
||||
|
||||
Mock<Query> mockQuery;
|
||||
Query& query = mockQuery.get();
|
||||
|
||||
Mock<QueryRegistry> mockRegistry;
|
||||
QueryRegistry& registry = mockRegistry.get();
|
||||
|
||||
// ------------------------------
|
||||
// Section: Mock Functions
|
||||
// ------------------------------
|
||||
|
||||
|
||||
When(Method(mockQuery, setEngine)).Do([&](ExecutionEngine* eng) -> void {
|
||||
// We expect that the snippet injects a new engine into our
|
||||
// query.
|
||||
// However we have to return a mocked engine later
|
||||
REQUIRE(eng != nullptr);
|
||||
// Throw it away
|
||||
delete eng;
|
||||
});
|
||||
|
||||
When(Method(mockQuery, engine)).Return(&myEngine).Return(&myEngine);
|
||||
When(Method(mockEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
|
||||
REQUIRE(lockedShards != nullptr);
|
||||
delete lockedShards; // This is a copy
|
||||
return;
|
||||
});
|
||||
When(Method(mockEngine, createBlocks)).Return();
|
||||
When(ConstOverloadedMethod(mockEngine, root, ExecutionBlock* ()))
|
||||
.AlwaysReturn(&rootBlock);
|
||||
|
||||
// ------------------------------
|
||||
// Section: Run the test
|
||||
// ------------------------------
|
||||
|
||||
EngineInfoContainerCoordinator testee;
|
||||
testee.addNode(&sNode);
|
||||
|
||||
ExecutionEngineResult result = testee.buildEngines(
|
||||
&query, ®istry, dbname, restrictToShards, queryIds, lockedShards.get()
|
||||
);
|
||||
REQUIRE(result.ok());
|
||||
ExecutionEngine* engine = result.engine();
|
||||
|
||||
REQUIRE(engine != nullptr);
|
||||
REQUIRE(engine == &myEngine);
|
||||
|
||||
// The last engine should not be stored
|
||||
// It is not added to the registry
|
||||
REQUIRE(queryIds.empty());
|
||||
|
||||
// Validate that the query is wired up with the engine
|
||||
Verify(Method(mockQuery, setEngine)).Exactly(1);
|
||||
// Validate that lockedShards and createBlocks have been called!
|
||||
Verify(Method(mockEngine, setLockedShards)).Exactly(1);
|
||||
Verify(Method(mockEngine, createBlocks)).Exactly(1);
|
||||
}
|
||||
|
||||
SECTION("it should create an new engine and register it for second snippet") {
|
||||
std::unordered_set<std::string> const restrictToShards;
|
||||
std::unordered_map<std::string, std::string> queryIds;
|
||||
auto lockedShards = std::make_unique<std::unordered_set<ShardID> const>();
|
||||
|
||||
size_t remoteId = 1337;
|
||||
QueryId secondId = 0;
|
||||
std::string dbname = "TestDB";
|
||||
|
||||
// ------------------------------
|
||||
// Section: Create Mock Instances
|
||||
// ------------------------------
|
||||
Mock<ExecutionNode> firstNodeMock;
|
||||
ExecutionNode& fNode = firstNodeMock.get();
|
||||
When(Method(firstNodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);
|
||||
|
||||
Mock<ExecutionNode> secondNodeMock;
|
||||
ExecutionNode& sNode = secondNodeMock.get();
|
||||
When(Method(secondNodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);
|
||||
|
||||
// We need a block only for assertion
|
||||
Mock<ExecutionBlock> blockMock;
|
||||
ExecutionBlock& block = blockMock.get();
|
||||
|
||||
// Mock engine for first snippet
|
||||
Mock<ExecutionEngine> mockEngine;
|
||||
ExecutionEngine& myEngine = mockEngine.get();
|
||||
|
||||
// Mock engine for second snippet
|
||||
Mock<ExecutionEngine> mockSecondEngine;
|
||||
ExecutionEngine& mySecondEngine = mockSecondEngine.get();
|
||||
|
||||
Mock<Query> mockQuery;
|
||||
Query& query = mockQuery.get();
|
||||
|
||||
Mock<Query> mockQueryClone;
|
||||
Query& queryClone = mockQueryClone.get();
|
||||
|
||||
Mock<QueryRegistry> mockRegistry;
|
||||
QueryRegistry& registry = mockRegistry.get();
|
||||
|
||||
// ------------------------------
|
||||
// Section: Mock Functions
|
||||
// ------------------------------
|
||||
|
||||
|
||||
When(Method(mockQuery, setEngine)).Do([&](ExecutionEngine* eng) -> void {
|
||||
|
||||
// We expect that the snippet injects a new engine into our
|
||||
// query.
|
||||
// However we have to return a mocked engine later
|
||||
REQUIRE(eng != nullptr);
|
||||
// Throw it away
|
||||
delete eng;
|
||||
});
|
||||
When(Method(mockQuery, engine)).Return(&myEngine).Return(&myEngine);
|
||||
When(Method(mockEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
|
||||
REQUIRE(lockedShards != nullptr);
|
||||
delete lockedShards; // This is a copy
|
||||
return;
|
||||
});
|
||||
When(Method(mockEngine, createBlocks)).Do([&](
|
||||
std::vector<ExecutionNode*> const& nodes,
|
||||
std::unordered_set<std::string> const&,
|
||||
std::unordered_set<std::string> const&,
|
||||
std::unordered_map<std::string, std::string> const&) {
|
||||
REQUIRE(nodes.size() == 1);
|
||||
REQUIRE(nodes[0] == &fNode);
|
||||
});
|
||||
When(ConstOverloadedMethod(mockEngine, root, ExecutionBlock* ()))
|
||||
.AlwaysReturn(&block);
|
||||
|
||||
// Mock query clone
|
||||
When(Method(mockQuery, clone)).Do([&](QueryPart part, bool withPlan) -> Query* {
|
||||
REQUIRE(part == PART_DEPENDENT);
|
||||
REQUIRE(withPlan == false);
|
||||
return &queryClone;
|
||||
});
|
||||
|
||||
When(Method(mockQueryClone, setEngine)).Do([&](ExecutionEngine* eng) -> void {
|
||||
// We expect that the snippet injects a new engine into our
|
||||
// query.
|
||||
// However we have to return a mocked engine later
|
||||
REQUIRE(eng != nullptr);
|
||||
// Throw it away
|
||||
delete eng;
|
||||
});
|
||||
|
||||
When(Method(mockQueryClone, engine)).Return(&mySecondEngine);
|
||||
When(Method(mockSecondEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
|
||||
REQUIRE(lockedShards != nullptr);
|
||||
delete lockedShards; // This is a copy
|
||||
return;
|
||||
});
|
||||
|
||||
When(Method(mockSecondEngine, createBlocks)).Do([&](
|
||||
std::vector<ExecutionNode*> const& nodes,
|
||||
std::unordered_set<std::string> const&,
|
||||
std::unordered_set<std::string> const&,
|
||||
std::unordered_map<std::string, std::string> const&) {
|
||||
REQUIRE(nodes.size() == 1);
|
||||
REQUIRE(nodes[0] == &sNode);
|
||||
});
|
||||
When(ConstOverloadedMethod(mockSecondEngine, root, ExecutionBlock* ()))
|
||||
.AlwaysReturn(&block);
|
||||
|
||||
// Mock the Registry
|
||||
When(Method(mockRegistry, insert)).Do([&] (QueryId id, Query* query, double timeout) {
|
||||
REQUIRE(id != 0);
|
||||
REQUIRE(query != nullptr);
|
||||
REQUIRE(timeout == 600.0);
|
||||
REQUIRE(query == &queryClone);
|
||||
secondId = id;
|
||||
});
|
||||
|
||||
|
||||
// ------------------------------
|
||||
// Section: Run the test
|
||||
// ------------------------------
|
||||
|
||||
EngineInfoContainerCoordinator testee;
|
||||
testee.addNode(&fNode);
|
||||
|
||||
// Open the Second Snippet
|
||||
testee.openSnippet(remoteId);
|
||||
// Inject a node
|
||||
testee.addNode(&sNode);
|
||||
// Close the second snippet
|
||||
testee.closeSnippet();
|
||||
|
||||
ExecutionEngineResult result = testee.buildEngines(
|
||||
&query, ®istry, dbname, restrictToShards, queryIds, lockedShards.get()
|
||||
);
|
||||
REQUIRE(result.ok());
|
||||
ExecutionEngine* engine = result.engine();
|
||||
|
||||
REQUIRE(engine != nullptr);
|
||||
REQUIRE(engine == &myEngine);
|
||||
|
||||
// The first engine should not be stored
|
||||
// It is not added to the registry
|
||||
// The second should be
|
||||
REQUIRE(!queryIds.empty());
|
||||
// The second engine needs a generated id
|
||||
REQUIRE(secondId != 0);
|
||||
|
||||
// It is stored in the mapping
|
||||
std::string secIdString = arangodb::basics::StringUtils::itoa(secondId);
|
||||
std::string remIdString = arangodb::basics::StringUtils::itoa(remoteId) + "/" + dbname;
|
||||
REQUIRE(queryIds.find(remIdString) != queryIds.end());
|
||||
REQUIRE(queryIds[remIdString] == secIdString);
|
||||
|
||||
// Validate that the query is wired up with the engine
|
||||
Verify(Method(mockQuery, setEngine)).Exactly(1);
|
||||
// Validate that lockedShards and createBlocks have been called!
|
||||
Verify(Method(mockEngine, setLockedShards)).Exactly(1);
|
||||
Verify(Method(mockEngine, createBlocks)).Exactly(1);
|
||||
|
||||
// Validate that the second query is wired up with the second engine
|
||||
Verify(Method(mockQueryClone, setEngine)).Exactly(1);
|
||||
// Validate that lockedShards and createBlocks have been called!
|
||||
Verify(Method(mockSecondEngine, setLockedShards)).Exactly(1);
|
||||
Verify(Method(mockSecondEngine, createBlocks)).Exactly(1);
|
||||
Verify(Method(mockRegistry, insert)).Exactly(1);
|
||||
}
|
||||
|
||||
SECTION("snipets are a stack, insert node always into top snippet") {
|
||||
std::unordered_set<std::string> const restrictToShards;
|
||||
std::unordered_map<std::string, std::string> queryIds;
|
||||
auto lockedShards = std::make_unique<std::unordered_set<ShardID> const>();
|
||||
|
||||
size_t remoteId = 1337;
|
||||
size_t secondRemoteId = 42;
|
||||
QueryId secondId = 0;
|
||||
QueryId thirdId = 0;
|
||||
std::string dbname = "TestDB";
|
||||
|
||||
auto setEngineCallback = [] (ExecutionEngine* eng) -> void {
|
||||
// We expect that the snippet injects a new engine into our
|
||||
// query.
|
||||
// However we have to return a mocked engine later
|
||||
REQUIRE(eng != nullptr);
|
||||
// Throw it away
|
||||
delete eng;
|
||||
};
|
||||
|
||||
// We test the following:
|
||||
// Base Snippet insert node
|
||||
// New Snippet (A)
|
||||
// Insert Node -> (A)
|
||||
// Close (A)
|
||||
// Insert Node -> Base
|
||||
// New Snippet (B)
|
||||
// Insert Node -> (B)
|
||||
// Close (B)
|
||||
// Insert Node -> Base
|
||||
// Verfiy on Engines
|
||||
|
||||
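  // -------------------------------------------------------------------------
  // Editor's illustration: a self-contained toy (not the production container)
  // of the stack behaviour exercised below; addNode always targets the most
  // recently opened, still-open snippet.
  // -------------------------------------------------------------------------
  // #include <cassert>
  // #include <cstddef>
  // #include <stack>
  // #include <vector>
  //
  // struct ToySnippetStack {
  //   std::vector<std::vector<int>> snippets;  // snippet 0 is the base snippet
  //   std::stack<size_t> open;                 // indices of open snippets
  //   ToySnippetStack() : snippets(1) { open.push(0); }
  //   void openSnippet() { snippets.emplace_back(); open.push(snippets.size() - 1); }
  //   void closeSnippet() { open.pop(); }
  //   void addNode(int node) { snippets[open.top()].push_back(node); }
  // };
  //
  // int main() {
  //   ToySnippetStack s;
  //   s.addNode(1);       // -> base
  //   s.openSnippet();    // open (A)
  //   s.addNode(2);       // -> (A)
  //   s.closeSnippet();   // close (A)
  //   s.addNode(3);       // -> base again
  //   assert((s.snippets[0] == std::vector<int>{1, 3}));
  //   assert((s.snippets[1] == std::vector<int>{2}));
  //   return 0;
  // }
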
  // ------------------------------
  // Section: Create Mock Instances
  // ------------------------------

  Mock<ExecutionNode> firstBaseNodeMock;
  ExecutionNode& fbNode = firstBaseNodeMock.get();
  When(Method(firstBaseNodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);

  Mock<ExecutionNode> snipANodeMock;
  ExecutionNode& aNode = snipANodeMock.get();
  When(Method(snipANodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);

  Mock<ExecutionNode> secondBaseNodeMock;
  ExecutionNode& sbNode = secondBaseNodeMock.get();
  When(Method(secondBaseNodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);

  Mock<ExecutionNode> snipBNodeMock;
  ExecutionNode& bNode = snipBNodeMock.get();
  When(Method(snipBNodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);

  Mock<ExecutionNode> thirdBaseNodeMock;
  ExecutionNode& tbNode = thirdBaseNodeMock.get();
  When(Method(thirdBaseNodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);

  // We need a block only for assertions
  Mock<ExecutionBlock> blockMock;
  ExecutionBlock& block = blockMock.get();

  // Mock engine for first snippet
  Mock<ExecutionEngine> mockEngine;
  ExecutionEngine& myEngine = mockEngine.get();

  // Mock engine for second snippet
  Mock<ExecutionEngine> mockSecondEngine;
  ExecutionEngine& mySecondEngine = mockSecondEngine.get();

  // Mock engine for third snippet
  Mock<ExecutionEngine> mockThirdEngine;
  ExecutionEngine& myThirdEngine = mockThirdEngine.get();

  Mock<Query> mockQuery;
  Query& query = mockQuery.get();

  // We need two query clones
  Mock<Query> mockQueryClone;
  Query& queryClone = mockQueryClone.get();

  Mock<Query> mockQuerySecondClone;
  Query& querySecondClone = mockQuerySecondClone.get();

  Mock<QueryRegistry> mockRegistry;
  QueryRegistry& registry = mockRegistry.get();

  // ------------------------------
  // Section: Mock Functions
  // ------------------------------

  When(Method(mockQuery, setEngine)).Do(setEngineCallback);
  When(Method(mockQuery, engine)).Return(&myEngine).Return(&myEngine);
  When(Method(mockEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
    REQUIRE(lockedShards != nullptr);
    delete lockedShards; // This is a copy
    return;
  });
  When(Method(mockEngine, createBlocks)).Do([&](
      std::vector<ExecutionNode*> const& nodes,
      std::unordered_set<std::string> const&,
      std::unordered_set<std::string> const&,
      std::unordered_map<std::string, std::string> const&) {
    REQUIRE(nodes.size() == 3);
    REQUIRE(nodes[0] == &fbNode);
    REQUIRE(nodes[1] == &sbNode);
    REQUIRE(nodes[2] == &tbNode);
  });
  When(ConstOverloadedMethod(mockEngine, root, ExecutionBlock* ()))
      .AlwaysReturn(&block);

  When(Method(mockQuery, clone)).Do([&](QueryPart part, bool withPlan) -> Query* {
    REQUIRE(part == PART_DEPENDENT);
    REQUIRE(withPlan == false);
    return &queryClone;
  }).Do([&](QueryPart part, bool withPlan) -> Query* {
    REQUIRE(part == PART_DEPENDENT);
    REQUIRE(withPlan == false);
    return &querySecondClone;
  });

  // Mock first clone
  When(Method(mockQueryClone, setEngine)).Do(setEngineCallback);
  When(Method(mockQueryClone, engine)).Return(&mySecondEngine);
  When(Method(mockSecondEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
    REQUIRE(lockedShards != nullptr);
    delete lockedShards; // This is a copy
    return;
  });
  When(Method(mockSecondEngine, createBlocks)).Do([&](
      std::vector<ExecutionNode*> const& nodes,
      std::unordered_set<std::string> const&,
      std::unordered_set<std::string> const&,
      std::unordered_map<std::string, std::string> const&) {
    REQUIRE(nodes.size() == 1);
    REQUIRE(nodes[0] == &aNode);
  });
  When(ConstOverloadedMethod(mockSecondEngine, root, ExecutionBlock* ()))
      .AlwaysReturn(&block);

  // Mock second clone
  When(Method(mockQuerySecondClone, setEngine)).Do(setEngineCallback);
  When(Method(mockQuerySecondClone, engine)).Return(&myThirdEngine);
  When(Method(mockThirdEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
    REQUIRE(lockedShards != nullptr);
    delete lockedShards; // This is a copy
    return;
  });
  When(Method(mockThirdEngine, createBlocks)).Do([&](
      std::vector<ExecutionNode*> const& nodes,
      std::unordered_set<std::string> const&,
      std::unordered_set<std::string> const&,
      std::unordered_map<std::string, std::string> const&) {
    REQUIRE(nodes.size() == 1);
    REQUIRE(nodes[0] == &bNode);
  });
  When(ConstOverloadedMethod(mockThirdEngine, root, ExecutionBlock* ()))
      .AlwaysReturn(&block);

  // Mock the Registry
  // NOTE: This expects an ordering of the engines: the first on the stack will
  // be handled first. With some fakeit magic we could make this
  // ordering-independent, which would be fine for the production code as well.
  When(Method(mockRegistry, insert)).Do([&] (QueryId id, Query* query, double timeout) {
    REQUIRE(id != 0);
    REQUIRE(query != nullptr);
    REQUIRE(timeout == 600.0);
    REQUIRE(query == &queryClone);
    secondId = id;
  }).Do([&] (QueryId id, Query* query, double timeout) {
    REQUIRE(id != 0);
    REQUIRE(query != nullptr);
    REQUIRE(timeout == 600.0);
    REQUIRE(query == &querySecondClone);
    thirdId = id;
  });

  // ------------------------------
  // Section: Run the test
  // ------------------------------
  EngineInfoContainerCoordinator testee;

  testee.addNode(&fbNode);

  testee.openSnippet(remoteId);
  testee.addNode(&aNode);
  testee.closeSnippet();

  testee.addNode(&sbNode);

  testee.openSnippet(secondRemoteId);
  testee.addNode(&bNode);
  testee.closeSnippet();

  testee.addNode(&tbNode);

  ExecutionEngineResult result = testee.buildEngines(
    &query, &registry, dbname, restrictToShards, queryIds, lockedShards.get()
  );

  REQUIRE(result.ok());
  ExecutionEngine* engine = result.engine();
  REQUIRE(engine != nullptr);
  REQUIRE(engine == &myEngine);

  // The first engine should not be stored:
  // it is not added to the registry.
  // The other two should be.
  REQUIRE(queryIds.size() == 2);

  // First (A) is stored in the mapping
  std::string secIdString = arangodb::basics::StringUtils::itoa(secondId);
  std::string remIdString = arangodb::basics::StringUtils::itoa(remoteId) + "/" + dbname;
  REQUIRE(queryIds.find(remIdString) != queryIds.end());
  REQUIRE(queryIds[remIdString] == secIdString);

  // Second (B) is stored in the mapping
  std::string thirdIdString = arangodb::basics::StringUtils::itoa(thirdId);
  std::string secRemIdString = arangodb::basics::StringUtils::itoa(secondRemoteId) + "/" + dbname;
  REQUIRE(queryIds.find(secRemIdString) != queryIds.end());
  REQUIRE(queryIds[secRemIdString] == thirdIdString);

  // Validate that the query is wired up with the engine
  Verify(Method(mockQuery, setEngine)).Exactly(1);
  // Validate that setLockedShards and createBlocks have been called!
  Verify(Method(mockEngine, setLockedShards)).Exactly(1);
  Verify(Method(mockEngine, createBlocks)).Exactly(1);

  // Validate that the second query is wired up with the second engine
  Verify(Method(mockQueryClone, setEngine)).Exactly(1);
  // Validate that setLockedShards and createBlocks have been called!
  Verify(Method(mockSecondEngine, setLockedShards)).Exactly(1);
  Verify(Method(mockSecondEngine, createBlocks)).Exactly(1);

  // Validate that the third query is wired up with the third engine
  Verify(Method(mockQuerySecondClone, setEngine)).Exactly(1);
  // Validate that setLockedShards and createBlocks have been called!
  Verify(Method(mockThirdEngine, setLockedShards)).Exactly(1);
  Verify(Method(mockThirdEngine, createBlocks)).Exactly(1);

  // Validate that two queries are registered correctly
  Verify(Method(mockRegistry, insert)).Exactly(2);
}

SECTION("error cases") {
|
||||
std::unordered_set<std::string> const restrictToShards;
|
||||
std::unordered_map<std::string, std::string> queryIds;
|
||||
auto lockedShards = std::make_unique<std::unordered_set<ShardID> const>();
|
||||
|
||||
size_t remoteId = 1337;
|
||||
QueryId secondId = 0;
|
||||
std::string dbname = "TestDB";
|
||||
|
||||
// ------------------------------
|
||||
// Section: Create Mock Instances
|
||||
// ------------------------------
|
||||
Mock<ExecutionNode> firstNodeMock;
|
||||
ExecutionNode& fNode = firstNodeMock.get();
|
||||
When(Method(firstNodeMock, getType)).AlwaysReturn(ExecutionNode::SINGLETON);
|
||||
|
||||
// We need a block only for assertion
|
||||
Mock<ExecutionBlock> blockMock;
|
||||
ExecutionBlock& block = blockMock.get();
|
||||
|
||||
// Mock engine for first snippet
|
||||
Mock<ExecutionEngine> mockEngine;
|
||||
ExecutionEngine& myEngine = mockEngine.get();
|
||||
|
||||
// Mock engine for second snippet
|
||||
Mock<ExecutionEngine> mockSecondEngine;
|
||||
ExecutionEngine& mySecondEngine = mockSecondEngine.get();
|
||||
|
||||
Mock<Query> mockQuery;
|
||||
Query& query = mockQuery.get();
|
||||
|
||||
Mock<Query> mockQueryClone;
|
||||
Query& queryClone = mockQueryClone.get();
|
||||
|
||||
Mock<QueryRegistry> mockRegistry;
|
||||
QueryRegistry& registry = mockRegistry.get();
|
||||
|
||||
// ------------------------------
|
||||
// Section: Mock Functions
|
||||
// ------------------------------
|
||||
|
||||
|
||||
When(Method(mockQuery, setEngine)).Do([&](ExecutionEngine* eng) -> void {
|
||||
|
||||
// We expect that the snippet injects a new engine into our
|
||||
// query.
|
||||
// However we have to return a mocked engine later
|
||||
REQUIRE(eng != nullptr);
|
||||
// Throw it away
|
||||
delete eng;
|
||||
});
|
||||
When(Method(mockQuery, engine)).Return(&myEngine).Return(&myEngine);
|
||||
When(Method(mockEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
|
||||
REQUIRE(lockedShards != nullptr);
|
||||
delete lockedShards; // This is a copy
|
||||
return;
|
||||
});
|
||||
When(Method(mockEngine, createBlocks)).AlwaysReturn();
|
||||
When(ConstOverloadedMethod(mockEngine, root, ExecutionBlock* ()))
|
||||
.AlwaysReturn(&block);
|
||||
|
||||
When(Method(mockQueryClone, setEngine)).Do([&](ExecutionEngine* eng) -> void {
|
||||
// We expect that the snippet injects a new engine into our
|
||||
// query.
|
||||
// However we have to return a mocked engine later
|
||||
REQUIRE(eng != nullptr);
|
||||
// Throw it away
|
||||
delete eng;
|
||||
});
|
||||
|
||||
When(Method(mockQueryClone, engine)).Return(&mySecondEngine);
|
||||
When(Method(mockSecondEngine, setLockedShards)).AlwaysDo([&](std::unordered_set<std::string>* lockedShards) {
|
||||
REQUIRE(lockedShards != nullptr);
|
||||
delete lockedShards; // This is a copy
|
||||
return;
|
||||
});
|
||||
When(Method(mockSecondEngine, createBlocks)).AlwaysReturn();
|
||||
When(ConstOverloadedMethod(mockSecondEngine, root, ExecutionBlock* ()))
|
||||
.AlwaysReturn(&block);
|
||||
|
||||
When(OverloadedMethod(mockRegistry, destroy, void(std::string const&, QueryId, int))).Do([&]
|
||||
(std::string const& vocbase, QueryId id, int errorCode) {
|
||||
REQUIRE(vocbase == dbname);
|
||||
REQUIRE(id == secondId);
|
||||
REQUIRE(errorCode == TRI_ERROR_INTERNAL);
|
||||
});
|
||||
|
||||
// ------------------------------
|
||||
// Section: Run the test
|
||||
// ------------------------------
|
||||
|
||||
EngineInfoContainerCoordinator testee;
|
||||
testee.addNode(&fNode);
|
||||
|
||||
// Open the Second Snippet
|
||||
testee.openSnippet(remoteId);
|
||||
// Inject a node
|
||||
testee.addNode(&fNode);
|
||||
|
||||
testee.openSnippet(remoteId);
|
||||
// Inject a node
|
||||
testee.addNode(&fNode);
|
||||
|
||||
// Close the third snippet
|
||||
testee.closeSnippet();
|
||||
|
||||
// Close the second snippet
|
||||
testee.closeSnippet();
|
||||
|
||||
SECTION("cloning of a query fails") {
|
||||
// Mock the Registry
|
||||
When(Method(mockRegistry, insert)).Do([&] (QueryId id, Query* query, double timeout) {
|
||||
REQUIRE(id != 0);
|
||||
REQUIRE(query != nullptr);
|
||||
REQUIRE(timeout == 600.0);
|
||||
REQUIRE(query == &queryClone);
|
||||
secondId = id;
|
||||
});
|
||||
|
||||
SECTION("it throws an error") {
|
||||
// Mock query clone
|
||||
When(Method(mockQuery, clone)).Do([&](QueryPart part, bool withPlan) -> Query* {
|
||||
REQUIRE(part == PART_DEPENDENT);
|
||||
REQUIRE(withPlan == false);
|
||||
return &queryClone;
|
||||
}).Throw(arangodb::basics::Exception(TRI_ERROR_DEBUG, __FILE__, __LINE__));
|
||||
|
||||
ExecutionEngineResult result = testee.buildEngines(
|
||||
&query, ®istry, dbname, restrictToShards, queryIds, lockedShards.get()
|
||||
);
|
||||
REQUIRE(!result.ok());
|
||||
// Make sure we check the right thing here
|
||||
REQUIRE(result.errorNumber() == TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
SECTION("it returns nullptr") {
|
||||
// Mock query clone
|
||||
When(Method(mockQuery, clone)).Do([&](QueryPart part, bool withPlan) -> Query* {
|
||||
REQUIRE(part == PART_DEPENDENT);
|
||||
REQUIRE(withPlan == false);
|
||||
return &queryClone;
|
||||
}).Do([&](QueryPart part, bool withPlan) -> Query* {
|
||||
REQUIRE(part == PART_DEPENDENT);
|
||||
REQUIRE(withPlan == false);
|
||||
return nullptr;
|
||||
});
|
||||
|
||||
|
||||
ExecutionEngineResult result = testee.buildEngines(
|
||||
&query, ®istry, dbname, restrictToShards, queryIds, lockedShards.get()
|
||||
);
|
||||
REQUIRE(!result.ok());
|
||||
// Make sure we check the right thing here
|
||||
REQUIRE(result.errorNumber() == TRI_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
// Validate that the path up to intended error was taken
|
||||
|
||||
// Validate that the query is wired up with the engine
|
||||
Verify(Method(mockQuery, setEngine)).Exactly(1);
|
||||
// Validate that lockedShards and createBlocks have been called!
|
||||
Verify(Method(mockEngine, setLockedShards)).Exactly(1);
|
||||
Verify(Method(mockEngine, createBlocks)).Exactly(1);
|
||||
|
||||
// Validate that the second query is wired up with the second engine
|
||||
Verify(Method(mockQueryClone, setEngine)).Exactly(1);
|
||||
// Validate that lockedShards and createBlocks have been called!
|
||||
Verify(Method(mockSecondEngine, setLockedShards)).Exactly(1);
|
||||
Verify(Method(mockSecondEngine, createBlocks)).Exactly(1);
|
||||
Verify(Method(mockRegistry, insert)).Exactly(1);
|
||||
|
||||
// Assert unregister of second engine.
|
||||
Verify(OverloadedMethod(mockRegistry, destroy, void(std::string const&, QueryId, int))).Exactly(1);
|
||||
}
|
||||
|
||||
GIVEN("inserting into the Registry fails") {
|
||||
When(Method(mockSecondEngine, getQuery)).Do([&]() -> Query* {
|
||||
return &queryClone;
|
||||
});
|
||||
When(Method(mockQuery, clone)).Do([&](QueryPart part, bool withPlan) -> Query* {
|
||||
REQUIRE(part == PART_DEPENDENT);
|
||||
REQUIRE(withPlan == false);
|
||||
return &queryClone;
|
||||
});
|
||||
|
||||
When(Dtor(mockQueryClone)).Do([]() { })
|
||||
.Throw(arangodb::basics::Exception(TRI_ERROR_DEBUG, __FILE__, __LINE__));
|
||||
|
||||
WHEN("it throws an exception") {
|
||||
When(Method(mockRegistry, insert))
|
||||
.Throw(
|
||||
arangodb::basics::Exception(TRI_ERROR_DEBUG, __FILE__, __LINE__));
|
||||
|
||||
}
|
||||
|
||||
ExecutionEngineResult result = testee.buildEngines(
|
||||
&query, ®istry, dbname, restrictToShards, queryIds, lockedShards.get()
|
||||
);
|
||||
REQUIRE(!result.ok());
|
||||
// Make sure we check the right thing here
|
||||
REQUIRE(result.errorNumber() == TRI_ERROR_DEBUG);
|
||||
|
||||
// Validate that the path up to intended error was taken
|
||||
|
||||
// Validate that the query is wired up with the engine
|
||||
Verify(Method(mockQuery, setEngine)).Exactly(1);
|
||||
// Validate that lockedShards and createBlocks have been called!
|
||||
Verify(Method(mockEngine, setLockedShards)).Exactly(1);
|
||||
Verify(Method(mockEngine, createBlocks)).Exactly(1);
|
||||
|
||||
// Validate that the second query is wired up with the second engine
|
||||
Verify(Method(mockQueryClone, setEngine)).Exactly(1);
|
||||
// Validate that lockedShards and createBlocks have been called!
|
||||
Verify(Method(mockSecondEngine, setLockedShards)).Exactly(1);
|
||||
Verify(Method(mockSecondEngine, createBlocks)).Exactly(1);
|
||||
Verify(Method(mockRegistry, insert)).Exactly(1);
|
||||
|
||||
// Assert unregister of second engine.
|
||||
Verify(OverloadedMethod(mockRegistry, destroy, void(std::string const&, QueryId, int))).Exactly(0);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
}
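// ---------------------------------------------------------------------------
// Editor's note: the error-case test above mixes SECTION with Catch's BDD
// macros (GIVEN/WHEN are aliases for SECTION). Catch re-runs the enclosing
// block once per leaf section, so the shared mock setup is rebuilt for every
// branch. A minimal stand-alone sketch of that re-run behaviour:
// ---------------------------------------------------------------------------
// #include "catch.hpp"
// #include <vector>
//
// TEST_CASE("sections re-run the shared setup") {
//   std::vector<int> v{1};    // shared setup, executed once per leaf SECTION
//   SECTION("branch A") {
//     v.push_back(2);
//     REQUIRE(v.size() == 2);
//   }
//   SECTION("branch B") {
//     REQUIRE(v.size() == 1); // unaffected by branch A's push_back
//   }
// }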
} // test
} // aql
} // arangodb

@ -0,0 +1,231 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test case for RestAqlHandler
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
/// @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

#include "catch.hpp"
#include "fakeit.hpp"

#include "Aql/RestAqlHandler.h"
#include "Aql/QueryRegistry.h"
#include "Cluster/TraverserEngineRegistry.h"
#include "Cluster/ServerState.h"

using namespace arangodb;
using namespace arangodb::aql;
using namespace arangodb::traverser;

namespace arangodb {
namespace tests {
namespace rest_aql_handler_test {

class FakeResponse : public GeneralResponse {
 public:
  FakeResponse()
      : GeneralResponse(rest::ResponseCode::SERVER_ERROR),
        _transport(Endpoint::TransportType::VST) {}

  FakeResponse(Endpoint::TransportType transport)
      : GeneralResponse(rest::ResponseCode::SERVER_ERROR),
        _transport(transport) {}

  ~FakeResponse() {}

  arangodb::Endpoint::TransportType transportType() override {
    return _transport;
  };

  void reset(rest::ResponseCode code) override {
    _responseCode = code;
  }

  void addPayload(VPackSlice const&,
                  arangodb::velocypack::Options const* = nullptr,
                  bool resolveExternals = true) {
    // TODO
  };
  void addPayload(VPackBuffer<uint8_t>&&,
                  arangodb::velocypack::Options const* = nullptr,
                  bool resolveExternals = true) {
    // TODO
  };

 private:
  arangodb::Endpoint::TransportType const _transport;
};

SCENARIO("Successful query setup", "[aql][restaqlhandler]") {

  // We always work on a DBServer
  ServerState::instance()->setRole(ServerState::ROLE_PRIMARY);
  auto body = std::make_shared<VPackBuilder>();

  std::string dbName = "UnitTestDB";
  std::string user = "MyUser";
  std::unordered_map<std::string, std::string> req_headers;

  // only test setup
  std::vector<std::string> suffixes{"setup"};
  // setup only allows POST
  rest::RequestType reqType = rest::RequestType::POST;

  // Base setup of a request
  fakeit::Mock<GeneralRequest> reqMock;
  GeneralRequest& req = reqMock.get();
  fakeit::When(
      ConstOverloadedMethod(reqMock, header,
                            std::string const&(std::string const&, bool&)))
      .AlwaysDo([&](std::string const& key, bool& found) -> std::string const& {
        auto it = req_headers.find(key);
        if (it == req_headers.end()) {
          found = false;
          return StaticStrings::Empty;
        } else {
          found = true;
          return it->second;
        }
      });
  fakeit::When(Method(reqMock, databaseName)).AlwaysReturn(dbName);
  fakeit::When(Method(reqMock, user)).AlwaysReturn(user);
  fakeit::When(Method(reqMock, suffixes)).AlwaysDo([&] () -> std::vector<std::string> const& {
    return suffixes;
  });
  fakeit::When(Method(reqMock, requestType)).AlwaysDo([&] () -> rest::RequestType {
    return reqType;
  });
  fakeit::When(Method(reqMock, toVelocyPackBuilderPtr)).AlwaysDo([&] () -> std::shared_ptr<VPackBuilder> {
    return body;
  });

  fakeit::When(Dtor(reqMock)).Do([] () {} )
      .Throw(arangodb::basics::Exception(TRI_ERROR_DEBUG, __FILE__, __LINE__));

  fakeit::Mock<VocbaseContext> ctxtMock;
  VocbaseContext& ctxt = ctxtMock.get();

  fakeit::Mock<TRI_vocbase_t> vocbaseMock;
  TRI_vocbase_t& vocbase = vocbaseMock.get();
  fakeit::When(Method(reqMock, requestContext)).AlwaysReturn(&ctxt);
  fakeit::When(Method(ctxtMock, vocbase)).AlwaysReturn(&vocbase);

  // Base setup of a response

  // Base setup of the registries
  fakeit::Mock<QueryRegistry> queryRegMock;
  QueryRegistry& queryReg = queryRegMock.get();

  fakeit::Mock<TraverserEngineRegistry> travRegMock;
  TraverserEngineRegistry& travReg = travRegMock.get();

  std::pair<QueryRegistry*, TraverserEngineRegistry*> engines{&queryReg, &travReg};

  // The testee takes ownership of the response!
  // It stays valid until the testee is destroyed.
  FakeResponse* res = new FakeResponse();

  // Build the handler
  RestAqlHandler testee(&req, res, &engines);

  THEN("It should give the correct name") {
    REQUIRE(std::string(testee.name()) == "RestAqlHandler");
  }

  THEN("It should never be direct") {
    REQUIRE(testee.isDirect() == false);
  }

  GIVEN("A single query snippet") {

    // {
    //   lockInfo: {
    //     READ: [<collections to read-lock>],
    //     WRITE: [<collections to write-lock>]
    //   },
    //   options: { <query options> },
    //   snippets: {
    //     <queryId>: { nodes: [ <nodes> ] }
    //   },
    //   variables: [ <variables> ]
    // }

    body->openObject();
    body->add(VPackValue("lockInfo"));
    body->openObject();
    body->close();

    body->add(VPackValue("options"));
    body->openObject();
    body->close();

    body->add(VPackValue("snippets"));
    body->openObject();
    body->close();

    body->add(VPackValue("variables"));
    body->openArray();
    body->close();

    body->close();
    RestStatus status = testee.execute();

    THEN("It should succeed") {
      REQUIRE(!status.isFailed());
      REQUIRE(res->responseCode() == rest::ResponseCode::OK);
    }
  }

  GIVEN("A list of query snippets") {
  }

  GIVEN("A single traverser engine") {
  }

  GIVEN("A traverser engine and a query snippet") {
  }

}

SCENARIO("Error in query setup", "[aql][restaqlhandler]") {
  GIVEN("A single query snippet") {
  }

  GIVEN("A list of query snippets") {
  }

  GIVEN("A single traverser engine") {
  }

  GIVEN("A traverser engine and a query snippet") {
  }

}

}
}
}

@ -48,7 +48,9 @@ if (USE_IRESEARCH)
    IResearch/ExpressionContextMock.cpp
    IResearch/VelocyPackHelper-test.cpp
    IResearch/ExecutionBlockMock-test.cpp
    Utils/CollectionNameResolver-test.cpp
    VocBase/LogicalDataSource-test.cpp
    VocBase/vocbase-test.cpp
  )
endif ()

@ -64,6 +66,8 @@ add_executable(
  Agency/RemoveFollowerTest.cpp
  Agency/StoreTest.cpp
  Agency/SupervisionTest.cpp
  Aql/EngineInfoContainerCoordinatorTest.cpp
  Aql/RestAqlHandlerTest.cpp
  Auth/UserManagerTest.cpp
  Basics/icu-helper.cpp
  Basics/ApplicationServerTest.cpp

@ -374,12 +374,19 @@ SECTION("test_async_index") {
  // populate collections asynchronously
  {
    std::thread thread0([collection0, &resThread0]()->void {
      irs::utf8_path resource;
      resource/=irs::string_ref(IResearch_test_resource_dir);
      resource/=irs::string_ref("simple_sequential.json");
      arangodb::velocypack::Builder builder;

      try {
        irs::utf8_path resource;

        resource/=irs::string_ref(IResearch_test_resource_dir);
        resource/=irs::string_ref("simple_sequential.json");
        builder = arangodb::basics::VelocyPackHelper::velocyPackFromFile(resource.utf8());
      } catch (...) {
        return; // velocyPackFromFile(...) may throw an exception
      }

      auto doc = arangodb::velocypack::Parser::fromJson("{ \"seq\": 40, \"same\": \"xyz\", \"duplicated\": \"abcd\" }");
      auto builder = arangodb::basics::VelocyPackHelper::velocyPackFromFile(resource.utf8());
      auto slice = builder.slice();
      resThread0 = slice.isArray();
      if (!resThread0) return;

@ -405,12 +412,19 @@ SECTION("test_async_index") {
    });

    std::thread thread1([collection1, &resThread1]()->void {
      irs::utf8_path resource;
      resource/=irs::string_ref(IResearch_test_resource_dir);
      resource/=irs::string_ref("simple_sequential.json");
      arangodb::velocypack::Builder builder;

      try {
        irs::utf8_path resource;

        resource/=irs::string_ref(IResearch_test_resource_dir);
        resource/=irs::string_ref("simple_sequential.json");
        builder = arangodb::basics::VelocyPackHelper::velocyPackFromFile(resource.utf8());
      } catch (...) {
        return; // velocyPackFromFile(...) may throw an exception
      }

      auto doc = arangodb::velocypack::Parser::fromJson("{ \"seq\": 50, \"same\": \"xyz\", \"duplicated\": \"abcd\" }");
      auto builder = arangodb::basics::VelocyPackHelper::velocyPackFromFile(resource.utf8());
      auto slice = builder.slice();
      resThread1 = slice.isArray();
      if (!resThread1) return;

@ -627,4 +641,4 @@ SECTION("test_fields") {

// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

@ -188,12 +188,12 @@ SECTION("test_inheritDefaults") {

  analyzers.start();

  defaults._fields["abc"] = std::move(arangodb::iresearch::IResearchLinkMeta());
  defaults._fields["abc"] = arangodb::iresearch::IResearchLinkMeta();
  defaults._includeAllFields = true;
  defaults._trackListPositions = true;
  defaults._analyzers.clear();
  defaults._analyzers.emplace_back(analyzers.ensure("empty"));
  defaults._fields["abc"]->_fields["xyz"] = std::move(arangodb::iresearch::IResearchLinkMeta());
  defaults._fields["abc"]->_fields["xyz"] = arangodb::iresearch::IResearchLinkMeta();

  auto json = arangodb::velocypack::Parser::fromJson("{}");
  CHECK(true == meta.init(json->slice(), tmpString, defaults));

@ -410,8 +410,8 @@ SECTION("test_writeCustomizedValues") {
  auto& overrideNone = *(meta._fields["c"]->_fields["none"]);

  overrideAll._fields.clear(); // do not inherit fields to match JSON inheritance
  overrideAll._fields["x"] = std::move(arangodb::iresearch::IResearchLinkMeta());
  overrideAll._fields["y"] = std::move(arangodb::iresearch::IResearchLinkMeta());
  overrideAll._fields["x"] = arangodb::iresearch::IResearchLinkMeta();
  overrideAll._fields["y"] = arangodb::iresearch::IResearchLinkMeta();
  overrideAll._includeAllFields = false;
  overrideAll._trackListPositions = false;
  overrideAll._analyzers.clear();

@ -30,6 +30,7 @@
#include "Enterprise/Ldap/LdapFeature.h"
#endif

#include "Basics/files.h"
#include "V8/v8-globals.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/LogicalView.h"

@ -132,6 +133,13 @@ struct IResearchQuerySetup {
  analyzers->emplace("test_analyzer", "TestAnalyzer", "abc"); // cache analyzer
  analyzers->emplace("test_csv_analyzer", "TestDelimAnalyzer", ","); // cache analyzer

  auto* dbPathFeature = arangodb::application_features::ApplicationServer::getFeature<arangodb::DatabasePathFeature>("DatabasePath");
  irs::utf8_path testFilesystemPath;

  testFilesystemPath /= TRI_GetTempPath();
  testFilesystemPath /= std::string("arangodb_tests.") + std::to_string(TRI_microtime());
  const_cast<std::string&>(dbPathFeature->directory()) = testFilesystemPath.utf8();

  // suppress log messages since tests check error conditions
  arangodb::LogTopic::setLogLevel(arangodb::Logger::FIXME.name(), arangodb::LogLevel::ERR); // suppress WARNING DefaultCustomTypeHandler called
  arangodb::LogTopic::setLogLevel(arangodb::iresearch::IResearchFeature::IRESEARCH.name(), arangodb::LogLevel::FATAL);

@ -0,0 +1,263 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Andrey Abramov
/// @author Vasiliy Nabatchikov
////////////////////////////////////////////////////////////////////////////////

#include "catch.hpp"
#include "../IResearch/common.h"
#include "../IResearch/StorageEngineMock.h"
#include "IResearch/ApplicationServerHelper.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/QueryRegistryFeature.h"
#include "RestServer/ViewTypesFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "Utils/CollectionNameResolver.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/LogicalView.h"
#include "velocypack/Parser.h"

namespace {

std::unique_ptr<arangodb::ViewImplementation> makeTestView(
    arangodb::LogicalView* view,
    arangodb::velocypack::Slice const& info,
    bool isNew
) {
  struct Impl: public arangodb::ViewImplementation {
    Impl(): ViewImplementation(nullptr, arangodb::velocypack::Slice::emptyObjectSlice()) {
    }
    virtual void drop() override {}
    virtual void getPropertiesVPack(
        arangodb::velocypack::Builder&, bool
    ) const override {
    }
    virtual void open() override {}
    virtual arangodb::Result updateProperties(
        arangodb::velocypack::Slice const&, bool, bool
    ) override {
      return arangodb::Result();
    }
    virtual bool visitCollections(
        std::function<bool(TRI_voc_cid_t)> const&
    ) const override {
      return true;
    }
  };

  return std::unique_ptr<arangodb::ViewImplementation>(new Impl());
}

}

// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------

struct CollectionNameResolverSetup {
  StorageEngineMock engine;
  arangodb::application_features::ApplicationServer server;
  std::vector<std::pair<arangodb::application_features::ApplicationFeature*, bool>> features;

  CollectionNameResolverSetup(): server(nullptr, nullptr) {
    arangodb::EngineSelectorFeature::ENGINE = &engine;

    // setup required application features
    features.emplace_back(new arangodb::DatabaseFeature(&server), false); // required for TRI_vocbase_t::dropCollection(...)
    features.emplace_back(new arangodb::QueryRegistryFeature(&server), false); // required for TRI_vocbase_t instantiation
    features.emplace_back(new arangodb::ViewTypesFeature(&server), false); // required for TRI_vocbase_t::createView(...)

    for (auto& f: features) {
      arangodb::application_features::ApplicationServer::server->addFeature(f.first);
    }

    for (auto& f: features) {
      f.first->prepare();
    }

    for (auto& f: features) {
      if (f.second) {
        f.first->start();
      }
    }

    // register view factory
    arangodb::iresearch::getFeature<arangodb::ViewTypesFeature>()->emplace(
      arangodb::LogicalDataSource::Type::emplace(
        arangodb::velocypack::StringRef("testViewType")
      ),
      makeTestView
    );
  }

  ~CollectionNameResolverSetup() {
    arangodb::application_features::ApplicationServer::server = nullptr;
    arangodb::EngineSelectorFeature::ENGINE = nullptr;

    // destroy application features
    for (auto& f: features) {
      if (f.second) {
        f.first->stop();
      }
    }

    for (auto& f: features) {
      f.first->unprepare();
    }
  }
};

// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////

TEST_CASE("CollectionNameResolverTest", "[vocbase]") {
  CollectionNameResolverSetup s;
  (void)(s);

  SECTION("test_getDataSource") {
    auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"globallyUniqueId\": \"testCollectionGUID\", \"id\": 100, \"name\": \"testCollection\" }");
    auto viewJson = arangodb::velocypack::Parser::fromJson("{ \"id\": 200, \"name\": \"testView\", \"type\": \"testViewType\" }"); // any arbitrary view type
    TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
    arangodb::CollectionNameResolver resolver(&vocbase);

    // not present collection (no datasource)
    {
      CHECK((true == !resolver.getDataSource(100)));
      CHECK((true == !resolver.getDataSource("100")));
      CHECK((true == !resolver.getDataSource("testCollection")));
      CHECK((true == !resolver.getDataSource("testCollectionGUID")));
      CHECK((true == !resolver.getCollection(100)));
      CHECK((true == !resolver.getCollection("100")));
      CHECK((true == !resolver.getCollection("testCollection")));
      CHECK((true == !resolver.getCollection("testCollectionGUID")));
    }

    // not present view (no datasource)
    {
      CHECK((true == !resolver.getDataSource(200)));
      CHECK((true == !resolver.getDataSource("200")));
      CHECK((true == !resolver.getDataSource("testView")));
      CHECK((true == !resolver.getDataSource("testViewGUID")));
      CHECK((true == !resolver.getView(200)));
      CHECK((true == !resolver.getView("200")));
      CHECK((true == !resolver.getView("testView")));
      CHECK((true == !resolver.getView("testViewGUID")));
    }

    auto* collection = vocbase.createCollection(collectionJson->slice());
    auto view = vocbase.createView(viewJson->slice(), 42);

    CHECK((false == collection->deleted()));
    CHECK((false == view->deleted()));

    // not present collection (is view)
    {
      CHECK((false == !resolver.getDataSource(200)));
      CHECK((false == !resolver.getDataSource("200")));
      CHECK((false == !resolver.getDataSource("testView")));
      CHECK((true == !resolver.getDataSource("testViewGUID")));
      CHECK((true == !resolver.getCollection(200)));
      CHECK((true == !resolver.getCollection("200")));
      CHECK((true == !resolver.getCollection("testView")));
      CHECK((true == !resolver.getCollection("testViewGUID")));
    }

    // not present view (is collection)
    {
      CHECK((false == !resolver.getDataSource(100)));
      CHECK((false == !resolver.getDataSource("100")));
      CHECK((false == !resolver.getDataSource("testCollection")));
      CHECK((false == !resolver.getDataSource("testCollectionGUID")));
      CHECK((true == !resolver.getView(100)));
      CHECK((true == !resolver.getView("100")));
      CHECK((true == !resolver.getView("testCollection")));
      CHECK((true == !resolver.getView("testCollectionGUID")));
    }

    // present collection
    {
      CHECK((false == !resolver.getDataSource(100)));
      CHECK((false == !resolver.getDataSource("100")));
      CHECK((false == !resolver.getDataSource("testCollection")));
      CHECK((false == !resolver.getDataSource("testCollectionGUID")));
      CHECK((false == !resolver.getCollection(100)));
      CHECK((false == !resolver.getCollection("100")));
      CHECK((false == !resolver.getCollection("testCollection")));
      CHECK((false == !resolver.getCollection("testCollectionGUID")));
    }

    // present view
    {
      CHECK((false == !resolver.getDataSource(200)));
      CHECK((false == !resolver.getDataSource("200")));
      CHECK((false == !resolver.getDataSource("testView")));
      CHECK((true == !resolver.getDataSource("testViewGUID")));
      CHECK((false == !resolver.getView(200)));
      CHECK((false == !resolver.getView("200")));
      CHECK((false == !resolver.getView("testView")));
      CHECK((true == !resolver.getView("testViewGUID")));
    }

    CHECK((TRI_ERROR_NO_ERROR == vocbase.dropCollection(collection, true, 0)));
    CHECK((TRI_ERROR_NO_ERROR == vocbase.dropView(view)));
    CHECK((true == collection->deleted()));
    CHECK((true == view->deleted()));

    // present collection (deleted, cached)
    {
      CHECK((false == !resolver.getDataSource(100)));
      CHECK((false == !resolver.getDataSource("100")));
      CHECK((false == !resolver.getDataSource("testCollection")));
      CHECK((false == !resolver.getDataSource("testCollectionGUID")));
      CHECK((false == !resolver.getCollection(100)));
      CHECK((false == !resolver.getCollection("100")));
      CHECK((false == !resolver.getCollection("testCollection")));
      CHECK((false == !resolver.getCollection("testCollectionGUID")));
      CHECK((true == resolver.getCollection(100)->deleted()));
    }

    // present view (deleted, cached)
    {
      CHECK((false == !resolver.getDataSource(200)));
      CHECK((false == !resolver.getDataSource("200")));
      CHECK((false == !resolver.getDataSource("testView")));
      CHECK((true == !resolver.getDataSource("testViewGUID")));
      CHECK((false == !resolver.getView(200)));
      CHECK((false == !resolver.getView("200")));
      CHECK((false == !resolver.getView("testView")));
      CHECK((true == !resolver.getView("testViewGUID")));
      CHECK((true == resolver.getView(200)->deleted()));
    }
  }

////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////

}

// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------

@ -0,0 +1,368 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Andrey Abramov
|
||||
/// @author Vasiliy Nabatchikov
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "catch.hpp"
|
||||
#include "../IResearch/common.h"
|
||||
#include "../IResearch/StorageEngineMock.h"
|
||||
#include "IResearch/ApplicationServerHelper.h"
|
||||
#include "RestServer/DatabaseFeature.h"
|
||||
#include "RestServer/QueryRegistryFeature.h"
|
||||
#include "RestServer/ViewTypesFeature.h"
|
||||
#include "StorageEngine/EngineSelectorFeature.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/LogicalView.h"
|
||||
#include "velocypack/Parser.h"
|
||||
|
||||
namespace {
|
||||
|
||||
std::unique_ptr<arangodb::ViewImplementation> makeTestView(
|
||||
arangodb::LogicalView* view,
|
||||
arangodb::velocypack::Slice const& info,
|
||||
bool isNew
|
||||
) {
|
||||
struct Impl: public arangodb::ViewImplementation {
|
||||
Impl(): ViewImplementation(nullptr, arangodb::velocypack::Slice::emptyObjectSlice()) {
|
||||
}
|
||||
virtual void drop() override {}
|
||||
virtual void getPropertiesVPack(
|
||||
arangodb::velocypack::Builder&, bool
|
||||
) const override {
|
||||
}
|
||||
virtual void open() override {}
|
||||
virtual arangodb::Result updateProperties(
|
||||
arangodb::velocypack::Slice const&, bool, bool
|
||||
) override {
|
||||
return arangodb::Result();
|
||||
}
|
||||
virtual bool visitCollections(
|
||||
std::function<bool(TRI_voc_cid_t)> const&
|
||||
) const override {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
return std::unique_ptr<arangodb::ViewImplementation>(new Impl());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- setup / tear-down
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
struct VocbaseSetup {
|
||||
StorageEngineMock engine;
|
||||
arangodb::application_features::ApplicationServer server;
|
||||
std::vector<std::pair<arangodb::application_features::ApplicationFeature*, bool>> features;
|
||||
|
||||
VocbaseSetup(): server(nullptr, nullptr) {
|
||||
arangodb::EngineSelectorFeature::ENGINE = &engine;
|
||||
|
||||
// setup required application features
|
||||
features.emplace_back(new arangodb::DatabaseFeature(&server), false); // required for TRI_vocbase_t::dropCollection(...)
|
||||
features.emplace_back(new arangodb::QueryRegistryFeature(&server), false); // required for TRI_vocbase_t instantiation
|
||||
features.emplace_back(new arangodb::ViewTypesFeature(&server), false); // required for TRI_vocbase_t::createView(...)
|
||||
|
||||
for (auto& f: features) {
|
||||
arangodb::application_features::ApplicationServer::server->addFeature(f.first);
|
||||
}
|
||||
|
||||
for (auto& f: features) {
|
||||
f.first->prepare();
|
||||
}
|
||||
|
||||
for (auto& f: features) {
|
||||
if (f.second) {
|
||||
f.first->start();
|
||||
}
|
||||
}
|
||||
|
||||
// register view factory
|
||||
arangodb::iresearch::getFeature<arangodb::ViewTypesFeature>()->emplace(
|
||||
arangodb::LogicalDataSource::Type::emplace(
|
||||
arangodb::velocypack::StringRef("testViewType")
|
||||
),
|
||||
makeTestView
|
||||
);
|
||||
}
|
||||
|
||||
~VocbaseSetup() {
|
||||
arangodb::application_features::ApplicationServer::server = nullptr;
|
||||
arangodb::EngineSelectorFeature::ENGINE = nullptr;
|
||||
|
||||
// destroy application features
|
||||
for (auto& f: features) {
|
||||
if (f.second) {
|
||||
f.first->stop();
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& f: features) {
|
||||
f.first->unprepare();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- test suite
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief setup
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
TEST_CASE("VocbaseTest", "[vocbase]") {
|
||||
VocbaseSetup s;
|
||||
(void)(s);
|
||||
|
||||
SECTION("test_isAllowedName") {
|
||||
// direct (non-system)
|
||||
{
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef(nullptr, 0))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef(""))));
|
||||
CHECK((true == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef("abc123"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef("123abc"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef("123"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef("_123"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef("_abc"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(false, arangodb::velocypack::StringRef("abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")))); // longer than TRI_COL_NAME_LENGTH
|
||||
}
|
||||
|
||||
// direct (system)
|
||||
{
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef(nullptr, 0))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef(""))));
|
||||
CHECK((true == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef("abc123"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef("123abc"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef("123"))));
|
||||
CHECK((true == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef("_123"))));
|
||||
CHECK((true == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef("_abc"))));
|
||||
CHECK((false == TRI_vocbase_t::IsAllowedName(true, arangodb::velocypack::StringRef("abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")))); // longer than TRI_COL_NAME_LENGTH
|
||||
}
|
||||
|
||||
  // slice (default)
  {
    auto json0 = arangodb::velocypack::Parser::fromJson("{ }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json0->slice())));
    auto json1 = arangodb::velocypack::Parser::fromJson("{ \"name\": \"\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json1->slice())));
    auto json2 = arangodb::velocypack::Parser::fromJson("{ \"name\": \"abc123\" }");
    CHECK((true == TRI_vocbase_t::IsAllowedName(json2->slice())));
    auto json3 = arangodb::velocypack::Parser::fromJson("{ \"name\": \"123abc\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json3->slice())));
    auto json4 = arangodb::velocypack::Parser::fromJson("{ \"name\": \"123\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json4->slice())));
    auto json5 = arangodb::velocypack::Parser::fromJson("{ \"name\": \"_123\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json5->slice())));
    auto json6 = arangodb::velocypack::Parser::fromJson("{ \"name\": \"_abc\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json6->slice())));
    auto json7 = arangodb::velocypack::Parser::fromJson("{ \"name\": \"abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json7->slice()))); // longer than TRI_COL_NAME_LENGTH
  }

  // slice (non-system)
  {
    auto json0 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json0->slice())));
    auto json1 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": \"\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json1->slice())));
    auto json2 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": \"abc123\" }");
    CHECK((true == TRI_vocbase_t::IsAllowedName(json2->slice())));
    auto json3 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": \"123abc\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json3->slice())));
    auto json4 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": \"123\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json4->slice())));
    auto json5 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": \"_123\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json5->slice())));
    auto json6 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": \"_abc\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json6->slice())));
    auto json7 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": 123 }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json7->slice()))); // non-string 'name' is rejected
    auto json8 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": 123, \"name\": \"abc\" }");
    CHECK((true == TRI_vocbase_t::IsAllowedName(json8->slice()))); // non-boolean 'isSystem' is treated as false
    auto json9 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": false, \"name\": \"abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json9->slice()))); // longer than TRI_COL_NAME_LENGTH
  }

  // slice (system)
  {
    auto json0 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json0->slice())));
    auto json1 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": \"\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json1->slice())));
    auto json2 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": \"abc123\" }");
    CHECK((true == TRI_vocbase_t::IsAllowedName(json2->slice())));
    auto json3 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": \"123abc\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json3->slice())));
    auto json4 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": \"123\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json4->slice())));
    auto json5 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": \"_123\" }");
    CHECK((true == TRI_vocbase_t::IsAllowedName(json5->slice())));
    auto json6 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": \"_abc\" }");
    CHECK((true == TRI_vocbase_t::IsAllowedName(json6->slice())));
    auto json7 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": 123 }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json7->slice()))); // non-string 'name' is rejected
    auto json8 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": 123, \"name\": \"_abc\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json8->slice()))); // non-boolean 'isSystem' is treated as false, so '_abc' is not allowed
    auto json9 = arangodb::velocypack::Parser::fromJson("{ \"isSystem\": true, \"name\": \"abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\" }");
    CHECK((false == TRI_vocbase_t::IsAllowedName(json9->slice()))); // longer than TRI_COL_NAME_LENGTH
  }
}

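// A minimal sketch of the first-character and length rules exercised above
// (an illustration only, not the actual implementation; assumes plain
// std::string input and covers only the properties the checks verify):
//
//   bool isAllowedNameSketch(bool allowSystem, std::string const& name) {
//     if (name.empty() || name.size() > TRI_COL_NAME_LENGTH) {
//       return false; // empty or over-long names are rejected
//     }
//     char c = name[0];
//     return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
//            || (allowSystem && c == '_'); // leading digit never allowed
//   }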
SECTION("test_isSystemName") {
|
||||
CHECK((false == TRI_vocbase_t::IsSystemName("")));
|
||||
CHECK((true == TRI_vocbase_t::IsSystemName("_")));
|
||||
CHECK((true == TRI_vocbase_t::IsSystemName("_abc")));
|
||||
CHECK((false == TRI_vocbase_t::IsSystemName("abc")));
|
||||
}
|
||||
|
||||
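// The semantics verified above amount to: a name is a system name iff it is
// non-empty and starts with '_'. As an illustrative sketch (not the actual
// implementation):
//
//   bool isSystemNameSketch(std::string const& name) {
//     return !name.empty() && name[0] == '_';
//   }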
SECTION("test_lookupDataSource") {
|
||||
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"globallyUniqueId\": \"testCollectionGUID\", \"id\": 100, \"name\": \"testCollection\" }");
|
||||
auto viewJson = arangodb::velocypack::Parser::fromJson("{ \"id\": 200, \"name\": \"testView\", \"type\": \"testViewType\" }"); // any arbitrary view type
|
||||
Vocbase vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
|
||||
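  // note: '(true == !x)' asserts that the returned pointer is empty (lookup
  // failed), while '(false == !x)' asserts that a data source was found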
  // not present collection (no datasource)
  {
    CHECK((true == !vocbase.lookupDataSource(100)));
    CHECK((true == !vocbase.lookupDataSource("100")));
    CHECK((true == !vocbase.lookupDataSource("testCollection")));
    CHECK((true == !vocbase.lookupDataSource("testCollectionGUID")));
    CHECK((true == !vocbase.lookupCollection(100)));
    CHECK((true == !vocbase.lookupCollection("100")));
    CHECK((true == !vocbase.lookupCollection("testCollection")));
    CHECK((true == !vocbase.lookupCollection("testCollectionGUID")));
  }

  // not present view (no datasource)
  {
    CHECK((true == !vocbase.lookupDataSource(200)));
    CHECK((true == !vocbase.lookupDataSource("200")));
    CHECK((true == !vocbase.lookupDataSource("testView")));
    CHECK((true == !vocbase.lookupDataSource("testViewGUID")));
    CHECK((true == !vocbase.lookupView(200)));
    CHECK((true == !vocbase.lookupView("200")));
    CHECK((true == !vocbase.lookupView("testView")));
    CHECK((true == !vocbase.lookupView("testViewGUID")));
  }

  auto* collection = vocbase.createCollection(collectionJson->slice());
  auto view = vocbase.createView(viewJson->slice(), 42);

  CHECK((false == collection->deleted()));
  CHECK((false == view->deleted()));

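  // note: viewJson defines no 'globallyUniqueId', so lookups via
  // "testViewGUID" are expected to fail throughout this test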
  // not present collection (is view)
  {
    CHECK((false == !vocbase.lookupDataSource(200)));
    CHECK((false == !vocbase.lookupDataSource("200")));
    CHECK((false == !vocbase.lookupDataSource("testView")));
    CHECK((true == !vocbase.lookupDataSource("testViewGUID")));
    CHECK((true == !vocbase.lookupCollection(200)));
    CHECK((true == !vocbase.lookupCollection("200")));
    CHECK((true == !vocbase.lookupCollection("testView")));
    CHECK((true == !vocbase.lookupCollection("testViewGUID")));
    CHECK((true == !vocbase.lookupCollectionByUuid("testView")));
    CHECK((true == !vocbase.lookupCollectionByUuid("testViewGUID")));
  }

  // not present view (is collection)
  {
    CHECK((false == !vocbase.lookupDataSource(100)));
    CHECK((false == !vocbase.lookupDataSource("100")));
    CHECK((false == !vocbase.lookupDataSource("testCollection")));
    CHECK((false == !vocbase.lookupDataSource("testCollectionGUID")));
    CHECK((true == !vocbase.lookupView(100)));
    CHECK((true == !vocbase.lookupView("100")));
    CHECK((true == !vocbase.lookupView("testCollection")));
    CHECK((true == !vocbase.lookupView("testCollectionGUID")));
  }

  // present collection
  {
    CHECK((false == !vocbase.lookupDataSource(100)));
    CHECK((false == !vocbase.lookupDataSource("100")));
    CHECK((false == !vocbase.lookupDataSource("testCollection")));
    CHECK((false == !vocbase.lookupDataSource("testCollectionGUID")));
    CHECK((false == !vocbase.lookupCollection(100)));
    CHECK((false == !vocbase.lookupCollection("100")));
    CHECK((false == !vocbase.lookupCollection("testCollection")));
    CHECK((false == !vocbase.lookupCollection("testCollectionGUID")));
    CHECK((true == !vocbase.lookupCollectionByUuid("testCollection")));
    CHECK((false == !vocbase.lookupCollectionByUuid("testCollectionGUID")));
  }

  // present view
  {
    CHECK((false == !vocbase.lookupDataSource(200)));
    CHECK((false == !vocbase.lookupDataSource("200")));
    CHECK((false == !vocbase.lookupDataSource("testView")));
    CHECK((true == !vocbase.lookupDataSource("testViewGUID")));
    CHECK((false == !vocbase.lookupView(200)));
    CHECK((false == !vocbase.lookupView("200")));
    CHECK((false == !vocbase.lookupView("testView")));
    CHECK((true == !vocbase.lookupView("testViewGUID")));
  }

  CHECK((TRI_ERROR_NO_ERROR == vocbase.dropCollection(collection, true, 0)));
  CHECK((TRI_ERROR_NO_ERROR == vocbase.dropView(view)));
  CHECK((true == collection->deleted()));
  CHECK((true == view->deleted()));

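  // once dropped, neither data source may be reachable via any of the lookup
  // paths exercised above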
  // not present collection (deleted)
  {
    CHECK((true == !vocbase.lookupDataSource(100)));
    CHECK((true == !vocbase.lookupDataSource("100")));
    CHECK((true == !vocbase.lookupDataSource("testCollection")));
    CHECK((true == !vocbase.lookupDataSource("testCollectionGUID")));
    CHECK((true == !vocbase.lookupCollection(100)));
    CHECK((true == !vocbase.lookupCollection("100")));
    CHECK((true == !vocbase.lookupCollection("testCollection")));
    CHECK((true == !vocbase.lookupCollection("testCollectionGUID")));
    CHECK((true == !vocbase.lookupCollectionByUuid("testCollection")));
    CHECK((true == !vocbase.lookupCollectionByUuid("testCollectionGUID")));
  }

  // not present view (deleted)
  {
    CHECK((true == !vocbase.lookupDataSource(200)));
    CHECK((true == !vocbase.lookupDataSource("200")));
    CHECK((true == !vocbase.lookupDataSource("testView")));
    CHECK((true == !vocbase.lookupDataSource("testViewGUID")));
    CHECK((true == !vocbase.lookupView(200)));
    CHECK((true == !vocbase.lookupView("200")));
    CHECK((true == !vocbase.lookupView("testView")));
    CHECK((true == !vocbase.lookupView("testViewGUID")));
    CHECK((true == !vocbase.lookupCollectionByUuid("testCollection")));
    CHECK((true == !vocbase.lookupCollectionByUuid("testCollectionGUID")));
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////

}

// -----------------------------------------------------------------------------
// --SECTION--                                                       END-OF-FILE
// -----------------------------------------------------------------------------