mirror of https://gitee.com/bigwinds/arangodb
Feature/count plan nodes by type (#5225)
parent a1416e1067
commit 5357adad95
@@ -163,11 +163,13 @@ void EngineInfoContainerDBServer::EngineInfo::serializeSnippet(
  ExecutionPlan plan(query->ast());
  ExecutionNode* previous = nullptr;

  // for (ExecutionNode const* current : _nodes) {
  for (auto enIt = _nodes.rbegin(); enIt != _nodes.rend(); ++enIt) {
    ExecutionNode const* current = *enIt;
    auto clone = current->clone(&plan, false, false);
    // UNNECESSARY, because clone does it: plan.registerNode(clone);

    // we need to count nodes by type ourselves, as we will set the "varUsageComputed"
    // flag below (which will handle the counting)
    plan.increaseCounter(clone->getType());

    if (current->getType() == ExecutionNode::REMOTE) {
      auto rem = static_cast<RemoteNode*>(clone);
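The hunk above clones each node into a scratch ExecutionPlan and bumps the plan's per-type counter by hand: the snippet later marks variable usage as already computed, so the walk that normally maintains the counters never runs for this plan. A minimal sketch of that bookkeeping, using simplified stand-in types (only the counter array and increaseCounter() mirror what the diff shows; everything else is illustrative):

#include <array>
#include <cstddef>
#include <cstdint>

// Stand-in node types; the real enum lives in ExecutionNode::NodeType.
enum NodeType : std::size_t { SINGLETON = 0, ENUMERATE_COLLECTION = 1, REMOTE = 2 };
constexpr std::size_t kMaxNodeTypeValue = REMOTE;

struct PlanSketch {
  // one counter slot per node type, zero-initialized
  std::array<uint32_t, kMaxNodeTypeValue + 1> typeCounts{};

  // bump the counter for one node type; noexcept like the real increaseCounter()
  void increaseCounter(NodeType type) noexcept { ++typeCounts[type]; }

  // models the clone path: a node is added while the regular counting walk is
  // bypassed, so the counter must be bumped at creation time
  void addNodeManually(NodeType type) { increaseCounter(type); }
};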
@@ -110,6 +110,20 @@ void ExecutionNode::validateType(int type) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_NOT_IMPLEMENTED, "unknown TypeID");
  }
}

/// @brief add a dependency
void ExecutionNode::addDependency(ExecutionNode* ep) {
  TRI_ASSERT(ep != nullptr);
  _dependencies.emplace_back(ep);
  ep->_parents.emplace_back(this);
}

/// @brief add a parent
void ExecutionNode::addParent(ExecutionNode* ep) {
  TRI_ASSERT(ep != nullptr);
  ep->_dependencies.emplace_back(this);
  _parents.emplace_back(ep);
}

void ExecutionNode::getSortElements(SortElementVector& elements,
                                    ExecutionPlan* plan,

@@ -1158,6 +1172,95 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
    en->setRegsToClear(std::move(regsToClear));
  }
}

/// @brief replace a dependency, returns true if the pointer was found and
/// replaced, please note that this does not delete oldNode!
bool ExecutionNode::replaceDependency(ExecutionNode* oldNode, ExecutionNode* newNode) {
  TRI_ASSERT(oldNode != nullptr);
  TRI_ASSERT(newNode != nullptr);

  auto it = _dependencies.begin();

  while (it != _dependencies.end()) {
    if (*it == oldNode) {
      *it = newNode;
      try {
        newNode->_parents.emplace_back(this);
      } catch (...) {
        *it = oldNode; // roll back
        return false;
      }
      try {
        for (auto it2 = oldNode->_parents.begin();
             it2 != oldNode->_parents.end(); ++it2) {
          if (*it2 == this) {
            oldNode->_parents.erase(it2);
            break;
          }
        }
      } catch (...) {
        // If this happens, we ignore that the _parents of oldNode
        // are not set correctly
      }
      return true;
    }

    ++it;
  }
  return false;
}

/// @brief remove a dependency, returns true if the pointer was found and
/// removed, please note that this does not delete ep!
bool ExecutionNode::removeDependency(ExecutionNode* ep) {
  bool ok = false;
  for (auto it = _dependencies.begin(); it != _dependencies.end(); ++it) {
    if (*it == ep) {
      try {
        it = _dependencies.erase(it);
      } catch (...) {
        return false;
      }
      ok = true;
      break;
    }
  }

  if (!ok) {
    return false;
  }

  // Now remove us as a parent of the old dependency as well:
  for (auto it = ep->_parents.begin(); it != ep->_parents.end(); ++it) {
    if (*it == this) {
      try {
        ep->_parents.erase(it);
      } catch (...) {
      }
      return true;
    }
  }

  return false;
}

/// @brief remove all dependencies for the given node
void ExecutionNode::removeDependencies() {
  for (auto& x : _dependencies) {
    for (auto it = x->_parents.begin(); it != x->_parents.end(); /* no hoisting */) {
      if (*it == this) {
        try {
          it = x->_parents.erase(it);
        } catch (...) {
        }
        break;
      } else {
        ++it;
      }
    }
  }
  _dependencies.clear();
}

/// @brief creates corresponding ExecutionBlock
std::unique_ptr<ExecutionBlock> SingletonNode::createBlock(

@@ -1575,7 +1678,7 @@ bool SubqueryNode::isModificationQuery() const {

      stack.pop_back();

      current->addDependencies(stack);
      current->dependencies(stack);
    }

  return false;
@@ -47,10 +47,6 @@
//
// If you wish to unlink (remove) or replace a node you should do it by using
// one of the plans operations.
//
// addDependency(Parent) has a totally different functionality as addDependencies(Parents)
// the latter is not adding a list of Dependencies to a node!!!
//

#ifndef ARANGOD_AQL_EXECUTION_NODE_H
#define ARANGOD_AQL_EXECUTION_NODE_H 1

@@ -137,8 +133,15 @@ class ExecutionNode {
#ifdef USE_IRESEARCH
    ENUMERATE_IRESEARCH_VIEW = 25
#endif
    // adjust MaxNodeTypeValue below when new ExecutionNode types are added!
  };

#ifdef USE_IRESEARCH
  static constexpr size_t MaxNodeTypeValue = ENUMERATE_IRESEARCH_VIEW;
#else
  static constexpr size_t MaxNodeTypeValue = SHORTEST_PATH;
#endif

  ExecutionNode() = delete;
  ExecutionNode(ExecutionNode const&) = delete;
  ExecutionNode& operator=(ExecutionNode const&) = delete;
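MaxNodeTypeValue pins the largest NodeType enum value at compile time so the plan can keep its per-type counters in a fixed-size std::array rather than a map; the comment inside the enum reminds maintainers to bump it when new node types are added. A small illustrative sketch of why the array needs MaxNodeTypeValue + 1 slots (the enum values here are made up):

#include <array>
#include <cstddef>
#include <cstdint>

// Hypothetical node-type enum: what matters for sizing is the largest VALUE,
// not the number of enumerators, because the value is used as an array index.
enum SketchType : std::size_t { FIRST = 0, SOME = 7, LAST = 25 };
constexpr std::size_t kMaxTypeValue = LAST;   // analogous to MaxNodeTypeValue

// one slot for every index 0 .. kMaxTypeValue, hence the "+ 1"
std::array<uint32_t, kMaxTypeValue + 1> counts{};

void record(SketchType t) { ++counts[t]; }  // record(LAST) touches counts[25]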
@@ -178,18 +181,10 @@ class ExecutionNode {
  static void validateType(int type);

  /// @brief add a dependency
  void addDependency(ExecutionNode* ep) {
    TRI_ASSERT(ep != nullptr);
    _dependencies.emplace_back(ep);
    ep->_parents.emplace_back(this);
  }
  void addDependency(ExecutionNode*);

  /// @brief add a parent
  void addParent(ExecutionNode* ep) {
    TRI_ASSERT(ep != nullptr);
    ep->_dependencies.emplace_back(this);
    _parents.emplace_back(ep);
  }
  void addParent(ExecutionNode*);

  /// @brief get all dependencies
  TEST_VIRTUAL std::vector<ExecutionNode*> getDependencies() const { return _dependencies; }

@@ -207,9 +202,7 @@ class ExecutionNode {
  bool hasDependency() const { return (_dependencies.size() == 1); }

  /// @brief add the node dependencies to a vector
  /// ATTENTION - this function has nothing to do with the addDependency function
  // maybe another name should be used.
  void addDependencies(std::vector<ExecutionNode*>& result) const {
  void dependencies(std::vector<ExecutionNode*>& result) const {
    for (auto const& it : _dependencies) {
      TRI_ASSERT(it != nullptr);
      result.emplace_back(it);

@@ -232,7 +225,7 @@ class ExecutionNode {
  }

  /// @brief add the node parents to a vector
  void addParents(std::vector<ExecutionNode*>& result) const {
  void parents(std::vector<ExecutionNode*>& result) const {
    for (auto const& it : _parents) {
      TRI_ASSERT(it != nullptr);
      result.emplace_back(it);

@@ -265,92 +258,14 @@ class ExecutionNode {

  /// @brief replace a dependency, returns true if the pointer was found and
  /// replaced, please note that this does not delete oldNode!
  bool replaceDependency(ExecutionNode* oldNode, ExecutionNode* newNode) {
    TRI_ASSERT(oldNode != nullptr);
    TRI_ASSERT(newNode != nullptr);

    auto it = _dependencies.begin();

    while (it != _dependencies.end()) {
      if (*it == oldNode) {
        *it = newNode;
        try {
          newNode->_parents.emplace_back(this);
        } catch (...) {
          *it = oldNode; // roll back
          return false;
        }
        try {
          for (auto it2 = oldNode->_parents.begin();
               it2 != oldNode->_parents.end(); ++it2) {
            if (*it2 == this) {
              oldNode->_parents.erase(it2);
              break;
            }
          }
        } catch (...) {
          // If this happens, we ignore that the _parents of oldNode
          // are not set correctly
        }
        return true;
      }

      ++it;
    }
    return false;
  }
  bool replaceDependency(ExecutionNode* oldNode, ExecutionNode* newNode);

  /// @brief remove a dependency, returns true if the pointer was found and
  /// removed, please note that this does not delete ep!
  bool removeDependency(ExecutionNode* ep) {
    bool ok = false;
    for (auto it = _dependencies.begin(); it != _dependencies.end(); ++it) {
      if (*it == ep) {
        try {
          it = _dependencies.erase(it);
        } catch (...) {
          return false;
        }
        ok = true;
        break;
      }
    }

    if (!ok) {
      return false;
    }

    // Now remove us as a parent of the old dependency as well:
    for (auto it = ep->_parents.begin(); it != ep->_parents.end(); ++it) {
      if (*it == this) {
        try {
          ep->_parents.erase(it);
        } catch (...) {
        }
        return true;
      }
    }

    return false;
  }
  bool removeDependency(ExecutionNode*);

  /// @brief remove all dependencies for the given node
  void removeDependencies() {
    for (auto& x : _dependencies) {
      for (auto it = x->_parents.begin(); it != x->_parents.end(); /* no hoisting */) {
        if (*it == this) {
          try {
            it = x->_parents.erase(it);
          } catch (...) {
          }
          break;
        } else {
          ++it;
        }
      }
    }
    _dependencies.clear();
  }
  void removeDependencies();

  /// @brief creates corresponding ExecutionBlock
  virtual std::unique_ptr<ExecutionBlock> createBlock(

@@ -536,7 +451,7 @@ class ExecutionNode {
    RegisterPlan() : depth(0), totalNrRegs(0), me(nullptr) {
      nrRegsHere.emplace_back(0);
      nrRegs.emplace_back(0);
    };
    }

    void clear();
@@ -59,7 +59,26 @@ using namespace arangodb;
using namespace arangodb::aql;
using namespace arangodb::basics;

static uint64_t checkTraversalDepthValue(AstNode const* node) {
namespace {

#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
/// @brief validate the counters of the plan
struct NodeCounter final : public WalkerWorker<ExecutionNode> {
  std::array<uint32_t, ExecutionNode::MaxNodeTypeValue + 1> counts;

  NodeCounter() : counts{} {}

  bool enterSubquery(ExecutionNode*, ExecutionNode*) override final {
    return true;
  }

  void after(ExecutionNode* en) override final {
    counts[en->getType()]++;
  }
};
#endif

uint64_t checkTraversalDepthValue(AstNode const* node) {
  if (!node->isNumericValue()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_PARSE,
                                   "invalid traversal depth");

@@ -81,7 +100,7 @@ static uint64_t checkTraversalDepthValue(AstNode const* node) {
  return static_cast<uint64_t>(v);
}

static std::unique_ptr<graph::BaseOptions> CreateTraversalOptions(
std::unique_ptr<graph::BaseOptions> createTraversalOptions(
    aql::Query* query, AstNode const* direction,
    AstNode const* optionsNode) {
  auto options = std::make_unique<traverser::TraverserOptions>(query);

@@ -161,7 +180,7 @@ static std::unique_ptr<graph::BaseOptions> CreateTraversalOptions(
  return ret;
}

static std::unique_ptr<graph::BaseOptions> CreateShortestPathOptions(
std::unique_ptr<graph::BaseOptions> createShortestPathOptions(
    arangodb::aql::Query* query, AstNode const* node) {
  auto options = std::make_unique<graph::ShortestPathOptions>(query);

@@ -191,6 +210,8 @@ static std::unique_ptr<graph::BaseOptions> CreateShortestPathOptions(
  return ret;
}

} // namespace

/// @brief create the plan
ExecutionPlan::ExecutionPlan(Ast* ast)
    : _ids(),

@@ -200,10 +221,30 @@ ExecutionPlan::ExecutionPlan(Ast* ast)
      _nextId(0),
      _ast(ast),
      _lastLimitNode(nullptr),
      _subqueries() {}
      _subqueries(),
      _typeCounts{} {}

/// @brief destroy the plan, frees all assigned nodes
ExecutionPlan::~ExecutionPlan() {
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
  if (_root != nullptr) {
    try {
      // count the actual number of nodes in the plan
      ::NodeCounter counter;
      _root->walk(counter);

      // and compare it to the number of nodes we have in our counters array
      size_t j = 0;
      for (auto const& it : _typeCounts) {
        TRI_ASSERT(counter.counts[j++] == it);
      }
    } catch (...) {
      // should not happen...
    }
  }
#endif

  for (auto& x : _ids) {
    delete x.second;
  }

@@ -231,10 +272,20 @@ ExecutionPlan* ExecutionPlan::instantiateFromAst(Ast* ast) {

  return plan.release();
}

/// @brief whether or not the plan contains at least one node of this type
bool ExecutionPlan::contains(ExecutionNode::NodeType type) const {
  TRI_ASSERT(_varUsageComputed);
  return _typeCounts[type] > 0;
}

/// @brief increase the node counter for the type
void ExecutionPlan::increaseCounter(ExecutionNode::NodeType type) noexcept {
  ++_typeCounts[type];
}

/// @brief process the list of collections in a VelocyPack
void ExecutionPlan::getCollectionsFromVelocyPack(Ast* ast,
                                                 VPackSlice const slice) {
void ExecutionPlan::getCollectionsFromVelocyPack(Ast* ast, VPackSlice const slice) {
  TRI_ASSERT(ast != nullptr);

  VPackSlice collectionsSlice = slice.get("collections");
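contains() answers "does this plan have at least one node of type X" straight from the counter array in constant time, but only once variable usage (and therefore the counting) has been computed, which the TRI_ASSERT enforces. A hedged usage sketch; the rule function and include paths are assumptions, only ExecutionPlan::contains() and the ExecutionNode::SORT node type come from the codebase:

#include "Aql/ExecutionNode.h"
#include "Aql/ExecutionPlan.h"

// Sketch: bail out of an optimizer rule early when the plan cannot contain
// the node type the rule rewrites, instead of walking the whole plan.
void removeSortsRuleSketch(arangodb::aql::ExecutionPlan* plan) {
  using arangodb::aql::ExecutionNode;

  if (!plan->contains(ExecutionNode::SORT)) {
    return;  // O(1) check against the per-type counters, no plan walk needed
  }
  // ... otherwise collect the SortNodes (e.g. via findNodesOfType) and rewrite them ...
}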
@@ -263,7 +314,7 @@ ExecutionPlan* ExecutionPlan::instantiateFromVelocyPack(

  auto plan = std::make_unique<ExecutionPlan>(ast);
  plan->_root = plan->fromSlice(slice);
  plan->_varUsageComputed = true;
  plan->setVarUsageComputed();

  return plan.release();
}

@@ -277,10 +328,6 @@ ExecutionPlan* ExecutionPlan::clone(Ast* ast) {
  plan->_appliedRules = _appliedRules;
  plan->_isResponsibleForInitialize = _isResponsibleForInitialize;

  // plan->findVarUsage();
  // Let's not do it here, because supposedly the plan is modified as
  // the very next thing anyway!

  return plan.release();
}

@@ -691,6 +738,7 @@ ExecutionNode* ExecutionPlan::registerNode(ExecutionNode* node) {
    delete node;
    throw;
  }

  return node;
}

@@ -817,7 +865,7 @@ ExecutionNode* ExecutionPlan::fromNodeTraversal(ExecutionNode* previous,
    previous = calc;
  }

  auto options = CreateTraversalOptions(getAst()->query(), direction,
  auto options = createTraversalOptions(getAst()->query(), direction,
                                        node->getMember(3));

  TRI_ASSERT(direction->type == NODE_TYPE_DIRECTION);

@@ -899,7 +947,7 @@ ExecutionNode* ExecutionPlan::fromNodeShortestPath(ExecutionNode* previous,
  parseTraversalVertexNode(previous, node->getMember(2));
  AstNode const* graph = node->getMember(3);

  auto options = CreateShortestPathOptions(getAst()->query(), node->getMember(4));
  auto options = createShortestPathOptions(getAst()->query(), node->getMember(4));

  // First create the node
  auto spNode = new ShortestPathNode(this, nextId(), _ast->query()->vocbase(),

@@ -1751,6 +1799,12 @@ ExecutionNode* ExecutionPlan::fromNode(AstNode const* node) {
void ExecutionPlan::findNodesOfType(SmallVector<ExecutionNode*>& result,
                                    ExecutionNode::NodeType type,
                                    bool enterSubqueries) {
  // consult our nodes-of-type counters array
  if (!contains(type)) {
    // node type not present in plan, do nothing
    return;
  }

  NodeFinder<ExecutionNode::NodeType> finder(type, result, enterSubqueries);
  root()->walk(finder);
}
@@ -1759,9 +1813,18 @@ void ExecutionPlan::findNodesOfType(SmallVector<ExecutionNode*>& result,
void ExecutionPlan::findNodesOfType(
    SmallVector<ExecutionNode*>& result,
    std::vector<ExecutionNode::NodeType> const& types, bool enterSubqueries) {
  NodeFinder<std::vector<ExecutionNode::NodeType>> finder(types, result,
                                                          enterSubqueries);
  root()->walk(finder);

  // check if any of the node types is actually present in the plan
  for (auto const& type : types) {
    if (contains(type)) {
      // found a node type that is in the plan
      NodeFinder<std::vector<ExecutionNode::NodeType>> finder(types, result,
                                                              enterSubqueries);
      root()->walk(finder);
      // abort, because we were looking for all node types at the same time
      return;
    }
  }
}

/// @brief find all end nodes in a plan
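The multi-type overload only pays for a plan walk if at least one of the requested types is present, and a single walk then collects all of them at once. A hedged usage sketch, as it might appear inside an optimizer rule where the Aql types are visible; the arena-backed SmallVector setup follows the pattern used elsewhere in the rules and is an assumption here, as are the concrete node types:

// Sketch: collect INDEX and ENUMERATE_COLLECTION nodes in one pass; if the
// plan's counters show neither type, findNodesOfType returns immediately.
SmallVector<ExecutionNode*>::allocator_type::arena_type arena;
SmallVector<ExecutionNode*> nodes{arena};

plan->findNodesOfType(
    nodes, {ExecutionNode::INDEX, ExecutionNode::ENUMERATE_COLLECTION},
    /*enterSubqueries*/ true);

for (ExecutionNode* node : nodes) {
  // inspect or rewrite each matching node here
}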
@@ -1796,6 +1859,9 @@ struct VarUsageFinder final : public WalkerWorker<ExecutionNode> {
  }

  bool before(ExecutionNode* en) override final {
    // count the type of node found
    en->plan()->increaseCounter(en->getType());

    en->invalidateVarUsage();
    en->setVarsUsedLater(_usedLater);
    // Add variables used here to _usedLater:

@@ -1827,11 +1893,16 @@ struct VarUsageFinder final : public WalkerWorker<ExecutionNode> {
};

/// @brief determine and set _varsUsedLater in all nodes
/// as a side effect, count the different types of nodes in the plan
void ExecutionPlan::findVarUsage() {
  // reset all counters
  for (auto& counter : _typeCounts) {
    counter = 0;
  }

  _varSetBy.clear();
  ::VarUsageFinder finder(&_varSetBy);
  root()->walk(finder);
  // _varSetBy = *finder._varSetBy;

  _varUsageComputed = true;
}

@@ -1931,7 +2002,7 @@ void ExecutionPlan::insertDependency(ExecutionNode* oldNode,
  TRI_ASSERT(newNode->getDependencies().empty());
  TRI_ASSERT(oldNode->getDependencies().size() == 1);
  TRI_ASSERT(newNode != nullptr);

  auto oldDeps = oldNode->getDependencies(); // Intentional copy
  TRI_ASSERT(!oldDeps.empty());

@@ -1965,7 +2036,7 @@ ExecutionNode* ExecutionPlan::fromSlice(VPackSlice const& slice) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                   "plan \"nodes\" attribute is not an array");
  }

  ExecutionNode* ret = nullptr;

  // first, re-create all nodes from the Slice, using the node ids

@@ -1979,6 +2050,11 @@ ExecutionNode* ExecutionPlan::fromSlice(VPackSlice const& slice) {
    ret = ExecutionNode::fromVPackFactory(this, it);
    registerNode(ret);

    // we have to count all nodes by their type here, because our caller
    // will set the _varUsageComputed flag to true manually, bypassing the
    // regular counting!
    increaseCounter(ret->getType());

    TRI_ASSERT(ret != nullptr);

    if (ret->getType() == arangodb::aql::ExecutionNode::SUBQUERY) {
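Deserialization is the second place where the counters have to be maintained by hand: instantiateFromVelocyPack() marks variable usage as already computed (the setVarUsageComputed() change above), so findVarUsage() will not run and recount. A compressed sketch of the loop body; the plan and nodesSlice names and the VelocyPack iteration are assumed for illustration, the calls themselves appear in the hunk:

// Sketch: every node rebuilt from VelocyPack is registered with the plan and
// counted immediately, mirroring what the findVarUsage() walk would do.
for (VPackSlice it : VPackArrayIterator(nodesSlice)) {  // iteration assumed
  ExecutionNode* node = ExecutionNode::fromVPackFactory(plan, it);
  plan->registerNode(node);
  plan->increaseCounter(node->getType());  // keep _typeCounts consistent
}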
@@ -190,8 +190,7 @@ class ExecutionPlan {
  void unlinkNode(ExecutionNode*, bool = false);

  /// @brief add a node to the plan, will delete node if addition fails and
  /// throw an exception, in addition, the pointer is set to nullptr such
  /// that another delete does not hurt
  /// throw an exception
  ExecutionNode* registerNode(ExecutionNode*);

  /// @brief replaceNode, note that <newNode> must be registered with the plan

@@ -212,6 +211,12 @@ class ExecutionPlan {
  /// @brief creates an anonymous calculation node for an arbitrary expression
  ExecutionNode* createTemporaryCalculation(AstNode const*, ExecutionNode*);

  /// @brief whether or not the plan contains at least one node of this type
  bool contains(ExecutionNode::NodeType type) const;

  /// @brief increase the node counter for the type
  void increaseCounter(ExecutionNode::NodeType type) noexcept;

 private:
  /// @brief creates a calculation node
  ExecutionNode* createCalculation(Variable*, Variable const*, AstNode const*,

@@ -327,6 +332,9 @@ class ExecutionPlan {

  /// @brief these nodes will be excluded from building scatter/gather "diamonds" later
  std::unordered_set<ExecutionNode const*> _excludeFromScatterGather;

  /// @brief number of nodes used in the plan, by type
  std::array<uint32_t, ExecutionNode::MaxNodeTypeValue + 1> _typeCounts;
};
}
}
@@ -526,7 +526,7 @@ void arangodb::aql::removeRedundantSortsRule(
        // we found a sort that we can understand
        std::vector<ExecutionNode*> stack;

        sortNode->addDependencies(stack);
        sortNode->dependencies(stack);

        int nodesRelyingOnSort = 0;

@@ -623,7 +623,7 @@ void arangodb::aql::removeRedundantSortsRule(
            break;
          }

          current->addDependencies(stack);
          current->dependencies(stack);
        }

        if (toUnlink.find(n) == toUnlink.end() &&

@@ -1111,7 +1111,7 @@ void arangodb::aql::moveCalculationsDownRule(
    auto variable = nn->outVariable();

    std::vector<ExecutionNode*> stack;
    n->addParents(stack);
    n->parents(stack);

    bool shouldMove = false;
    ExecutionNode* lastNode = nullptr;

@@ -1158,7 +1158,7 @@ void arangodb::aql::moveCalculationsDownRule(
        break;
      }

      current->addParents(stack);
      current->parents(stack);
    }

    if (shouldMove && lastNode != nullptr) {

@@ -1269,7 +1269,6 @@ void arangodb::aql::specializeCollectRule(Optimizer* opt,
      sortNode->addDependency(newCollectNode);
      parent->replaceDependency(newCollectNode, sortNode);
    }
    newPlan->findVarUsage();

    if (nodes.size() > 1) {
      // this will tell the optimizer to optimize the cloned plan with this

@@ -1410,7 +1409,7 @@ void arangodb::aql::moveFiltersUpRule(Optimizer* opt,
    TRI_ASSERT(neededVars.size() == 1);

    std::vector<ExecutionNode*> stack;
    n->addDependencies(stack);
    n->dependencies(stack);

    while (!stack.empty()) {
      auto current = stack.back();

@@ -1463,7 +1462,7 @@ void arangodb::aql::moveFiltersUpRule(Optimizer* opt,
        break;
      }

      current->addDependencies(stack);
      current->dependencies(stack);

    // first, unlink the filter from the plan
    plan->unlinkNode(n);

@@ -1692,7 +1691,7 @@ void arangodb::aql::removeRedundantCalculationsRule(
    buffer.reset();

    std::vector<ExecutionNode*> stack;
    n->addDependencies(stack);
    n->dependencies(stack);

    while (!stack.empty()) {
      auto current = stack.back();

@@ -1769,7 +1768,7 @@ void arangodb::aql::removeRedundantCalculationsRule(
        break;
      }

      current->addDependencies(stack);
      current->dependencies(stack);
    }
  }
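All of the rule changes above are the same mechanical rename: the accessor that copies a node's dependencies into a caller-supplied vector is now called dependencies() (and parents() for the other direction), leaving addDependency()/addParent() to mean only "link two nodes". The rules use the accessors for an iterative walk along the dependency chain; a hedged sketch of that traversal pattern, with the visit logic left as a placeholder:

// Sketch: iterative walk over everything upstream of `start`, using the
// renamed dependencies() accessor to push neighbours onto a work stack.
std::vector<ExecutionNode*> stack;
start->dependencies(stack);

while (!stack.empty()) {
  ExecutionNode* current = stack.back();
  stack.pop_back();

  // ... inspect `current` here; a rule typically breaks out early once it
  //     hits a node type it cannot move past ...

  current->dependencies(stack);  // continue walking towards the singleton
}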
@@ -398,6 +398,11 @@ void Query::prepare(QueryRegistry* registry, uint64_t queryHash) {
    }
#endif
  }

  TRI_ASSERT(plan != nullptr);
  if (!plan->varUsageComputed()) {
    plan->findVarUsage();
  }

  enterState(QueryExecutionState::ValueType::EXECUTION);

@@ -514,13 +519,14 @@ ExecutionPlan* Query::preparePlan() {
      // oops
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "could not create plan from vpack");
    }

    if (!plan->varUsageComputed()) {
      plan->findVarUsage();
    }
  }

  TRI_ASSERT(plan != nullptr);

  // varsUsedLater and varsValid are unordered_sets and so their orders
  // are not the same in the serialized and deserialized plans

  // return the V8 context if we are in one
  exitContext();
@@ -771,7 +771,7 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
  rocksdb::ReadOptions ro = mthd->readOptions();
  rocksdb::Slice const end = documentBounds.end();
  ro.iterate_upper_bound = &end;

  // avoid OOM error for truncate by committing earlier
  uint64_t const prvICC = state->options().intermediateCommitCount;
  state->options().intermediateCommitCount = std::min<uint64_t>(prvICC, 10000);

@@ -788,9 +788,10 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
    TRI_ASSERT(doc.isObject());

    // To print the WAL we need key and RID
    VPackSlice key = transaction::helpers::extractKeyFromDocument(doc);
    VPackSlice key;
    TRI_voc_rid_t rid = 0;
    transaction::helpers::extractKeyAndRevFromDocument(doc, key, rid);
    TRI_ASSERT(key.isString());
    TRI_voc_rid_t rid = transaction::helpers::extractRevFromDocument(doc);
    TRI_ASSERT(rid != 0);

    state->prepareOperation(

@@ -815,7 +816,7 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
      // transaction size limit reached
      if (res.fail()) {
        // This should never happen...
        THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
        THROW_ARANGO_EXCEPTION(res);
      }

      trackWaitForSync(trx, options);