1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

This commit is contained in:
Kaveh Vahedipour 2016-11-28 16:10:03 +01:00
commit 6a86321fde
26 changed files with 808 additions and 448 deletions

View File

@ -466,8 +466,12 @@ if [ -z "${MSVC}" ]; then
# on all other system cmake tends to be sluggish on finding strip.
# workaround by presetting it:
if test -z "${STRIP}"; then
export STRIP=`which strip`
STRIP=/usr/bin/strip
if [ ! -f ${STRIP} ] ; then
STRIP=`which strip`
fi
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DCMAKE_STRIP=${STRIP}"
export STRIP
fi
fi

View File

@ -21,6 +21,33 @@
/// @author Max Neunhoeffer
////////////////////////////////////////////////////////////////////////////////
// Execution plans like the one below are made of Nodes that inherit the
// ExecutionNode class as a base class.
//
// Execution plan:
// Id NodeType Est. Comment
// 1 SingletonNode 1 * ROOT
// 2 EnumerateCollectionNode 6400 - FOR d IN ulf /* full collection scan */
// 3 CalculationNode 6400 - LET #1 = DISTANCE(d.`lat`, d.`lon`, 0, 0) /* simple expression */ /* collections used: d : ulf */
// 4 SortNode 6400 - SORT #1 ASC
// 5 LimitNode 5 - LIMIT 0, 5
// 6 ReturnNode 5 - RETURN d
//
// Even though the Singleton Node has a comment saying it is the "ROOT" node
// you receive a pointer to LimitNode by calling getFirstParent on the SortNode
// (effectively going down the list). If you want to go up from 5 to 4 you need
// to call getFirstDependency to get a pointer to the SortNode.
//
// For most (maybe all) operations you will only need to operate on the
// dependencies; the parents will be updated automatically.
//
// If you wish to unlink (remove) or replace a node you should do it by using
// one of the plan's operations.
//
// addDependency(Parent) has totally different functionality than
// addDependencies(Parents): the latter does NOT add a list of dependencies to
// a node — it collects this node's dependencies into the given vector!
#ifndef ARANGOD_AQL_EXECUTION_NODE_H
#define ARANGOD_AQL_EXECUTION_NODE_H 1
@ -156,6 +183,8 @@ class ExecutionNode {
bool hasDependency() const { return (_dependencies.size() == 1); }
/// @brief append this node's dependencies to the given vector
/// ATTENTION - despite the similar name, this function is unrelated to
/// addDependency(); it collects dependencies rather than adding one.
void addDependencies(std::vector<ExecutionNode*>& result) const {
for (auto const& it : _dependencies) {
result.emplace_back(it);
@ -433,7 +462,7 @@ class ExecutionNode {
return false;
}
ExecutionPlan const* plan() const {
ExecutionPlan const* plan() const {
return _plan;
}
@ -510,7 +539,7 @@ class ExecutionNode {
/// @brief get depth
int getDepth() const { return _depth; }
/// @brief get registers to clear
std::unordered_set<RegisterId> const& getRegsToClear() const {
return _regsToClear;
@ -677,7 +706,7 @@ class EnumerateCollectionNode : public ExecutionNode {
std::vector<Variable const*> getVariablesSetHere() const override final {
return std::vector<Variable const*>{_outVariable};
}
/// @brief the node is only non-deterministic if it uses a random sort order
bool isDeterministic() override final { return !_random; }
@ -927,7 +956,7 @@ class CalculationNode : public ExecutionNode {
/// @brief can the node throw?
bool canThrow() override final { return _expression->canThrow(); }
bool isDeterministic() override final { return _expression->isDeterministic(); }
private:
@ -1014,10 +1043,10 @@ class SubqueryNode : public ExecutionNode {
/// *originate* from this node. That is, this method does not need to
/// return true just because a dependent node can throw an exception.
bool canThrow() override final;
bool isDeterministic() override final;
bool isConst();
bool isConst();
private:
/// @brief we need to have an expression and where to write the result
@ -1181,7 +1210,7 @@ class NoResultsNode : public ExecutionNode {
/// @brief constructor with an id
public:
NoResultsNode(ExecutionPlan* plan, size_t id) : ExecutionNode(plan, id) {}
NoResultsNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
: ExecutionNode(plan, base) {}

View File

@ -347,7 +347,7 @@ void Optimizer::setupRules() {
// rule not yet tested
registerRule("split-filters",
splitFiltersRule,
splitFiltersRule_pass1,
splitFiltersRule_pass1,
true);
#endif
@ -413,7 +413,7 @@ void Optimizer::setupRules() {
// merge filters into traversals
registerRule("optimize-traversals", optimizeTraversalsRule,
optimizeTraversalsRule_pass6, DoesNotCreateAdditionalPlans, true);
// prepare traversal info
registerRule("prepare-traversals", prepareTraversalsRule,
prepareTraversalsRule_pass6, DoesNotCreateAdditionalPlans, false, true);
@ -485,6 +485,10 @@ void Optimizer::setupRules() {
registerRule("patch-update-statements", patchUpdateStatementsRule,
patchUpdateStatementsRule_pass9, DoesNotCreateAdditionalPlans, true);
// patch update statements
registerRule("geo-index-optimizer", optimizeGeoIndexRule,
geoDistanceRule, DoesNotCreateAdditionalPlans, true);
if (arangodb::ServerState::instance()->isCoordinator()) {
// distribute operations in cluster
registerRule("scatter-in-cluster", scatterInClusterRule,

View File

@ -69,7 +69,7 @@ class Optimizer {
// determine the "right" type of CollectNode and
// add a sort node for each COLLECT (may be removed later)
specializeCollectRule_pass1 = 105,
inlineSubqueriesRule_pass1 = 106,
// split and-combined filters into multiple smaller filters
@ -192,7 +192,9 @@ class Optimizer {
removeUnnecessaryRemoteScatterRule_pass10 = 1040,
// recognize that a RemoveNode can be moved to the shards
undistributeRemoveAfterEnumCollRule_pass10 = 1050
undistributeRemoveAfterEnumCollRule_pass10 = 1050,
geoDistanceRule = 1060
};
public:

View File

@ -46,6 +46,10 @@
#include "Cluster/ClusterInfo.h"
#include "Utils/Transaction.h"
#include "VocBase/TraverserOptions.h"
#include "Indexes/Index.h"
#include <boost/optional.hpp>
#include <tuple>
#include <iostream>
using namespace arangodb;
using namespace arangodb::aql;
@ -57,7 +61,7 @@ void arangodb::aql::sortInValuesRule(Optimizer* opt, ExecutionPlan* plan,
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::FILTER, true);
bool modified = false;
for (auto const& n : nodes) {
@ -383,7 +387,7 @@ void arangodb::aql::removeUnnecessaryFiltersRule(Optimizer* opt,
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::FILTER, true);
bool modified = false;
std::unordered_set<ExecutionNode*> toUnlink;
@ -447,7 +451,7 @@ void arangodb::aql::removeCollectVariablesRule(Optimizer* opt,
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::COLLECT, true);
bool modified = false;
for (auto const& n : nodes) {
@ -705,7 +709,7 @@ void arangodb::aql::removeSortRandRule(Optimizer* opt, ExecutionPlan* plan,
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::SORT, true);
bool modified = false;
for (auto const& n : nodes) {
@ -954,8 +958,8 @@ void arangodb::aql::moveCalculationsDownRule(Optimizer* opt,
} else if (currentType == EN::INDEX ||
currentType == EN::ENUMERATE_COLLECTION ||
currentType == EN::ENUMERATE_LIST ||
currentType == EN::TRAVERSAL ||
currentType == EN::SHORTEST_PATH ||
currentType == EN::TRAVERSAL ||
currentType == EN::SHORTEST_PATH ||
currentType == EN::COLLECT ||
currentType == EN::NORESULTS) {
// we will not push further down than such nodes
@ -1191,7 +1195,7 @@ void arangodb::aql::moveFiltersUpRule(Optimizer* opt, ExecutionPlan* plan,
// must not move a filter beyond a node that can throw
break;
}
if (current->isModificationNode()) {
// must not move a filter beyond a modification node
break;
@ -1249,7 +1253,7 @@ class arangodb::aql::RedundantCalculationsReplacer final
std::unordered_map<VariableId, Variable const*> const& replacements)
: _replacements(replacements) {
}
template <typename T>
void replaceStartTargetVariables(ExecutionNode* en) {
auto node = static_cast<T*>(en);
@ -1271,7 +1275,7 @@ class arangodb::aql::RedundantCalculationsReplacer final
auto node = static_cast<CalculationNode*>(en);
std::unordered_set<Variable const*> variables;
node->expression()->variables(variables);
// check if the calculation uses any of the variables that we want to
// replace
for (auto const& it : variables) {
@ -1304,12 +1308,12 @@ class arangodb::aql::RedundantCalculationsReplacer final
replaceInVariable<FilterNode>(en);
break;
}
case EN::TRAVERSAL: {
replaceInVariable<TraversalNode>(en);
break;
}
case EN::SHORTEST_PATH: {
replaceStartTargetVariables<ShortestPathNode>(en);
break;
@ -1331,7 +1335,7 @@ class arangodb::aql::RedundantCalculationsReplacer final
}
// node->_keepVariables does not need to be updated at the moment as the
// "remove-redundant-calculations" rule will stop when it finds a COLLECT
// with an INTO, and the "inline-subqueries" rule will abort there as well
// with an INTO, and the "inline-subqueries" rule will abort there as well
break;
}
@ -1352,7 +1356,7 @@ class arangodb::aql::RedundantCalculationsReplacer final
replaceInVariable<InsertNode>(en);
break;
}
case EN::UPSERT: {
auto node = static_cast<UpsertNode*>(en);
@ -1379,7 +1383,7 @@ class arangodb::aql::RedundantCalculationsReplacer final
}
break;
}
case EN::REPLACE: {
auto node = static_cast<ReplaceNode*>(en);
@ -1465,7 +1469,7 @@ void arangodb::aql::removeRedundantCalculationsRule(
continue;
}
bool const isEqual = (buffer.length() == referenceExpression.size() &&
bool const isEqual = (buffer.length() == referenceExpression.size() &&
memcmp(buffer.c_str(), referenceExpression.c_str(), buffer.length()) == 0);
buffer.reset();
@ -1571,7 +1575,7 @@ void arangodb::aql::removeUnnecessaryCalculationsRule(
continue;
}
// will remove subquery when we get here
}
}
auto outvars = n->getVariablesSetHere();
TRI_ASSERT(outvars.size() == 1);
@ -1589,13 +1593,13 @@ void arangodb::aql::removeUnnecessaryCalculationsRule(
// it's a temporary variable that we can fuse with the other
// calculation easily
if (n->canThrow() ||
if (n->canThrow() ||
!static_cast<CalculationNode*>(n)->expression()->isDeterministic()) {
continue;
}
AstNode const* rootNode = static_cast<CalculationNode*>(n)->expression()->node();
if (rootNode->type == NODE_TYPE_REFERENCE) {
// if the LET is a simple reference to another variable, e.g. LET a = b
// then replace all references to a with references to b
@ -1643,7 +1647,7 @@ void arangodb::aql::removeUnnecessaryCalculationsRule(
usageCount = 0;
break;
}
}
}
if (current->getType() != EN::CALCULATION) {
// don't know how to replace the variable in a non-LET node
// abort the search
@ -1651,7 +1655,7 @@ void arangodb::aql::removeUnnecessaryCalculationsRule(
break;
}
// got a LET. we can replace the variable reference in it by
// got a LET. we can replace the variable reference in it by
// something else
++usageCount;
other = static_cast<CalculationNode*>(current);
@ -1688,7 +1692,7 @@ void arangodb::aql::removeUnnecessaryCalculationsRule(
otherExpression->replaceVariableReference(outvars[0], rootNode);
toUnlink.emplace(n);
}
}
}
}
@ -1777,7 +1781,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
size_t coveredAttributes = 0;
auto resultPair = trx->getIndexForSortCondition(
enumerateCollectionNode->collection()->getName(),
&sortCondition, outVariable,
&sortCondition, outVariable,
enumerateCollectionNode->collection()->count(),
usedIndexes, coveredAttributes);
if (resultPair.second) {
@ -1856,7 +1860,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
// all indexes use the same attributes and index conditions guarantee
// sorted output
}
TRI_ASSERT(indexes.size() == 1 || cond->isSorted());
// if we get here, we either have one index or multiple indexes on the same
@ -1883,7 +1887,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
// order as the IndexNode...
// now check if the sort attributes match the ones of the index
size_t const numCovered =
sortCondition.coveredAttributes(outVariable, fields);
sortCondition.coveredAttributes(outVariable, fields);
if (numCovered >= sortCondition.numAttributes()) {
// sort condition is fully covered by index... now we can remove the
@ -1908,7 +1912,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
// now check if the index fields are the same as the sort condition
// fields
// e.g. FILTER c.value1 == 1 && c.value2 == 42 SORT c.value1, c.value2
size_t const numCovered =
size_t const numCovered =
sortCondition.coveredAttributes(outVariable, fields);
if (numCovered == sortCondition.numAttributes() &&
@ -1997,7 +2001,7 @@ void arangodb::aql::useIndexForSortRule(Optimizer* opt, ExecutionPlan* plan,
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::SORT, true);
bool modified = false;
for (auto const& n : nodes) {
@ -2017,11 +2021,11 @@ void arangodb::aql::useIndexForSortRule(Optimizer* opt, ExecutionPlan* plan,
/// @brief try to remove filters which are covered by indexes
void arangodb::aql::removeFiltersCoveredByIndexRule(
Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) {
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::FILTER, true);
std::unordered_set<ExecutionNode*> toUnlink;
bool modified = false;
@ -2188,7 +2192,7 @@ void arangodb::aql::interchangeAdjacentEnumerationsRule(
auto dep = nwalker->getFirstDependency();
if (dep->getType() != EN::ENUMERATE_COLLECTION &&
if (dep->getType() != EN::ENUMERATE_COLLECTION &&
dep->getType() != EN::ENUMERATE_LIST) {
break;
}
@ -2284,7 +2288,7 @@ void arangodb::aql::scatterInClusterRule(Optimizer* opt, ExecutionPlan* plan,
if (arangodb::ServerState::instance()->isCoordinator()) {
// find subqueries
std::unordered_map<ExecutionNode*, ExecutionNode*> subqueries;
SmallVector<ExecutionNode*>::allocator_type::arena_type s;
SmallVector<ExecutionNode*> subs{s};
plan->findNodesOfType(subs, ExecutionNode::SUBQUERY, true);
@ -2726,7 +2730,7 @@ void arangodb::aql::distributeSortToClusterRule(Optimizer* opt,
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::GATHER, true);
bool modified = false;
for (auto& n : nodes) {
@ -2804,7 +2808,7 @@ void arangodb::aql::distributeSortToClusterRule(Optimizer* opt,
/// only a SingletonNode and possibly some CalculationNodes as dependencies
void arangodb::aql::removeUnnecessaryRemoteScatterRule(
Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule) {
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::REMOTE, true);
@ -3177,7 +3181,7 @@ struct OrSimplifier {
Ast* ast;
explicit OrSimplifier(Ast* ast) : ast(ast) {}
std::string stringifyNode(AstNode const* node) const {
try {
return node->toString();
@ -3252,7 +3256,7 @@ struct OrSimplifier {
else {
values->addMember(lhs);
}
if (rightIsArray) {
size_t const n = rhs->numMembers();
for (size_t i = 0; i < n; ++i) {
@ -3274,16 +3278,16 @@ struct OrSimplifier {
if (node->type == NODE_TYPE_OPERATOR_BINARY_OR) {
auto lhs = node->getMember(0);
auto rhs = node->getMember(1);
auto lhsNew = simplify(lhs);
auto rhsNew = simplify(rhs);
if (lhs != lhsNew || rhs != rhsNew) {
// create a modified node
node = ast->createNodeBinaryOperator(node->type, lhsNew, rhsNew);
}
if ((lhsNew->type == NODE_TYPE_OPERATOR_BINARY_EQ || lhsNew->type == NODE_TYPE_OPERATOR_BINARY_IN) &&
if ((lhsNew->type == NODE_TYPE_OPERATOR_BINARY_EQ || lhsNew->type == NODE_TYPE_OPERATOR_BINARY_IN) &&
(rhsNew->type == NODE_TYPE_OPERATOR_BINARY_EQ || rhsNew->type == NODE_TYPE_OPERATOR_BINARY_IN)) {
std::string leftName;
std::string rightName;
@ -3293,8 +3297,8 @@ struct OrSimplifier {
AstNode const* rightValue = nullptr;
for (size_t i = 0; i < 4; ++i) {
if (detect(lhsNew, i >= 2, leftName, leftAttr, leftValue) &&
detect(rhsNew, i % 2 == 0, rightName, rightAttr, rightValue) &&
if (detect(lhsNew, i >= 2, leftName, leftAttr, leftValue) &&
detect(rhsNew, i % 2 == 0, rightName, rightAttr, rightValue) &&
leftName == rightName) {
return buildValues(leftAttr, leftValue, lhsNew->type == NODE_TYPE_OPERATOR_BINARY_IN, rightValue, rhsNew->type == NODE_TYPE_OPERATOR_BINARY_IN);
}
@ -3304,11 +3308,11 @@ struct OrSimplifier {
// return node as is
return const_cast<AstNode*>(node);
}
if (node->type == NODE_TYPE_OPERATOR_BINARY_AND) {
auto lhs = node->getMember(0);
auto rhs = node->getMember(1);
auto lhsNew = simplify(lhs);
auto rhsNew = simplify(rhs);
@ -3319,7 +3323,7 @@ struct OrSimplifier {
// fallthrough intentional
}
return const_cast<AstNode*>(node);
}
};
@ -3355,7 +3359,7 @@ void arangodb::aql::replaceOrWithInRule(Optimizer* opt, ExecutionPlan* plan,
if (outVar.size() != 1 || outVar[0]->id != inVar[0]->id) {
continue;
}
auto root = cn->expression()->node();
OrSimplifier simplifier(plan->getAst());
@ -3364,7 +3368,7 @@ void arangodb::aql::replaceOrWithInRule(Optimizer* opt, ExecutionPlan* plan,
if (newRoot != root) {
ExecutionNode* newNode = nullptr;
Expression* expr = new Expression(plan->getAst(), newRoot);
try {
TRI_IF_FAILURE("OptimizerRules::replaceOrWithInRuleOom") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
@ -3607,7 +3611,7 @@ void arangodb::aql::patchUpdateStatementsRule(Optimizer* opt,
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
plan->findNodesOfType(nodes, EN::UPDATE, false);
bool modified = false;
for (auto const& n : nodes) {
@ -3680,16 +3684,16 @@ void arangodb::aql::optimizeTraversalsRule(Optimizer* opt,
opt->addPlan(plan, rule, false);
return;
}
bool modified = false;
// first make a pass over all traversal nodes and remove unused
// variables from them
// first make a pass over all traversal nodes and remove unused
// variables from them
for (auto const& n : tNodes) {
TraversalNode* traversal = static_cast<TraversalNode*>(n);
auto varsUsedLater = n->getVarsUsedLater();
// note that we can NOT optimize away the vertex output variable
// yet, as many traversal internals depend on the number of vertices
// found/built
@ -3700,7 +3704,7 @@ void arangodb::aql::optimizeTraversalsRule(Optimizer* opt,
traversal->setEdgeOutput(nullptr);
modified = true;
}
outVariable = traversal->pathOutVariable();
if (outVariable != nullptr &&
varsUsedLater.find(outVariable) == varsUsedLater.end()) {
@ -3738,16 +3742,16 @@ void arangodb::aql::prepareTraversalsRule(Optimizer* opt,
opt->addPlan(plan, rule, false);
return;
}
// first make a pass over all traversal nodes and remove unused
// variables from them
// first make a pass over all traversal nodes and remove unused
// variables from them
for (auto const& n : tNodes) {
TraversalNode* traversal = static_cast<TraversalNode*>(n);
traversal->prepareOptions();
}
opt->addPlan(plan, rule, true);
}
}
/// @brief pulls out simple subqueries and merges them with the level above
///
@ -3755,17 +3759,17 @@ void arangodb::aql::prepareTraversalsRule(Optimizer* opt,
///
/// FOR x IN (
/// FOR y IN collection FILTER y.value >= 5 RETURN y.test
/// )
/// )
/// RETURN x.a
///
/// then this rule will transform it into:
///
///
/// FOR tmp IN collection
/// FILTER tmp.value >= 5
/// FILTER tmp.value >= 5
/// LET x = tmp.test
/// RETURN x.a
void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
ExecutionPlan* plan,
void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
ExecutionPlan* plan,
Optimizer::Rule const* rule) {
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
@ -3781,12 +3785,12 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
for (auto const& n : nodes) {
auto subqueryNode = static_cast<SubqueryNode*>(n);
if (subqueryNode->isModificationQuery()) {
// can't modify modifying subqueries
continue;
}
if (subqueryNode->canThrow()) {
// can't inline throwing subqueries
continue;
@ -3845,10 +3849,10 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
auto previous = n->getFirstDependency();
auto insert = n->getFirstParent();
TRI_ASSERT(insert != nullptr);
// unlink the original SubqueryNode
plan->unlinkNode(n, false);
for (auto& it : subNodes) {
// first unlink them all
plan->unlinkNode(it, true);
@ -3872,7 +3876,7 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
queryVariables->renameVariable(variable->id);
}
}
}
}
// link the top node in the subquery with the original plan
if (previous != nullptr) {
@ -3883,13 +3887,13 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
plan->unlinkNode(listNode, false);
queryVariables->renameVariable(returnNode->inVariable()->id, listNode->outVariable()->name);
// finally replace the variables
std::unordered_map<VariableId, Variable const*> replacements;
replacements.emplace(listNode->outVariable()->id, returnNode->inVariable());
RedundantCalculationsReplacer finder(replacements);
plan->root()->walk(&finder);
// abort optimization
current = nullptr;
}
@ -3898,8 +3902,8 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
if (current == nullptr) {
break;
}
varsUsed.clear();
varsUsed.clear();
current->getVariablesUsedHere(varsUsed);
if (varsUsed.find(out) != varsUsed.end()) {
// we found another node that uses the subquery variable
@ -3914,3 +3918,196 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
opt->addPlan(plan, rule, modified);
}
// Result of a successful geo-index match for the geo-index optimizer rule:
// the collection scan that could be replaced, the matched geo index, and the
// attribute paths of the two DISTANCE() arguments that matched it.
struct GeoIndexInfo {
EnumerateCollectionNode* _collectionNode; // full collection scan producing the document variable
Collection const* _collection; // collection the document variable is bound to
std::shared_ptr<arangodb::Index> _index; // the matched geo index
std::vector<std::string> _longitude; // attribute path matched against the index' first field — NOTE(review): DISTANCE() takes (lat, lon, ...), so this naming may be swapped with _latitude; confirm
std::vector<std::string> _latitude; // attribute path matched against the index' second field
};
// TODO - remove debug code
#ifdef OBIDEBUG
#define OBILEVEL ERR
#else
#define OBILEVEL TRACE
#endif
/// @brief checks whether one (lat, lon) argument pair of a DISTANCE() call
/// consists of two attribute accesses on the same document variable, and
/// whether the collection that variable is bound to has a geo index whose
/// field paths match the accessed attribute paths.
///
/// @param pair the two AST nodes forming one coordinate argument pair
/// @param ex   the node at which the rule fired (currently unused)
/// @param plan the execution plan, used to resolve variable setters
/// @return the matched collection/index info, or boost::none if no
///         suitable geo index applies
static boost::optional<GeoIndexInfo>
geoDistanceFunctionArgCheck(std::pair<AstNode*, AstNode*> const& pair,
                            ExecutionNode* ex, ExecutionPlan* plan) {
  using SV = std::vector<std::string>;
  LOG(OBILEVEL) << " enter argument check";

  // both arguments must be attribute accesses (doc.attribute) so that we can
  // identify the document variable and, via it, the underlying collection
  if (!pair.first->isAttributeAccessForVariable() ||
      !pair.second->isAttributeAccessForVariable()) {
    LOG(OBILEVEL) << " not both args are of type attribute access";
    return boost::none;
  }

  // expect access of the form doc.attribute
  // TODO: more complex access paths have to be added: loop until a node of
  // REFERENCE type is found
  auto setter1 = plan->getVarSetBy(
      static_cast<Variable const*>(pair.first->getMember(0)->getData())->id);
  auto setter2 = plan->getVarSetBy(
      static_cast<Variable const*>(pair.second->getMember(0)->getData())->id);
  SV accessPath1{pair.first->getString()};
  SV accessPath2{pair.second->getString()};
  LOG(OBILEVEL) << " got setter";

  // both attribute accesses must be based on the same document variable, and
  // that variable must originate from a full collection scan.
  // NOTE: getVarSetBy() may return nullptr, so guard before dereferencing.
  if (setter1 != nullptr && setter1 == setter2 &&
      setter1->getType() == EN::ENUMERATE_COLLECTION) {
    auto collNode = static_cast<EnumerateCollectionNode*>(setter1);
    auto coll = collNode->collection();  // what kind of indexes does it have on what attributes
    auto lcoll = coll->getCollection();
    LOG(OBILEVEL) << " SETTER IS ENUMERATE_COLLECTION: " << coll->getName();

    for (auto& indexShardPtr : lcoll->getIndexes()) {
      // get real index
      arangodb::Index& index = *indexShardPtr.get();

      // only geo indexes are of interest here
      if (index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX &&
          index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX) {
        continue;
      }

      // a geo1 index stores a single (combined) field; guard the [1] access
      // below against out-of-bounds
      if (index.fieldNames().size() < 2) {
        continue;
      }

      // check that the access paths of the attributes in the AST match those
      // indexed by the geo index
      if (index.fieldNames()[0] == accessPath1 &&
          index.fieldNames()[1] == accessPath2) {
        return GeoIndexInfo{collNode, coll, indexShardPtr,
                            std::move(accessPath1), std::move(accessPath2)};
      }
    }
  }
  return boost::none;
}
/// @brief optimizer rule that tries to recognize "SORT DISTANCE(...) ASC"
/// over a full collection scan so it can be served by a geo index.
/// Currently detection-only: the actual plan rewrite is disabled by the
/// `break` further down.
void arangodb::aql::optimizeGeoIndexRule(Optimizer* opt,
ExecutionPlan* plan,
Optimizer::Rule const* rule) {
LOG(OBILEVEL) << "ENTER GEO RULE";
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
SmallVector<ExecutionNode*> nodes{a};
bool modified = false;
// inspect all SORT nodes of the plan
plan->findNodesOfType(nodes, EN::SORT, true);
for (auto const& n : nodes) {
auto node = static_cast<SortNode*>(n);
auto const& elements = node->getElements();
// we're looking for "SORT DISTANCE(x,y,a,b) ASC", which has just one sort criterion
if ( !(elements.size() == 1 && elements[0].second)) {
continue;
}
//variable of sort expression
auto const variable = elements[0].first;
TRI_ASSERT(variable != nullptr);
// find the expression that is bound to the variable, i.e. the
// CalculationNode that holds the calculation
auto setter = plan->getVarSetBy(variable->id);
if (setter == nullptr || setter->getType() != EN::CALCULATION) {
continue;
}
// downcast to calculation node and get expression
auto cn = static_cast<CalculationNode*>(setter);
auto const expression = cn->expression();
// the expression must exist and it must be a function call
if (expression == nullptr || expression->node() == nullptr ||
expression->node()->type != NODE_TYPE_FCALL) {
// not the right type of node
continue;
}
//get the ast node of the expression
AstNode const* funcNode = expression->node();
auto func = static_cast<Function const*>(funcNode->getData());
// we're looking for a call to "DISTANCE()"; the function node holds a
// single member: the argument array
if ( func->externalName != "DISTANCE" || funcNode->numMembers() != 1 ) {
continue;
}
LOG(OBILEVEL) << " FOUND DISTANCE RULE";
// DISTANCE() takes exactly four arguments: lat1, lon1, lat2, lon2
auto const& distanceArgs = funcNode->getMember(0);
if(distanceArgs->numMembers() != 4){
continue;
}
// split the four arguments into two coordinate pairs and check whether
// either pair is backed by a geo index
std::pair<AstNode*,AstNode*> argPair1 = { distanceArgs->getMember(0), distanceArgs->getMember(1) };
std::pair<AstNode*,AstNode*> argPair2 = { distanceArgs->getMember(2), distanceArgs->getMember(3) };
auto result1 = geoDistanceFunctionArgCheck(argPair1, node, plan);
auto result2 = geoDistanceFunctionArgCheck(argPair2, node, plan);
// xor: exactly one argument pair shall have a geoIndex (the other pair is
// expected to be the constant reference point)
if ( ( !result1 && !result2 ) || ( result1 && result2 ) ){
continue;
}
LOG(OBILEVEL) << " FOUND DISTANCE RULE WITH ATTRIBUTE ACCESS";
// normalize so that result1 always holds the matched pair
if(!result1){
result1 = std::move(result2);
}
// NOTE(review): the second operand streams the whole _longitude vector
// rather than a single attribute — possibly meant _latitude[0]; confirm
LOG(OBILEVEL) << " attributes: " << result1.get()._longitude[0]
<< ", " << result1.get()._longitude
<< " of collection:" << result1.get()._collection->getName()
<< " are geoindexed";
break; //remove this to make use of the index
// --- everything below is currently unreachable because of the break ---
auto cnode = result1.get()._collectionNode;
auto& idxPtr = result1.get()._index;
//create new index node and register it
auto condition = std::make_unique<Condition>(plan->getAst()); //What is this condition exactly about
condition->normalize(plan);
auto inode = new IndexNode(
plan, plan->nextId(), cnode->vocbase(),
cnode->collection(), cnode->outVariable(),
std::vector<Transaction::IndexHandle>{Transaction::IndexHandle{idxPtr}},
condition.get(), !elements[0].second);
plan->registerNode(inode);
// ownership of the condition was transferred to the IndexNode above
condition.release();
// drop the SortNode and replace the collection scan with the index node
plan->unlinkNode(n);
plan->replaceNode(cnode,inode);
//signal that plan has been changed
modified=true;
}
opt->addPlan(plan, rule, modified);
LOG(OBILEVEL) << "EXIT GEO RULE";
LOG(OBILEVEL) << "";
}

View File

@ -198,6 +198,7 @@ void prepareTraversalsRule(Optimizer* opt, ExecutionPlan* plan,
/// @brief moves simple subqueries one level higher
void inlineSubqueriesRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void optimizeGeoIndexRule(Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule);
} // namespace aql
} // namespace arangodb

View File

@ -62,62 +62,68 @@ void MMFilesCleanupThread::run() {
++iterations;
if (state == TRI_vocbase_t::State::SHUTDOWN_COMPACTOR ||
state == TRI_vocbase_t::State::SHUTDOWN_CLEANUP) {
// cursors must be cleaned before collections are handled
// otherwise the cursors may still hold barriers on collections
// and collections cannot be closed properly
cleanupCursors(true);
}
// check if we can get the compactor lock exclusively
// check if compaction is currently disallowed
engine->tryPreventCompaction(_vocbase, [this, &collections, &iterations](TRI_vocbase_t* vocbase) {
try {
// copy all collections
collections = vocbase->collections(true);
} catch (...) {
collections.clear();
try {
if (state == TRI_vocbase_t::State::SHUTDOWN_COMPACTOR ||
state == TRI_vocbase_t::State::SHUTDOWN_CLEANUP) {
// cursors must be cleaned before collections are handled
// otherwise the cursors may still hold barriers on collections
// and collections cannot be closed properly
cleanupCursors(true);
}
for (auto& collection : collections) {
TRI_ASSERT(collection != nullptr);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_LOADED &&
status != TRI_VOC_COL_STATUS_UNLOADING &&
status != TRI_VOC_COL_STATUS_DELETED) {
continue;
}
// we're the only ones that can unload the collection, so using
// the collection pointer outside the lock is ok
// maybe cleanup indexes, unload the collection or some datafiles
// clean indexes?
if (iterations % cleanupIndexIterations() == 0) {
collection->cleanupIndexes();
// check if we can get the compactor lock exclusively
// check if compaction is currently disallowed
engine->tryPreventCompaction(_vocbase, [this, &collections, &iterations](TRI_vocbase_t* vocbase) {
try {
// copy all collections
collections = vocbase->collections(true);
} catch (...) {
collections.clear();
}
cleanupCollection(collection);
for (auto& collection : collections) {
TRI_ASSERT(collection != nullptr);
TRI_vocbase_col_status_e status = collection->getStatusLocked();
if (status != TRI_VOC_COL_STATUS_LOADED &&
status != TRI_VOC_COL_STATUS_UNLOADING &&
status != TRI_VOC_COL_STATUS_DELETED) {
continue;
}
// we're the only ones that can unload the collection, so using
// the collection pointer outside the lock is ok
// maybe cleanup indexes, unload the collection or some datafiles
// clean indexes?
if (iterations % cleanupIndexIterations() == 0) {
collection->cleanupIndexes();
}
cleanupCollection(collection);
}
}, false);
// server is still running, clean up unused cursors
if (iterations % cleanupCursorIterations() == 0) {
cleanupCursors(false);
// clean up expired compactor locks
engine->cleanupCompactionBlockers(_vocbase);
}
}, false);
// server is still running, clean up unused cursors
if (iterations % cleanupCursorIterations() == 0) {
cleanupCursors(false);
if (state == TRI_vocbase_t::State::NORMAL) {
CONDITION_LOCKER(locker, _condition);
locker.wait(cleanupInterval());
} else {
// prevent busy waiting
usleep(10000);
}
// clean up expired compactor locks
engine->cleanupCompactionBlockers(_vocbase);
}
if (state == TRI_vocbase_t::State::NORMAL) {
CONDITION_LOCKER(locker, _condition);
locker.wait(cleanupInterval());
} else {
// prevent busy waiting
usleep(10000);
} catch (...) {
// simply ignore the errors here
}
if (state == TRI_vocbase_t::State::SHUTDOWN_CLEANUP || isStopping()) {

View File

@ -813,13 +813,15 @@ bool MMFilesCollection::iterateDatafiles(std::function<bool(TRI_df_marker_t cons
bool MMFilesCollection::iterateDatafilesVector(std::vector<TRI_datafile_t*> const& files,
std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) {
for (auto const& datafile : files) {
datafile->sequentialAccess();
datafile->willNeed();
if (!TRI_IterateDatafile(datafile, cb)) {
return false;
}
if (datafile->isPhysical() && datafile->_isSealed) {
TRI_MMFileAdvise(datafile->_data, datafile->maximalSize(),
TRI_MADVISE_RANDOM);
datafile->randomAccess();
}
}

View File

@ -251,8 +251,8 @@ MMFilesCompactorThread::CompactionInitialContext MMFilesCompactorThread::getComp
// We will sequentially scan the logfile for collection:
if (df->isPhysical()) {
TRI_MMFileAdvise(df->_data, df->_maximalSize, TRI_MADVISE_SEQUENTIAL);
TRI_MMFileAdvise(df->_data, df->_maximalSize, TRI_MADVISE_WILLNEED);
df->sequentialAccess();
df->willNeed();
}
if (i == 0) {
@ -323,7 +323,7 @@ MMFilesCompactorThread::CompactionInitialContext MMFilesCompactorThread::getComp
}
if (df->isPhysical()) {
TRI_MMFileAdvise(df->_data, df->_maximalSize, TRI_MADVISE_RANDOM);
df->randomAccess();
}
if (!ok) {
@ -840,102 +840,107 @@ void MMFilesCompactorThread::run() {
// compaction loop
TRI_vocbase_t::State state = _vocbase->state();
engine->tryPreventCompaction(_vocbase, [this, &numCompacted, &collections](TRI_vocbase_t* vocbase) {
// compaction is currently allowed
numCompacted = 0;
try {
// copy all collections
collections = _vocbase->collections(false);
} catch (...) {
collections.clear();
}
try {
engine->tryPreventCompaction(_vocbase, [this, &numCompacted, &collections](TRI_vocbase_t* vocbase) {
// compaction is currently allowed
numCompacted = 0;
try {
// copy all collections
collections = _vocbase->collections(false);
} catch (...) {
collections.clear();
}
for (auto& collection : collections) {
bool worked = false;
for (auto& collection : collections) {
bool worked = false;
auto callback = [this, &collection, &worked]() -> void {
if (collection->status() != TRI_VOC_COL_STATUS_LOADED &&
collection->status() != TRI_VOC_COL_STATUS_UNLOADING) {
return;
}
bool doCompact = collection->doCompact();
// for document collection, compactify datafiles
if (collection->status() == TRI_VOC_COL_STATUS_LOADED && doCompact) {
// check whether someone else holds a read-lock on the compaction
// lock
TryCompactionLocker compactionLocker(collection);
if (!compactionLocker.isLocked()) {
// someone else is holding the compactor lock, we'll not compact
auto callback = [this, &collection, &worked]() -> void {
if (collection->status() != TRI_VOC_COL_STATUS_LOADED &&
collection->status() != TRI_VOC_COL_STATUS_UNLOADING) {
return;
}
try {
double const now = TRI_microtime();
if (collection->lastCompactionStamp() + compactionCollectionInterval() <= now) {
auto ce = collection->ditches()->createCompactionDitch(__FILE__,
__LINE__);
bool doCompact = collection->doCompact();
if (ce == nullptr) {
// out of memory
LOG_TOPIC(WARN, Logger::COMPACTOR) << "out of memory when trying to create compaction ditch";
} else {
try {
bool wasBlocked = false;
worked = compactCollection(collection, wasBlocked);
// for document collection, compactify datafiles
if (collection->status() == TRI_VOC_COL_STATUS_LOADED && doCompact) {
// check whether someone else holds a read-lock on the compaction
// lock
if (!worked && !wasBlocked) {
// set compaction stamp
collection->lastCompactionStamp(now);
}
// if we worked or were blocked, then we don't set the compaction stamp to
// force another round of compaction
} catch (...) {
LOG_TOPIC(ERR, Logger::COMPACTOR) << "an unknown exception occurred during compaction";
// in case an error occurs, we must still free this ditch
}
TryCompactionLocker compactionLocker(collection);
collection->ditches()->freeDitch(ce);
}
if (!compactionLocker.isLocked()) {
// someone else is holding the compactor lock, we'll not compact
return;
}
try {
double const now = TRI_microtime();
if (collection->lastCompactionStamp() + compactionCollectionInterval() <= now) {
auto ce = collection->ditches()->createCompactionDitch(__FILE__,
__LINE__);
if (ce == nullptr) {
// out of memory
LOG_TOPIC(WARN, Logger::COMPACTOR) << "out of memory when trying to create compaction ditch";
} else {
try {
bool wasBlocked = false;
worked = compactCollection(collection, wasBlocked);
if (!worked && !wasBlocked) {
// set compaction stamp
collection->lastCompactionStamp(now);
}
// if we worked or were blocked, then we don't set the compaction stamp to
// force another round of compaction
} catch (...) {
LOG_TOPIC(ERR, Logger::COMPACTOR) << "an unknown exception occurred during compaction";
// in case an error occurs, we must still free this ditch
}
collection->ditches()->freeDitch(ce);
}
}
} catch (...) {
// in case an error occurs, we must still relase the lock
LOG_TOPIC(ERR, Logger::COMPACTOR) << "an unknown exception occurred during compaction";
}
} catch (...) {
// in case an error occurs, we must still relase the lock
LOG_TOPIC(ERR, Logger::COMPACTOR) << "an unknown exception occurred during compaction";
}
};
if (!collection->tryExecuteWhileStatusLocked(callback)) {
continue;
}
};
if (!collection->tryExecuteWhileStatusLocked(callback)) {
continue;
if (worked) {
++numCompacted;
// signal the cleanup thread that we worked and that it can now wake
// up
CONDITION_LOCKER(locker, _condition);
locker.signal();
}
}
}, true);
if (worked) {
++numCompacted;
// signal the cleanup thread that we worked and that it can now wake
// up
CONDITION_LOCKER(locker, _condition);
locker.signal();
}
if (numCompacted > 0) {
// no need to sleep long or go into wait state if we worked.
// maybe there's still work left
usleep(1000);
} else if (state != TRI_vocbase_t::State::SHUTDOWN_COMPACTOR && _vocbase->state() == TRI_vocbase_t::State::NORMAL) {
// only sleep while server is still running
CONDITION_LOCKER(locker, _condition);
_condition.wait(compactionSleepTime());
}
}, true);
if (numCompacted > 0) {
// no need to sleep long or go into wait state if we worked.
// maybe there's still work left
usleep(1000);
} else if (state != TRI_vocbase_t::State::SHUTDOWN_COMPACTOR && _vocbase->state() == TRI_vocbase_t::State::NORMAL) {
// only sleep while server is still running
CONDITION_LOCKER(locker, _condition);
_condition.wait(compactionSleepTime());
}
if (state == TRI_vocbase_t::State::SHUTDOWN_COMPACTOR || isStopping()) {
// server shutdown or database has been removed
break;
if (state == TRI_vocbase_t::State::SHUTDOWN_COMPACTOR || isStopping()) {
// server shutdown or database has been removed
break;
}
} catch (...) {
// caught an error during compaction. simply ignore it and go on
}
}

View File

@ -256,12 +256,12 @@ bool CursorRepository::containsUsedCursor() {
////////////////////////////////////////////////////////////////////////////////
bool CursorRepository::garbageCollect(bool force) {
std::vector<arangodb::Cursor*> found;
found.reserve(MaxCollectCount);
auto const now = TRI_microtime();
std::vector<arangodb::Cursor*> found;
try {
found.reserve(MaxCollectCount);
{
MUTEX_LOCKER(mutexLocker, _lock);
for (auto it = _cursors.begin(); it != _cursors.end(); /* no hoisting */) {
@ -293,6 +293,8 @@ bool CursorRepository::garbageCollect(bool force) {
++it;
}
}
} catch (...) {
// go on and remove whatever we found so far
}
// remove cursors outside the lock

View File

@ -244,12 +244,18 @@ void V8DealerFeature::start() {
}
void V8DealerFeature::unprepare() {
// turn off memory allocation failures before going into v8 code
TRI_DisallowMemoryFailures();
shutdownContexts();
// delete GC thread after all action threads have been stopped
delete _gcThread;
DEALER = nullptr;
// turn on memory allocation failures again
TRI_AllowMemoryFailures();
}
bool V8DealerFeature::addGlobalContextMethod(std::string const& method) {
@ -288,115 +294,126 @@ void V8DealerFeature::collectGarbage() {
uint64_t const reducedWaitTime =
static_cast<uint64_t>(_gcFrequency * 1000.0 * 200.0);
// turn off memory allocation failures before going into v8 code
TRI_DisallowMemoryFailures();
while (_stopping == 0) {
V8Context* context = nullptr;
bool wasDirty = false;
{
bool gotSignal = false;
preferFree = !preferFree;
CONDITION_LOCKER(guard, _contextCondition);
if (_dirtyContexts.empty()) {
uint64_t waitTime = useReducedWait ? reducedWaitTime : regularWaitTime;
// we'll wait for a signal or a timeout
gotSignal = guard.wait(waitTime);
}
if (preferFree && !_freeContexts.empty()) {
context = pickFreeContextForGc();
}
if (context == nullptr && !_dirtyContexts.empty()) {
context = _dirtyContexts.back();
_dirtyContexts.pop_back();
if (context->_numExecutions < 50 && !context->_hasActiveExternals) {
// don't collect this one yet. it doesn't have externals, so there
// is no urge for garbage collection
_freeContexts.emplace_back(context);
context = nullptr;
} else {
wasDirty = true;
}
}
if (context == nullptr && !preferFree && !gotSignal &&
!_freeContexts.empty()) {
// we timed out waiting for a signal, so we have idle time that we can
// spend on running the GC pro-actively
// We'll pick one of the free contexts and clean it up
context = pickFreeContextForGc();
}
// there is no context to clean up, probably they all have been cleaned up
// already. increase the wait time so we don't cycle too much in the GC
// loop
// and waste CPU unnecessary
useReducedWait = (context != nullptr);
}
// update last gc time
double lastGc = TRI_microtime();
gc->updateGcStamp(lastGc);
if (context != nullptr) {
arangodb::CustomWorkStack custom("V8 GC", (uint64_t)context->_id);
LOG(TRACE) << "collecting V8 garbage in context #" << context->_id
<< ", numExecutions: " << context->_numExecutions
<< ", hasActive: " << context->_hasActiveExternals
<< ", wasDirty: " << wasDirty;
bool hasActiveExternals = false;
auto isolate = context->_isolate;
TRI_ASSERT(context->_locker == nullptr);
context->_locker = new v8::Locker(isolate);
isolate->Enter();
{
v8::HandleScope scope(isolate);
auto localContext =
v8::Local<v8::Context>::New(isolate, context->_context);
localContext->Enter();
{
v8::Context::Scope contextScope(localContext);
TRI_ASSERT(context->_locker->IsLocked(isolate));
TRI_ASSERT(v8::Locker::IsLocked(isolate));
TRI_GET_GLOBALS();
TRI_RunGarbageCollectionV8(isolate, 1.0);
hasActiveExternals = v8g->hasActiveExternals();
}
localContext->Exit();
}
isolate->Exit();
delete context->_locker;
context->_locker = nullptr;
// update garbage collection statistics
context->_hasActiveExternals = hasActiveExternals;
context->_numExecutions = 0;
context->_lastGcStamp = lastGc;
try {
V8Context* context = nullptr;
bool wasDirty = false;
{
bool gotSignal = false;
preferFree = !preferFree;
CONDITION_LOCKER(guard, _contextCondition);
if (wasDirty) {
_freeContexts.emplace_back(context);
} else {
_freeContexts.insert(_freeContexts.begin(), context);
if (_dirtyContexts.empty()) {
uint64_t waitTime = useReducedWait ? reducedWaitTime : regularWaitTime;
// we'll wait for a signal or a timeout
gotSignal = guard.wait(waitTime);
}
guard.broadcast();
if (preferFree && !_freeContexts.empty()) {
context = pickFreeContextForGc();
}
if (context == nullptr && !_dirtyContexts.empty()) {
context = _dirtyContexts.back();
_dirtyContexts.pop_back();
if (context->_numExecutions < 50 && !context->_hasActiveExternals) {
// don't collect this one yet. it doesn't have externals, so there
// is no urge for garbage collection
_freeContexts.emplace_back(context);
context = nullptr;
} else {
wasDirty = true;
}
}
if (context == nullptr && !preferFree && !gotSignal &&
!_freeContexts.empty()) {
// we timed out waiting for a signal, so we have idle time that we can
// spend on running the GC pro-actively
// We'll pick one of the free contexts and clean it up
context = pickFreeContextForGc();
}
// there is no context to clean up, probably they all have been cleaned up
// already. increase the wait time so we don't cycle too much in the GC
// loop
// and waste CPU unnecessary
useReducedWait = (context != nullptr);
}
} else {
useReducedWait = false; // sanity
// update last gc time
double lastGc = TRI_microtime();
gc->updateGcStamp(lastGc);
if (context != nullptr) {
arangodb::CustomWorkStack custom("V8 GC", (uint64_t)context->_id);
LOG(TRACE) << "collecting V8 garbage in context #" << context->_id
<< ", numExecutions: " << context->_numExecutions
<< ", hasActive: " << context->_hasActiveExternals
<< ", wasDirty: " << wasDirty;
bool hasActiveExternals = false;
auto isolate = context->_isolate;
TRI_ASSERT(context->_locker == nullptr);
context->_locker = new v8::Locker(isolate);
isolate->Enter();
{
v8::HandleScope scope(isolate);
auto localContext =
v8::Local<v8::Context>::New(isolate, context->_context);
localContext->Enter();
{
v8::Context::Scope contextScope(localContext);
TRI_ASSERT(context->_locker->IsLocked(isolate));
TRI_ASSERT(v8::Locker::IsLocked(isolate));
TRI_GET_GLOBALS();
TRI_RunGarbageCollectionV8(isolate, 1.0);
hasActiveExternals = v8g->hasActiveExternals();
}
localContext->Exit();
}
isolate->Exit();
delete context->_locker;
context->_locker = nullptr;
// update garbage collection statistics
context->_hasActiveExternals = hasActiveExternals;
context->_numExecutions = 0;
context->_lastGcStamp = lastGc;
{
CONDITION_LOCKER(guard, _contextCondition);
if (wasDirty) {
_freeContexts.emplace_back(context);
} else {
_freeContexts.insert(_freeContexts.begin(), context);
}
guard.broadcast();
}
} else {
useReducedWait = false; // sanity
}
} catch (...) {
// simply ignore errors here
useReducedWait = false;
}
}
}
// turn on memory allocation failures again
TRI_AllowMemoryFailures();
_gcFinished = true;
}
@ -539,6 +556,9 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
TRI_ASSERT(context->_isolate != nullptr);
auto isolate = context->_isolate;
// turn off memory allocation failures before going into v8 code
TRI_DisallowMemoryFailures();
TRI_ASSERT(context->_locker == nullptr);
context->_locker = new v8::Locker(isolate);
@ -708,6 +728,9 @@ void V8DealerFeature::exitContext(V8Context* context) {
guard.broadcast();
}
// turn on memory allocation failures again
TRI_AllowMemoryFailures();
}
void V8DealerFeature::defineContextUpdate(

View File

@ -42,6 +42,14 @@ RevisionCacheChunkAllocator::RevisionCacheChunkAllocator(uint32_t defaultChunkSi
}
RevisionCacheChunkAllocator::~RevisionCacheChunkAllocator() {
try {
while (garbageCollect()) {
// garbage collect until nothing more to do
}
} catch (...) {
// must not throw here
}
// free all chunks
WRITE_LOCKER(locker, _chunksLock);
@ -98,7 +106,7 @@ RevisionCacheChunk* RevisionCacheChunkAllocator::orderChunk(CollectionRevisionsC
uint32_t const targetSize = RevisionCacheChunk::alignSize((std::max)(valueSize, chunkSize), blockSize());
{
// first check if there's a chunk ready on the freelist
READ_LOCKER(locker, _chunksLock);
WRITE_LOCKER(locker, _chunksLock);
if (!_freeList.empty()) {
RevisionCacheChunk* chunk = _freeList.back();
@ -155,6 +163,7 @@ void RevisionCacheChunkAllocator::returnUsed(ReadCache* cache, RevisionCacheChun
{
MUTEX_LOCKER(locker, _gcLock);
auto it = _fullChunks.find(cache);
if (it == _fullChunks.end()) {
@ -208,6 +217,7 @@ bool RevisionCacheChunkAllocator::garbageCollect() {
chunk = _freeList.back();
_freeList.pop_back();
TRI_ASSERT(chunk != nullptr);
// fix statistics here already, because we already have the lock
TRI_ASSERT(_totalAllocated >= chunk->size());
_totalAllocated -= chunk->size();

View File

@ -154,7 +154,7 @@ static int CreateDatafile(std::string const& filename, TRI_voc_size_t maximalSiz
TRI_ERRORBUF;
// open the file
int fd = TRI_CREATE(filename.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
int fd = TRI_CREATE(filename.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC | TRI_NOATIME,
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
TRI_IF_FAILURE("CreateDatafile1") {
@ -557,6 +557,22 @@ int TRI_datafile_t::reserveElement(TRI_voc_size_t size, TRI_df_marker_t** positi
return TRI_ERROR_NO_ERROR;
}
void TRI_datafile_t::sequentialAccess() {
TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_SEQUENTIAL);
}
void TRI_datafile_t::randomAccess() {
TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_RANDOM);
}
void TRI_datafile_t::willNeed() {
TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_WILLNEED);
}
void TRI_datafile_t::dontNeed() {
TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_DONTNEED);
}
int TRI_datafile_t::lockInMemory() {
TRI_ASSERT(!_lockedInMemory);
@ -876,7 +892,7 @@ int TRI_datafile_t::seal() {
if (isPhysical()) {
// From now on we predict random access (until collection or compaction):
TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_RANDOM);
randomAccess();
}
return TRI_ERROR_NO_ERROR;
@ -1013,7 +1029,7 @@ TRI_datafile_t::TRI_datafile_t(std::string const& filename, int fd, void* mmHand
TRI_ASSERT(fd >= 0);
// Advise OS that sequential access is going to happen:
TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_SEQUENTIAL);
sequentialAccess();
}
}
@ -1791,8 +1807,8 @@ TRI_datafile_t* TRI_datafile_t::open(std::string const& filename, bool ignoreFai
}
// Advise on sequential use:
TRI_MMFileAdvise(datafile->_data, datafile->_maximalSize, TRI_MADVISE_SEQUENTIAL);
TRI_MMFileAdvise(datafile->_data, datafile->_maximalSize, TRI_MADVISE_WILLNEED);
datafile->sequentialAccess();
datafile->willNeed();
return datafile.release();
}
@ -1812,7 +1828,7 @@ TRI_datafile_t* TRI_datafile_t::openHelper(std::string const& filename, bool ign
// attempt to open a datafile file
// ..........................................................................
int fd = TRI_OPEN(filename.c_str(), O_RDWR | TRI_O_CLOEXEC);
int fd = TRI_OPEN(filename.c_str(), O_RDWR | TRI_O_CLOEXEC | TRI_NOATIME);
if (fd < 0) {
TRI_SYSTEM_ERROR();

View File

@ -231,6 +231,11 @@ struct TRI_datafile_t {
int reserveElement(TRI_voc_size_t size, TRI_df_marker_t** position,
TRI_voc_size_t maximalJournalSize);
void sequentialAccess();
void randomAccess();
void willNeed();
void dontNeed();
int lockInMemory();
int unlockFromMemory();

View File

@ -712,9 +712,9 @@ int CollectorThread::collect(Logfile* logfile) {
}
// We will sequentially scan the logfile for collection:
TRI_MMFileAdvise(df->_data, df->_maximalSize, TRI_MADVISE_SEQUENTIAL);
TRI_MMFileAdvise(df->_data, df->_maximalSize, TRI_MADVISE_WILLNEED);
TRI_DEFER(TRI_MMFileAdvise(df->_data, df->_maximalSize, TRI_MADVISE_RANDOM));
df->sequentialAccess();
df->willNeed();
TRI_DEFER(df->randomAccess());
// create a state for the collector, beginning with the list of failed
// transactions
@ -756,14 +756,30 @@ int CollectorThread::collect(Logfile* logfile) {
collectionIds.emplace(cid);
}
}
OperationsType sortedOperations;
// now for each collection, write all surviving markers into collection
// datafiles
for (auto it = collectionIds.begin(); it != collectionIds.end(); ++it) {
auto cid = (*it);
OperationsType sortedOperations;
// calculate required size for sortedOperations vector
sortedOperations.clear();
{
size_t requiredSize = 0;
auto it1 = state.structuralOperations.find(cid);
if (it1 != state.structuralOperations.end()) {
requiredSize += (*it1).second.size();
}
auto it2 = state.documentOperations.find(cid);
if (it2 != state.documentOperations.end()) {
requiredSize += (*it2).second.size();
}
sortedOperations.reserve(requiredSize);
}
// insert structural operations - those are already sorted by tick
if (state.structuralOperations.find(cid) !=
state.structuralOperations.end()) {

View File

@ -246,8 +246,8 @@ void LogfileManager::prepare() {
auto databasePath = ApplicationServer::getFeature<DatabasePathFeature>("DatabasePath");
_databasePath = databasePath->directory();
std::string const shutdownFile = shutdownFilename();
bool const shutdownFileExists = basics::FileUtils::exists(shutdownFile);
_shutdownFile = shutdownFilename();
bool const shutdownFileExists = basics::FileUtils::exists(_shutdownFile);
if (shutdownFileExists) {
LOG(TRACE) << "shutdown file found";
@ -255,7 +255,7 @@ void LogfileManager::prepare() {
int res = readShutdownInfo();
if (res != TRI_ERROR_NO_ERROR) {
LOG(FATAL) << "could not open shutdown file '" << shutdownFile
LOG(FATAL) << "could not open shutdown file '" << _shutdownFile
<< "': " << TRI_errno_string(res);
FATAL_ERROR_EXIT();
}
@ -1771,10 +1771,10 @@ void LogfileManager::closeLogfiles() {
// reads the shutdown information
int LogfileManager::readShutdownInfo() {
std::string const filename = shutdownFilename();
TRI_ASSERT(!_shutdownFile.empty());
std::shared_ptr<VPackBuilder> builder;
try {
builder = arangodb::basics::VelocyPackHelper::velocyPackFromFile(filename);
builder = arangodb::basics::VelocyPackHelper::velocyPackFromFile(_shutdownFile);
} catch (...) {
return TRI_ERROR_INTERNAL;
}
@ -1838,7 +1838,7 @@ int LogfileManager::readShutdownInfo() {
int LogfileManager::writeShutdownInfo(bool writeShutdownTime) {
TRI_IF_FAILURE("LogfileManagerWriteShutdown") { return TRI_ERROR_DEBUG; }
std::string const filename = shutdownFilename();
TRI_ASSERT(!_shutdownFile.empty());
try {
VPackBuilder builder;
@ -1871,15 +1871,15 @@ int LogfileManager::writeShutdownInfo(bool writeShutdownTime) {
// time
MUTEX_LOCKER(mutexLocker, _shutdownFileLock);
ok = arangodb::basics::VelocyPackHelper::velocyPackToFile(
filename, builder.slice(), true);
_shutdownFile, builder.slice(), true);
}
if (!ok) {
LOG(ERR) << "unable to write WAL state file '" << filename << "'";
LOG(ERR) << "unable to write WAL state file '" << _shutdownFile << "'";
return TRI_ERROR_CANNOT_WRITE_FILE;
}
} catch (...) {
LOG(ERR) << "unable to write WAL state file '" << filename << "'";
LOG(ERR) << "unable to write WAL state file '" << _shutdownFile << "'";
return TRI_ERROR_OUT_OF_MEMORY;
}
@ -2103,8 +2103,11 @@ int LogfileManager::inspectLogfiles() {
LOG(TRACE) << "inspecting logfile " << logfile->id() << " ("
<< logfile->statusText() << ")";
TRI_datafile_t* df = logfile->df();
df->sequentialAccess();
// update the tick statistics
if (!TRI_IterateDatafile(logfile->df(), &RecoverState::InitialScanMarker,
if (!TRI_IterateDatafile(df, &RecoverState::InitialScanMarker,
static_cast<void*>(_recoverState))) {
std::string const logfileName = logfile->filename();
LOG(WARN) << "WAL inspection failed when scanning logfile '"
@ -2114,13 +2117,12 @@ int LogfileManager::inspectLogfiles() {
LOG(TRACE) << "inspected logfile " << logfile->id() << " ("
<< logfile->statusText()
<< "), tickMin: " << logfile->df()->_tickMin
<< ", tickMax: " << logfile->df()->_tickMax;
<< "), tickMin: " << df->_tickMin
<< ", tickMax: " << df->_tickMax;
if (logfile->status() == Logfile::StatusType::SEALED) {
// If it is sealed, switch to random access:
TRI_datafile_t* df = logfile->df();
TRI_MMFileAdvise(df->_data, df->_maximalSize, TRI_MADVISE_RANDOM);
df->randomAccess();
}
{
@ -2230,7 +2232,7 @@ int LogfileManager::ensureDirectory() {
// return the absolute name of the shutdown file
std::string LogfileManager::shutdownFilename() const {
return (_databasePath + TRI_DIR_SEPARATOR_STR) + "SHUTDOWN";
return _databasePath + TRI_DIR_SEPARATOR_STR + "SHUTDOWN";
}
// return an absolute filename for a logfile id
@ -2238,4 +2240,3 @@ std::string LogfileManager::logfileName(Logfile::IdType id) const {
return _directory + std::string("logfile-") + basics::StringUtils::itoa(id) +
std::string(".db");
}

View File

@ -519,6 +519,8 @@ class LogfileManager final : public application_features::ApplicationFeature {
// a lock protecting the shutdown file
Mutex _shutdownFileLock;
std::string _shutdownFile;
// a lock protecting ALL buckets in _transactions
basics::ReadWriteLock _allTransactionsLock;

View File

@ -1020,8 +1020,8 @@ int RecoverState::replayLogfile(Logfile* logfile, int number) {
TRI_datafile_t* df = logfile->df();
// Advise on sequential use:
TRI_MMFileAdvise(df->_data, df->maximalSize(), TRI_MADVISE_SEQUENTIAL);
TRI_MMFileAdvise(df->_data, df->maximalSize(), TRI_MADVISE_WILLNEED);
df->sequentialAccess();
df->willNeed();
if (!TRI_IterateDatafile(df, &RecoverState::ReplayMarker, static_cast<void*>(this))) {
LOG(WARN) << "WAL inspection failed when scanning logfile '" << logfileName << "'";
@ -1029,7 +1029,7 @@ int RecoverState::replayLogfile(Logfile* logfile, int number) {
}
// Advise on random access use:
TRI_MMFileAdvise(df->_data, df->maximalSize(), TRI_MADVISE_RANDOM);
df->randomAccess();
return TRI_ERROR_NO_ERROR;
}

View File

@ -133,6 +133,9 @@ void ShellFeature::start() {
V8ShellFeature* shell = application_features::ApplicationServer::getFeature<V8ShellFeature>("V8Shell");
// turn off memory allocation failures before we move into V8 code
TRI_DisallowMemoryFailures();
bool ok = false;
try {
@ -172,5 +175,7 @@ void ShellFeature::start() {
ok = false;
}
// turn on memory allocation failures again
TRI_AllowMemoryFailures();
*_result = ok ? EXIT_SUCCESS : EXIT_FAILURE;
}

View File

@ -146,6 +146,9 @@ void V8ShellFeature::start() {
}
void V8ShellFeature::unprepare() {
// turn off memory allocation failures before we move into V8 code
TRI_DisallowMemoryFailures();
{
v8::Locker locker{_isolate};
@ -177,6 +180,9 @@ void V8ShellFeature::unprepare() {
}
_isolate->Dispose();
// turn on memory allocation failures again
TRI_AllowMemoryFailures();
}
bool V8ShellFeature::printHello(V8ClientConnection* v8connection) {

View File

@ -117,6 +117,9 @@ ArangoGlobalContext::ArangoGlobalContext(int argc, char* argv[],
TRI_GetInstallRoot(TRI_LocateBinaryPath(argv[0]), InstallDirectory)),
_ret(EXIT_FAILURE),
_useEventLog(true) {
// allow failing memory allocations for the global context thread (i.e. main program thread)
TRI_AllowMemoryFailures();
static char const* serverName = "arangod";
if (_binaryName.size() < strlen(serverName) ||
_binaryName.substr(_binaryName.size() - strlen(serverName)) !=

View File

@ -157,6 +157,9 @@ Thread::Thread(std::string const& name)
_affinity(-1),
_workDescription(nullptr) {
TRI_InitThread(&_thread);
// allow failing memory allocations for all threads by default
TRI_AllowMemoryFailures();
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -26,6 +26,8 @@
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
#include <sys/time.h>
#include <unistd.h>
#include <new>
#endif
////////////////////////////////////////////////////////////////////////////////
@ -74,38 +76,30 @@
#define REALLOC_WRAPPER(zone, ptr, n) BuiltInRealloc(ptr, n)
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief core memory zone, allocation will never fail
////////////////////////////////////////////////////////////////////////////////
static TRI_memory_zone_t TriCoreMemZone;
////////////////////////////////////////////////////////////////////////////////
/// @brief unknown memory zone
////////////////////////////////////////////////////////////////////////////////
static TRI_memory_zone_t TriUnknownMemZone;
////////////////////////////////////////////////////////////////////////////////
/// @brief memory reserve for core memory zone
////////////////////////////////////////////////////////////////////////////////
static void* CoreReserve;
////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not the core was initialized
////////////////////////////////////////////////////////////////////////////////
static int CoreInitialized = 0;
////////////////////////////////////////////////////////////////////////////////
/// @brief configuration parameters for memory error tests
////////////////////////////////////////////////////////////////////////////////
/// @brief core memory zone, allocation will never fail
TRI_memory_zone_t* TRI_CORE_MEM_ZONE = &TriCoreMemZone;
/// @brief unknown memory zone
TRI_memory_zone_t* TRI_UNKNOWN_MEM_ZONE = &TriUnknownMemZone;
/// @brief configuration parameters for memory error tests
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
static size_t FailMinSize = 0;
static double FailProbability = 0.0;
static double FailStartStamp = 0.0;
static thread_local int AllowMemoryFailures = -1;
#endif
////////////////////////////////////////////////////////////////////////////////
@ -123,11 +117,8 @@ static inline void CheckSize(uint64_t n, char const* file, int line) {
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief timestamp for failing malloc
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
/// @brief timestamp for failing malloc
static inline double CurrentTimeStamp(void) {
struct timeval tv;
gettimeofday(&tv, 0);
@ -136,11 +127,8 @@ static inline double CurrentTimeStamp(void) {
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not a malloc operation should intentionally fail
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
/// @brief whether or not a malloc operation should intentionally fail
static bool ShouldFail(size_t n) {
if (FailMinSize > 0 && FailMinSize > n) {
return false;
@ -150,6 +138,10 @@ static bool ShouldFail(size_t n) {
return false;
}
if (AllowMemoryFailures != 1) {
return false;
}
if (FailStartStamp > 0.0 && CurrentTimeStamp() < FailStartStamp) {
return false;
}
@ -162,11 +154,8 @@ static bool ShouldFail(size_t n) {
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief intentionally failing malloc - used for failure tests
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
/// @brief intentionally failing malloc - used for failure tests
static void* FailMalloc(TRI_memory_zone_t* zone, size_t n) {
// we can fail, so let's check whether we should fail intentionally...
if (zone->_failable && ShouldFail(n)) {
@ -179,11 +168,8 @@ static void* FailMalloc(TRI_memory_zone_t* zone, size_t n) {
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief intentionally failing realloc - used for failure tests
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
/// @brief intentionally failing realloc - used for failure tests
static void* FailRealloc(TRI_memory_zone_t* zone, void* old, size_t n) {
// we can fail, so let's check whether we should fail intentionally...
if (zone->_failable && ShouldFail(n)) {
@ -196,11 +182,8 @@ static void* FailRealloc(TRI_memory_zone_t* zone, void* old, size_t n) {
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief initialize failing malloc
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
/// @brief initialize failing malloc
static void InitFailMalloc(void) {
// get failure probability
char* value = getenv("ARANGODB_FAILMALLOC_PROBABILITY");
@ -232,25 +215,85 @@ static void InitFailMalloc(void) {
}
}
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief core memory zone, allocation will never fail
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
/// @brief overloaded, intentionally failing operator new
void* operator new (size_t size) throw(std::bad_alloc) {
if (ShouldFail(size)) {
throw std::bad_alloc();
}
TRI_memory_zone_t* TRI_CORE_MEM_ZONE = &TriCoreMemZone;
void* pointer = malloc(size);
////////////////////////////////////////////////////////////////////////////////
/// @brief unknown memory zone
////////////////////////////////////////////////////////////////////////////////
if (pointer == nullptr) {
throw std::bad_alloc();
}
TRI_memory_zone_t* TRI_UNKNOWN_MEM_ZONE = &TriUnknownMemZone;
return pointer;
}
/// @brief overloaded, intentionally failing operator new, non-throwing
void* operator new (size_t size, std::nothrow_t const&) throw() {
if (ShouldFail(size)) {
return nullptr;
}
return malloc(size);
}
/// @brief overloaded, intentionally failing operator new[]
void* operator new[] (size_t size) throw(std::bad_alloc) {
if (ShouldFail(size)) {
throw std::bad_alloc();
}
void* pointer = malloc(size);
if (pointer == nullptr) {
throw std::bad_alloc();
}
return pointer;
}
/// @brief overloaded, intentionally failing operator new[], non-throwing
void* operator new[] (size_t size, std::nothrow_t const&) throw() {
if (ShouldFail(size)) {
return nullptr;
}
return malloc(size);
}
/// @brief overloaded operator delete
void operator delete (void* pointer) throw() {
if (pointer) {
free(pointer);
}
}
/// @brief overloaded operator delete
void operator delete (void* pointer, std::nothrow_t const&) throw() {
if (pointer) {
free(pointer);
}
}
/// @brief overloaded operator delete[], matching the failing operator new[].
/// Releases memory obtained from the malloc-based operator new[] above.
/// Declared noexcept: the empty dynamic exception specification throw() is
/// deprecated since C++11 and removed in C++20.
void operator delete[] (void* pointer) noexcept {
  // no null check needed: free(nullptr) is a no-op per the C standard
  free(pointer);
}
/// @brief overloaded operator delete[], nothrow placement form; invoked by the
/// runtime when a nothrow new[] succeeds but a constructor throws.
/// Declared noexcept: the empty dynamic exception specification throw() is
/// deprecated since C++11 and removed in C++20.
void operator delete[] (void* pointer, std::nothrow_t const&) noexcept {
  // no null check needed: free(nullptr) is a no-op per the C standard
  free(pointer);
}
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief system memory allocation
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
void* TRI_SystemAllocateZ(uint64_t n, bool set, char const* file, int line) {
#else
@ -271,10 +314,7 @@ void* TRI_SystemAllocate(uint64_t n, bool set) {
return m;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief basic memory management for allocate
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
void* TRI_AllocateZ(TRI_memory_zone_t* zone, uint64_t n, bool set,
char const* file, int line) {
@ -329,10 +369,7 @@ void* TRI_Allocate(TRI_memory_zone_t* zone, uint64_t n, bool set) {
return m;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief basic memory management for reallocate
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
void* TRI_ReallocateZ(TRI_memory_zone_t* zone, void* m, uint64_t n,
char const* file, int line) {
@ -388,10 +425,7 @@ void* TRI_Reallocate(TRI_memory_zone_t* zone, void* m, uint64_t n) {
return p;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief basic memory management for deallocate
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
void TRI_FreeZ(TRI_memory_zone_t* zone, void* m, char const* file, int line) {
#else
@ -411,13 +445,10 @@ void TRI_Free(TRI_memory_zone_t* zone, void* m) {
free(p);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief free memory allocated by some low-level functions
///
/// this can be used to free memory that was not allocated by TRI_Allocate, but
/// by malloc et al
////////////////////////////////////////////////////////////////////////////////
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
void TRI_SystemFreeZ(void* p, char const* file, int line) {
#else
@ -432,27 +463,19 @@ void TRI_SystemFree(void* p) {
free(p);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief wrapper for realloc
///
/// this wrapper is used together with libev, as the builtin libev allocator
/// causes problems with Valgrind:
/// - http://lists.schmorp.de/pipermail/libev/2012q2/001917.html
/// - http://lists.gnu.org/archive/html/bug-gnulib/2011-03/msg00243.html
////////////////////////////////////////////////////////////////////////////////
void* TRI_WrappedReallocate(void* ptr, long size) {
if (ptr == nullptr && size == 0) {
return nullptr;
}
return BuiltInRealloc(ptr, (size_t)size);
/// @brief turn on intentionally failing memory allocations.
/// Compiles to a no-op unless the build enables failure tests.
void TRI_AllowMemoryFailures() {
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
  // the flag is polled by the overloaded allocation operators above
  AllowMemoryFailures = 1;
#endif
}
////////////////////////////////////////////////////////////////////////////////
/// @brief initialize memory subsystem
////////////////////////////////////////////////////////////////////////////////
/// @brief turn off intentionally failing memory allocations.
/// Compiles to a no-op unless the build enables failure tests.
void TRI_DisallowMemoryFailures() {
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
  // the flag is polled by the overloaded allocation operators above
  AllowMemoryFailures = 0;
#endif
}
/// @brief initialize memory subsystem
void TRI_InitializeMemory() {
if (CoreInitialized == 0) {
static size_t const ReserveSize = 1024 * 1024 * 10;
@ -480,10 +503,7 @@ void TRI_InitializeMemory() {
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief shutdown memory subsystem
////////////////////////////////////////////////////////////////////////////////
void TRI_ShutdownMemory() {
if (CoreInitialized == 1) {
free(CoreReserve);

View File

@ -139,27 +139,14 @@ void TRI_SystemFreeZ(void*, char const*, int);
void TRI_SystemFree(void*);
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief wrapper for realloc
///
/// this wrapper is used together with libev, as the builtin libev allocator
/// causes problems with Valgrind:
/// - http://lists.schmorp.de/pipermail/libev/2012q2/001917.html
/// - http://lists.gnu.org/archive/html/bug-gnulib/2011-03/msg00243.html
////////////////////////////////////////////////////////////////////////////////
void TRI_AllowMemoryFailures();
void* TRI_WrappedReallocate(void*, long);
void TRI_DisallowMemoryFailures();
////////////////////////////////////////////////////////////////////////////////
/// @brief initialize memory subsystem
////////////////////////////////////////////////////////////////////////////////
void TRI_InitializeMemory(void);
////////////////////////////////////////////////////////////////////////////////
/// @brief shut down memory subsystem
////////////////////////////////////////////////////////////////////////////////
void TRI_ShutdownMemory(void);
#endif

View File

@ -143,6 +143,7 @@
#define TRI_DIR_SEPARATOR_STR "/"
#define TRI_O_CLOEXEC O_CLOEXEC
#define TRI_NOATIME O_NOATIME
#define TRI_CHDIR ::chdir
#define TRI_CLOSE ::close
@ -298,6 +299,7 @@
#define TRI_DIR_SEPARATOR_STR "/"
#define TRI_O_CLOEXEC O_CLOEXEC
#define TRI_NOATIME O_NOATIME
#define TRI_CHDIR ::chdir
#define TRI_CLOSE ::close
@ -440,6 +442,7 @@
#define TRI_DIR_SEPARATOR_STR "/"
#define TRI_O_CLOEXEC O_CLOEXEC
#define TRI_NOATIME O_NOATIME
#define TRI_CHDIR ::chdir
#define TRI_CLOSE ::close
@ -601,6 +604,7 @@
#define TRI_DIR_SEPARATOR_STR "/"
#define TRI_O_CLOEXEC O_CLOEXEC
#define TRI_NOATIME O_NOATIME
#define TRI_CHDIR ::chdir
#define TRI_CLOSE ::close
@ -795,6 +799,7 @@ typedef unsigned char bool;
#define S_IWUSR _S_IWRITE
#define TRI_O_CLOEXEC 0
#define TRI_NOATIME 0
#define O_RDONLY _O_RDONLY

View File

@ -148,6 +148,12 @@ void Version::initialize() {
Values["maintainer-mode"] = "false";
#endif
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
Values["failure-tests"] = "true";
#else
Values["failure-tests"] = "false";
#endif
#ifdef ARANGODB_HAVE_TCMALLOC
Values["tcmalloc"] = "true";
#else