From f0a4d69b69aad1c5e57d8da2493cbed608d69831 Mon Sep 17 00:00:00 2001 From: jsteemann Date: Fri, 9 Dec 2016 21:40:49 +0100 Subject: [PATCH 01/12] use bulk allocator for index elements --- arangod/Aql/Executor.cpp | 8 +- arangod/Aql/Functions.cpp | 2 +- arangod/Aql/RestAqlHandler.cpp | 10 +- arangod/Aql/Scopes.cpp | 2 +- arangod/Aql/TraversalConditionFinder.cpp | 4 +- arangod/Cluster/ClusterComm.cpp | 2 +- arangod/Cluster/v8-cluster.cpp | 2 +- arangod/Indexes/FulltextIndex.cpp | 2 +- arangod/Indexes/HashIndex.cpp | 46 ++--- arangod/Indexes/IndexElement.cpp | 42 +---- arangod/Indexes/IndexElement.h | 12 +- arangod/Indexes/PathBasedIndex.cpp | 25 ++- arangod/Indexes/PathBasedIndex.h | 6 +- arangod/Indexes/RocksDBIndex.cpp | 7 +- arangod/Indexes/SkiplistIndex.cpp | 13 +- arangod/RestHandler/RestBatchHandler.cpp | 6 +- arangod/RestHandler/RestImportHandler.cpp | 14 +- .../RestHandler/RestPleaseUpgradeHandler.cpp | 2 +- .../RestHandler/RestReplicationHandler.cpp | 18 +- arangod/RestHandler/RestSimpleHandler.cpp | 2 +- arangod/RestHandler/RestUploadHandler.cpp | 4 +- arangod/Utils/Cursor.cpp | 4 +- arangod/Utils/Transaction.cpp | 2 +- arangod/V8Server/v8-actions.cpp | 6 +- arangod/V8Server/v8-query.cpp | 2 +- arangod/V8Server/v8-replication.cpp | 10 +- arangod/V8Server/v8-vocbase.cpp | 8 +- arangod/V8Server/v8-vocindex.cpp | 2 +- arangod/VocBase/LogicalCollection.cpp | 2 +- arangod/Wal/RecoverState.cpp | 4 +- lib/Basics/Exceptions.cpp | 2 + lib/Basics/FixedSizeAllocator.h | 163 ++++++++++++++++++ 32 files changed, 288 insertions(+), 146 deletions(-) create mode 100644 lib/Basics/FixedSizeAllocator.h diff --git a/arangod/Aql/Executor.cpp b/arangod/Aql/Executor.cpp index 5a3c9fe87c..d158b66263 100644 --- a/arangod/Aql/Executor.cpp +++ b/arangod/Aql/Executor.cpp @@ -84,7 +84,7 @@ V8Expression* Executor::generateExpression(AstNode const* node) { v8::Handle compiled = v8::Script::Compile( TRI_V8_STD_STRING((*_buffer)), TRI_V8_ASCII_STRING("--script--")); - if (! compiled.IsEmpty()) { + if (!compiled.IsEmpty()) { v8::Handle func(compiled->Run()); // exit early if an error occurred @@ -108,7 +108,7 @@ V8Expression* Executor::generateExpression(AstNode const* node) { HandleV8Error(tryCatch, empty, _buffer, true); // well we're almost sure we never reach this since the above call should throw: - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to compile AQL script code"); } } @@ -133,7 +133,7 @@ int Executor::executeExpression(Query* query, AstNode const* node, v8::Handle compiled = v8::Script::Compile( TRI_V8_STD_STRING((*_buffer)), TRI_V8_ASCII_STRING("--script--")); - if (! 
compiled.IsEmpty()) { + if (!compiled.IsEmpty()) { v8::Handle func(compiled->Run()); @@ -175,7 +175,7 @@ int Executor::executeExpression(Query* query, AstNode const* node, HandleV8Error(tryCatch, empty, _buffer, true); // well we're almost sure we never reach this since the above call should throw: - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to compile AQL script code"); } } diff --git a/arangod/Aql/Functions.cpp b/arangod/Aql/Functions.cpp index 1f6afa1f1f..996755e0eb 100644 --- a/arangod/Aql/Functions.cpp +++ b/arangod/Aql/Functions.cpp @@ -4010,7 +4010,7 @@ AqlValue Functions::Fulltext(arangodb::aql::Query* query, TRI_QueryFulltextIndex(fulltextIndex->internals(), ft); if (queryResult == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); } TRI_ASSERT(trx->hasDitch(cid)); diff --git a/arangod/Aql/RestAqlHandler.cpp b/arangod/Aql/RestAqlHandler.cpp index f40c2b13b5..22c5ee0928 100644 --- a/arangod/Aql/RestAqlHandler.cpp +++ b/arangod/Aql/RestAqlHandler.cpp @@ -499,7 +499,7 @@ void RestAqlHandler::getInfoQuery(std::string const& operation, auto block = static_cast(query->engine()->root()); if (block->getPlanNode()->getType() != ExecutionNode::SCATTER && block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type"); } number = block->remainingForShard(shardId); } @@ -516,7 +516,7 @@ void RestAqlHandler::getInfoQuery(std::string const& operation, auto block = static_cast(query->engine()->root()); if (block->getPlanNode()->getType() != ExecutionNode::SCATTER && block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type"); } hasMore = block->hasMoreForShard(shardId); } @@ -719,7 +719,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query, auto block = static_cast(query->engine()->root()); if (block->getPlanNode()->getType() != ExecutionNode::SCATTER && block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type"); } items.reset(block->getSomeForShard(atLeast, atMost, shardId)); } @@ -755,7 +755,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query, static_cast(query->engine()->root()); if (block->getPlanNode()->getType() != ExecutionNode::SCATTER && block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type"); } skipped = block->skipSomeForShard(atLeast, atMost, shardId); } @@ -783,7 +783,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query, static_cast(query->engine()->root()); if (block->getPlanNode()->getType() != ExecutionNode::SCATTER && block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type"); } exhausted = block->skipForShard(number, shardId); } diff --git a/arangod/Aql/Scopes.cpp b/arangod/Aql/Scopes.cpp index c5ca8b1d23..93e902114c 100644 --- a/arangod/Aql/Scopes.cpp +++ b/arangod/Aql/Scopes.cpp @@ -203,7 +203,7 @@ void Scopes::replaceVariable(Variable* 
variable) { } } - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find AQL variable in scopes"); } /// @brief checks whether a variable exists in any scope diff --git a/arangod/Aql/TraversalConditionFinder.cpp b/arangod/Aql/TraversalConditionFinder.cpp index d81771b484..79284e37a1 100644 --- a/arangod/Aql/TraversalConditionFinder.cpp +++ b/arangod/Aql/TraversalConditionFinder.cpp @@ -60,7 +60,7 @@ static AstNode* createGlobalCondition(Ast* ast, AstNode const* condition) { type = NODE_TYPE_OPERATOR_BINARY_NIN; break; default: - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unsupported operator type"); } auto quantifier = condition->getMemberUnchecked(2); TRI_ASSERT(quantifier->type == NODE_TYPE_QUANTIFIER); @@ -69,7 +69,7 @@ static AstNode* createGlobalCondition(Ast* ast, AstNode const* condition) { if (val == Quantifier::NONE) { auto it = Ast::NegatedOperators.find(type); if (it == Ast::NegatedOperators.end()) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unsupported operator type"); } type = it->second; } diff --git a/arangod/Cluster/ClusterComm.cpp b/arangod/Cluster/ClusterComm.cpp index d16be0a81f..bc57f94a86 100644 --- a/arangod/Cluster/ClusterComm.cpp +++ b/arangod/Cluster/ClusterComm.cpp @@ -699,7 +699,7 @@ void ClusterComm::asyncAnswer(std::string& coordinatorHeader, // FIXME - generalize for VPP HttpResponse* responseToSend = dynamic_cast(response); if (responseToSend == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type"); } // First take apart the header to get the coordinatorID: diff --git a/arangod/Cluster/v8-cluster.cpp b/arangod/Cluster/v8-cluster.cpp index ee778b8bd0..9d90b0e7d3 100644 --- a/arangod/Cluster/v8-cluster.cpp +++ b/arangod/Cluster/v8-cluster.cpp @@ -1641,7 +1641,7 @@ static void Return_PrepareClusterCommResultForJS( // FIXME HANDLE VPP auto httpRequest = std::dynamic_pointer_cast(res.answer); if (httpRequest == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } // The headers: diff --git a/arangod/Indexes/FulltextIndex.cpp b/arangod/Indexes/FulltextIndex.cpp index d4cda9d651..d8f8b57afe 100644 --- a/arangod/Indexes/FulltextIndex.cpp +++ b/arangod/Indexes/FulltextIndex.cpp @@ -85,7 +85,7 @@ FulltextIndex::FulltextIndex(TRI_idx_iid_t iid, _sparse = true; if (_fields.size() != 1) { // We need exactly 1 attribute - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "fulltext index definition should have exactly one attribute"); } auto& attribute = _fields[0]; _attr.reserve(attribute.size()); diff --git a/arangod/Indexes/HashIndex.cpp b/arangod/Indexes/HashIndex.cpp index 17e7334176..e8f8b689ac 100644 --- a/arangod/Indexes/HashIndex.cpp +++ b/arangod/Indexes/HashIndex.cpp @@ -26,6 +26,7 @@ #include "Aql/AstNode.h" #include "Aql/SortCondition.h" #include "Basics/Exceptions.h" +#include "Basics/FixedSizeAllocator.h" #include "Basics/VelocyPackHelper.h" #include "Indexes/IndexLookupContext.h" #include "Indexes/SimpleAttributeEqualityMatcher.h" @@ -432,13 +433,6 @@ HashIndex::UniqueArray::UniqueArray( /// @brief destroy the unique array HashIndex::UniqueArray::~UniqueArray() { - if (_hashArray != nullptr) { - auto cb = [this](HashIndexElement* element) -> bool { - element->free(); return true; - 
}; - _hashArray->invokeOnAllElements(cb); - } - delete _hashArray; delete _hashElement; delete _isEqualElElByKey; @@ -460,13 +454,6 @@ HashIndex::MultiArray::MultiArray(size_t numPaths, /// @brief destroy the multi array HashIndex::MultiArray::~MultiArray() { - if (_hashArray != nullptr) { - auto cb = [this](HashIndexElement* element) -> bool { - element->free(); return true; - }; - _hashArray->invokeOnAllElements(cb); - } - delete _hashArray; delete _hashElement; delete _isEqualElElByKey; @@ -474,7 +461,7 @@ HashIndex::MultiArray::~MultiArray() { HashIndex::HashIndex(TRI_idx_iid_t iid, LogicalCollection* collection, VPackSlice const& info) - : PathBasedIndex(iid, collection, info, false), _uniqueArray(nullptr) { + : PathBasedIndex(iid, collection, info, sizeof(TRI_voc_rid_t) + sizeof(uint32_t), false), _uniqueArray(nullptr) { uint32_t indexBuckets = 1; if (collection != nullptr) { @@ -650,7 +637,7 @@ int HashIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (res != TRI_ERROR_NO_ERROR) { for (auto& hashElement : elements) { - hashElement->free(); + _allocator->deallocate(hashElement); } return res; } @@ -668,7 +655,7 @@ int HashIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (result != TRI_ERROR_NO_ERROR) { res = result; } - hashElement->free(); + _allocator->deallocate(hashElement); } return res; @@ -686,10 +673,11 @@ int HashIndex::batchInsert(arangodb::Transaction* trx, int HashIndex::unload() { if (_unique) { - _uniqueArray->_hashArray->truncate([](HashIndexElement* element) -> bool { element->free(); return true; }); + _uniqueArray->_hashArray->truncate([](HashIndexElement*) -> bool { return true; }); } else { - _multiArray->_hashArray->truncate([](HashIndexElement* element) -> bool { element->free(); return true; }); + _multiArray->_hashArray->truncate([](HashIndexElement*) -> bool { return true; }); } + _allocator->deallocateAll(); return TRI_ERROR_NO_ERROR; } @@ -751,7 +739,7 @@ int HashIndex::insertUnique(arangodb::Transaction* trx, TRI_voc_rid_t revisionId if (res != TRI_ERROR_NO_ERROR) { for (auto& it : elements) { // free all elements to prevent leak - it->free(); + _allocator->deallocate(it); } return res; @@ -775,7 +763,7 @@ int HashIndex::insertUnique(arangodb::Transaction* trx, TRI_voc_rid_t revisionId if (res != TRI_ERROR_NO_ERROR) { for (size_t j = i; j < n; ++j) { // Free all elements that are not yet in the index - elements[j]->free(); + _allocator->deallocate(elements[j]); } // Already indexed elements will be removed by the rollback break; @@ -796,7 +784,7 @@ int HashIndex::batchInsertUnique(arangodb::Transaction* trx, if (res != TRI_ERROR_NO_ERROR) { for (auto& it : elements) { // free all elements to prevent leak - it->free(); + _allocator->deallocate(it); } return res; } @@ -823,7 +811,7 @@ int HashIndex::batchInsertUnique(arangodb::Transaction* trx, if (res != TRI_ERROR_NO_ERROR) { for (auto& it : elements) { // free all elements to prevent leak - it->free(); + _allocator->deallocate(it); } } @@ -837,7 +825,7 @@ int HashIndex::insertMulti(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (res != TRI_ERROR_NO_ERROR) { for (auto& hashElement : elements) { - hashElement->free(); + _allocator->deallocate(hashElement); } return res; } @@ -855,7 +843,7 @@ int HashIndex::insertMulti(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (found != nullptr) { // already got the exact same index entry. now free our local element... 
- element->free(); + _allocator->deallocate(element); } }; @@ -875,7 +863,7 @@ int HashIndex::insertMulti(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (res != TRI_ERROR_NO_ERROR) { for (size_t j = i; j < n; ++j) { // Free all elements that are not yet in the index - elements[j]->free(); + _allocator->deallocate(elements[j]); } for (size_t j = 0; j < i; ++j) { // Remove all already indexed elements and free them @@ -903,7 +891,7 @@ int HashIndex::batchInsertMulti(arangodb::Transaction* trx, // Filling the elements failed for some reason. Assume loading as failed for (auto& el : elements) { // Free all elements that are not yet in the index - el->free(); + _allocator->deallocate(el); } return res; } @@ -943,7 +931,7 @@ int HashIndex::removeUniqueElement(arangodb::Transaction* trx, } return TRI_ERROR_INTERNAL; } - old->free(); + _allocator->deallocate(old); return TRI_ERROR_NO_ERROR; } @@ -963,7 +951,7 @@ int HashIndex::removeMultiElement(arangodb::Transaction* trx, } return TRI_ERROR_INTERNAL; } - old->free(); + _allocator->deallocate(old); return TRI_ERROR_NO_ERROR; } diff --git a/arangod/Indexes/IndexElement.cpp b/arangod/Indexes/IndexElement.cpp index 16875021f1..b7efa63bf9 100644 --- a/arangod/Indexes/IndexElement.cpp +++ b/arangod/Indexes/IndexElement.cpp @@ -35,26 +35,13 @@ HashIndexElement::HashIndexElement(TRI_voc_rid_t revisionId, std::vector> const& values) { +HashIndexElement* HashIndexElement::initialize(HashIndexElement* element, + TRI_voc_rid_t revisionId, + std::vector> const& values) { TRI_ASSERT(!values.empty()); - void* space = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, baseMemoryUsage(values.size()), false); - - if (space == nullptr) { - return nullptr; - } - - try { - return new (space) HashIndexElement(revisionId, values); - } catch (...) { - TRI_Free(TRI_UNKNOWN_MEM_ZONE, space); - return nullptr; - } + return new (element) HashIndexElement(revisionId, values); } -void HashIndexElement::free() { - TRI_Free(TRI_UNKNOWN_MEM_ZONE, this); -} - /// @brief velocypack sub-object (for indexes, as part of IndexElement, /// if offset is non-zero, then it is an offset into the VelocyPack data in /// the datafile or WAL file. If offset is 0, then data contains the actual data @@ -126,26 +113,13 @@ SkiplistIndexElement::SkiplistIndexElement(TRI_voc_rid_t revisionId, std::vector } } -SkiplistIndexElement* SkiplistIndexElement::create(TRI_voc_rid_t revisionId, std::vector> const& values) { +SkiplistIndexElement* SkiplistIndexElement::initialize(SkiplistIndexElement* element, + TRI_voc_rid_t revisionId, + std::vector> const& values) { TRI_ASSERT(!values.empty()); - void* space = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, baseMemoryUsage(values.size()), false); - - if (space == nullptr) { - return nullptr; - } - - try { - return new (space) SkiplistIndexElement(revisionId, values); - } catch (...) { - TRI_Free(TRI_UNKNOWN_MEM_ZONE, space); - return nullptr; - } + return new (element) SkiplistIndexElement(revisionId, values); } -void SkiplistIndexElement::free() { - TRI_Free(TRI_UNKNOWN_MEM_ZONE, this); -} - /// @brief velocypack sub-object (for indexes, as part of IndexElement, /// if offset is non-zero, then it is an offset into the VelocyPack data in /// the datafile or WAL file. 
If offset is 0, then data contains the actual data diff --git a/arangod/Indexes/IndexElement.h b/arangod/Indexes/IndexElement.h index fdfbf9445e..ecec78dff5 100644 --- a/arangod/Indexes/IndexElement.h +++ b/arangod/Indexes/IndexElement.h @@ -140,9 +140,9 @@ struct HashIndexElement { static uint64_t hash(std::vector> const& values); /// @brief allocate a new index element from a vector of slices - static HashIndexElement* create(TRI_voc_rid_t revisionId, std::vector> const& values); - - void free(); + static HashIndexElement* initialize(HashIndexElement* memory, + TRI_voc_rid_t revisionId, + std::vector> const& values); private: inline IndexElementValue* subObject(size_t position) { @@ -188,9 +188,9 @@ struct SkiplistIndexElement { arangodb::velocypack::Slice slice(IndexLookupContext* context, size_t position) const; /// @brief allocate a new index element from a vector of slices - static SkiplistIndexElement* create(TRI_voc_rid_t revisionId, std::vector> const& values); - - void free(); + static SkiplistIndexElement* initialize(SkiplistIndexElement* element, + TRI_voc_rid_t revisionId, + std::vector> const& values); private: inline IndexElementValue* subObject(size_t position) { diff --git a/arangod/Indexes/PathBasedIndex.cpp b/arangod/Indexes/PathBasedIndex.cpp index 425ecce868..db8969e475 100644 --- a/arangod/Indexes/PathBasedIndex.cpp +++ b/arangod/Indexes/PathBasedIndex.cpp @@ -23,6 +23,7 @@ #include "PathBasedIndex.h" #include "Aql/AstNode.h" +#include "Basics/FixedSizeAllocator.h" #include "Basics/VelocyPackHelper.h" #include "Logger/Logger.h" @@ -53,7 +54,7 @@ arangodb::aql::AstNode const* PathBasedIndex::PermutationState::getValue() /// @brief create the index PathBasedIndex::PathBasedIndex(TRI_idx_iid_t iid, arangodb::LogicalCollection* collection, - VPackSlice const& info, bool allowPartialIndex) + VPackSlice const& info, size_t baseSize, bool allowPartialIndex) : Index(iid, collection, info), _useExpansion(false), _allowPartialIndex(allowPartialIndex) { @@ -69,10 +70,14 @@ PathBasedIndex::PathBasedIndex(TRI_idx_iid_t iid, break; } } + + _allocator.reset(new FixedSizeAllocator(baseSize + sizeof(IndexElementValue) * numPaths())); } /// @brief destroy the index -PathBasedIndex::~PathBasedIndex() {} +PathBasedIndex::~PathBasedIndex() { + _allocator->deallocateAll(); +} /// @brief whether or not the index is implicitly unique /// this can be the case if the index is not declared as unique, but contains a @@ -121,14 +126,16 @@ int PathBasedIndex::fillElement(std::vector& elements, if (slices.size() == n) { // if shapes.size() != n, then the value is not inserted into the index // because of index sparsity! - T* element = T::create(revisionId, slices); + T* element = static_cast(_allocator->allocate()); + TRI_ASSERT(element != nullptr); + element = T::initialize(element, revisionId, slices); if (element == nullptr) { return TRI_ERROR_OUT_OF_MEMORY; } TRI_IF_FAILURE("FillElementOOM") { // clean up manually - element->free(); + _allocator->deallocate(element); return TRI_ERROR_OUT_OF_MEMORY; } @@ -139,7 +146,7 @@ int PathBasedIndex::fillElement(std::vector& elements, elements.emplace_back(element); } catch (...) 
{ - element->free(); + _allocator->deallocate(element); return TRI_ERROR_OUT_OF_MEMORY; } } @@ -155,14 +162,16 @@ int PathBasedIndex::fillElement(std::vector& elements, for (auto& info : toInsert) { TRI_ASSERT(info.size() == n); - T* element = T::create(revisionId, info); + T* element = static_cast(_allocator->allocate()); + TRI_ASSERT(element != nullptr); + element = T::initialize(element, revisionId, info); if (element == nullptr) { return TRI_ERROR_OUT_OF_MEMORY; } TRI_IF_FAILURE("FillElementOOM") { // clean up manually - element->free(); + _allocator->deallocate(element); return TRI_ERROR_OUT_OF_MEMORY; } @@ -173,7 +182,7 @@ int PathBasedIndex::fillElement(std::vector& elements, elements.emplace_back(element); } catch (...) { - element->free(); + _allocator->deallocate(element); return TRI_ERROR_OUT_OF_MEMORY; } } diff --git a/arangod/Indexes/PathBasedIndex.h b/arangod/Indexes/PathBasedIndex.h index a5d7e05780..3b321eeffc 100644 --- a/arangod/Indexes/PathBasedIndex.h +++ b/arangod/Indexes/PathBasedIndex.h @@ -34,6 +34,8 @@ namespace aql { enum AstNodeType : uint32_t; } +class FixedSizeAllocator; + class PathBasedIndex : public Index { protected: struct PermutationState { @@ -61,7 +63,7 @@ class PathBasedIndex : public Index { PathBasedIndex() = delete; PathBasedIndex(TRI_idx_iid_t, arangodb::LogicalCollection*, - arangodb::velocypack::Slice const&, bool allowPartialIndex); + arangodb::velocypack::Slice const&, size_t baseSize, bool allowPartialIndex); ~PathBasedIndex(); @@ -105,6 +107,8 @@ class PathBasedIndex : public Index { std::vector>& sliceStack); protected: + std::unique_ptr _allocator; + /// @brief the attribute paths std::vector> _paths; diff --git a/arangod/Indexes/RocksDBIndex.cpp b/arangod/Indexes/RocksDBIndex.cpp index 5034ced834..ad2791a2a1 100644 --- a/arangod/Indexes/RocksDBIndex.cpp +++ b/arangod/Indexes/RocksDBIndex.cpp @@ -25,6 +25,7 @@ #include "Aql/AstNode.h" #include "Aql/SortCondition.h" #include "Basics/AttributeNameParser.h" +#include "Basics/FixedSizeAllocator.h" #include "Basics/StaticStrings.h" #include "Basics/VelocyPackHelper.h" #include "Indexes/IndexLookupContext.h" @@ -206,7 +207,7 @@ IndexLookupResult RocksDBIterator::next() { RocksDBIndex::RocksDBIndex(TRI_idx_iid_t iid, arangodb::LogicalCollection* collection, arangodb::velocypack::Slice const& info) - : PathBasedIndex(iid, collection, info, true), + : PathBasedIndex(iid, collection, info, 0, true), _db(RocksDBFeature::instance()->db()) {} /// @brief destroy the index @@ -246,7 +247,7 @@ int RocksDBIndex::insert(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, // make sure we clean up before we leave this method auto cleanup = [this, &elements] { for (auto& it : elements) { - it->free(); + _allocator->deallocate(it); } }; @@ -402,7 +403,7 @@ int RocksDBIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, // make sure we clean up before we leave this method auto cleanup = [this, &elements] { for (auto& it : elements) { - it->free(); + _allocator->deallocate(it); } }; diff --git a/arangod/Indexes/SkiplistIndex.cpp b/arangod/Indexes/SkiplistIndex.cpp index 0a57342719..187b9750c3 100644 --- a/arangod/Indexes/SkiplistIndex.cpp +++ b/arangod/Indexes/SkiplistIndex.cpp @@ -25,6 +25,7 @@ #include "Aql/AstNode.h" #include "Aql/SortCondition.h" #include "Basics/AttributeNameParser.h" +#include "Basics/FixedSizeAllocator.h" #include "Basics/StaticStrings.h" #include "Basics/VelocyPackHelper.h" #include "Indexes/IndexLookupContext.h" @@ -715,12 +716,12 @@ void 
SkiplistIterator2::initNextInterval() { SkiplistIndex::SkiplistIndex(TRI_idx_iid_t iid, arangodb::LogicalCollection* collection, VPackSlice const& info) - : PathBasedIndex(iid, collection, info, true), + : PathBasedIndex(iid, collection, info, sizeof(TRI_voc_rid_t), true), CmpElmElm(this), CmpKeyElm(this), _skiplistIndex(nullptr) { _skiplistIndex = - new TRI_Skiplist(CmpElmElm, CmpKeyElm, [this](SkiplistIndexElement* element) { element->free(); }, _unique, _useExpansion); + new TRI_Skiplist(CmpElmElm, CmpKeyElm, [this](SkiplistIndexElement* element) { _allocator->deallocate(element); }, _unique, _useExpansion); } /// @brief destroy the skiplist index @@ -761,7 +762,7 @@ int SkiplistIndex::insert(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (res != TRI_ERROR_NO_ERROR) { for (auto& element : elements) { // free all elements to prevent leak - element->free(); + _allocator->deallocate(element); } return res; } @@ -779,7 +780,7 @@ int SkiplistIndex::insert(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (res != TRI_ERROR_NO_ERROR) { // Note: this element is freed already for (size_t j = i; j < count; ++j) { - elements[j]->free(); + _allocator->deallocate(elements[j]); } for (size_t j = 0; j < i; ++j) { _skiplistIndex->remove(&context, elements[j]); @@ -812,7 +813,7 @@ int SkiplistIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, if (res != TRI_ERROR_NO_ERROR) { for (auto& element : elements) { // free all elements to prevent leak - element->free(); + _allocator->deallocate(element); } return res; } @@ -833,7 +834,7 @@ int SkiplistIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId, res = result; } - elements[i]->free(); + _allocator->deallocate(elements[i]); } return res; diff --git a/arangod/RestHandler/RestBatchHandler.cpp b/arangod/RestHandler/RestBatchHandler.cpp index cb4dfe85e8..3f418f95ef 100644 --- a/arangod/RestHandler/RestBatchHandler.cpp +++ b/arangod/RestHandler/RestBatchHandler.cpp @@ -72,7 +72,7 @@ RestStatus RestBatchHandler::executeHttp() { if (httpResponse == nullptr) { std::cout << "please fix this for vpack" << std::endl; - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type"); } HttpRequest const* httpRequest = @@ -80,7 +80,7 @@ RestStatus RestBatchHandler::executeHttp() { if (httpRequest == nullptr) { std::cout << "please fix this for vpack" << std::endl; - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } // extract the request type @@ -290,7 +290,7 @@ bool RestBatchHandler::getBoundaryBody(std::string* result) { HttpRequest const* req = dynamic_cast(_request.get()); if (req == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } std::string const& bodyStr = req->body(); diff --git a/arangod/RestHandler/RestImportHandler.cpp b/arangod/RestHandler/RestImportHandler.cpp index 49d586791c..33ec66af2e 100644 --- a/arangod/RestHandler/RestImportHandler.cpp +++ b/arangod/RestHandler/RestImportHandler.cpp @@ -276,7 +276,7 @@ int RestImportHandler::handleSingleDocument(SingleCollectionTransaction& trx, bool RestImportHandler::createFromJson(std::string const& type) { if (_request == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request"); } RestImportResult result; @@ -319,7 +319,7 @@ bool RestImportHandler::createFromJson(std::string 
const& type) { linewise = true; if (_response == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response"); } // auto detect import type by peeking at first non-whitespace character @@ -328,7 +328,7 @@ bool RestImportHandler::createFromJson(std::string const& type) { HttpRequest* req = dynamic_cast(_request.get()); if (req == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } std::string const& body = req->body(); @@ -388,7 +388,7 @@ bool RestImportHandler::createFromJson(std::string const& type) { HttpRequest* req = dynamic_cast(_request.get()); if (req == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } // each line is a separate JSON document @@ -529,7 +529,7 @@ bool RestImportHandler::createFromJson(std::string const& type) { bool RestImportHandler::createFromVPack(std::string const& type) { if (_request == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request"); } RestImportResult result; @@ -637,7 +637,7 @@ bool RestImportHandler::createFromVPack(std::string const& type) { bool RestImportHandler::createFromKeyValueList() { if (_request == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request"); } RestImportResult result; @@ -679,7 +679,7 @@ bool RestImportHandler::createFromKeyValueList() { HttpRequest* httpRequest = dynamic_cast(_request.get()); if (httpRequest == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } std::string const& bodyStr = httpRequest->body(); diff --git a/arangod/RestHandler/RestPleaseUpgradeHandler.cpp b/arangod/RestHandler/RestPleaseUpgradeHandler.cpp index b370d267e5..ce3c881fba 100644 --- a/arangod/RestHandler/RestPleaseUpgradeHandler.cpp +++ b/arangod/RestHandler/RestPleaseUpgradeHandler.cpp @@ -40,7 +40,7 @@ RestStatus RestPleaseUpgradeHandler::execute() { auto response = dynamic_cast(_response.get()); if (response == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type"); } resetResponse(rest::ResponseCode::OK); diff --git a/arangod/RestHandler/RestReplicationHandler.cpp b/arangod/RestHandler/RestReplicationHandler.cpp index c6597eaf3b..3ef5fa436f 100644 --- a/arangod/RestHandler/RestReplicationHandler.cpp +++ b/arangod/RestHandler/RestReplicationHandler.cpp @@ -744,7 +744,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() { useVpp = true; } if (_request == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request"); } // First check the DBserver component of the body json: @@ -783,7 +783,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() { if (!useVpp) { HttpRequest* httpRequest = dynamic_cast(_request.get()); if (httpRequest == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } // Send a synchronous request to that shard using ClusterComm: @@ -833,7 +833,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() { if (!useVpp) { HttpResponse* httpResponse = dynamic_cast(_response.get()); if (_response == nullptr) { - 
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type"); } httpResponse->body().swap(&(res->result->getBody())); } else { @@ -1014,7 +1014,7 @@ void RestReplicationHandler::handleCommandLoggerFollow() { dynamic_cast(_response.get()); if (httpResponse == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type"); } if (length > 0) { @@ -1086,7 +1086,7 @@ void RestReplicationHandler::handleCommandDetermineOpenTransactions() { HttpResponse* httpResponse = dynamic_cast(_response.get()); if (_response == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type"); } _response->setContentType(rest::ContentType::DUMP); @@ -2132,7 +2132,7 @@ int RestReplicationHandler::processRestoreDataBatch( HttpRequest* httpRequest = dynamic_cast(_request.get()); if (httpRequest == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } std::string const& bodyStr = httpRequest->body(); @@ -2466,12 +2466,12 @@ void RestReplicationHandler::handleCommandRestoreDataCoordinator() { VPackBuilder builder; if (_request == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } HttpRequest* httpRequest = dynamic_cast(_request.get()); if (httpRequest == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } std::string const& bodyStr = httpRequest->body(); @@ -3099,7 +3099,7 @@ void RestReplicationHandler::handleCommandDump() { auto response = dynamic_cast(_response.get()); if (response == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type"); } response->setContentType(rest::ContentType::DUMP); diff --git a/arangod/RestHandler/RestSimpleHandler.cpp b/arangod/RestHandler/RestSimpleHandler.cpp index 714c0f5cc6..4c505ed185 100644 --- a/arangod/RestHandler/RestSimpleHandler.cpp +++ b/arangod/RestHandler/RestSimpleHandler.cpp @@ -287,7 +287,7 @@ void RestSimpleHandler::lookupByKeys(VPackSlice const& slice) { auto response = _response.get(); if (response == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response"); } try { diff --git a/arangod/RestHandler/RestUploadHandler.cpp b/arangod/RestHandler/RestUploadHandler.cpp index e67f7c7a94..4be522d8cc 100644 --- a/arangod/RestHandler/RestUploadHandler.cpp +++ b/arangod/RestHandler/RestUploadHandler.cpp @@ -46,7 +46,7 @@ RestStatus RestUploadHandler::execute() { HttpRequest* request = dynamic_cast(_request.get()); if (request == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } // extract the request type @@ -152,7 +152,7 @@ bool RestUploadHandler::parseMultiPart(char const*& body, size_t& length) { HttpRequest* request = dynamic_cast(_request.get()); if (request == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } std::string const& bodyStr = request->body(); diff --git a/arangod/Utils/Cursor.cpp b/arangod/Utils/Cursor.cpp index f799486e1c..84a411f72a 100644 --- a/arangod/Utils/Cursor.cpp +++ 
b/arangod/Utils/Cursor.cpp @@ -159,7 +159,7 @@ void VelocyPackCursor::dump(VPackBuilder& builder) { } catch (std::exception const& ex) { THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, ex.what()); } catch (...) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "internal error during VPackCursor::dump"); } } @@ -298,7 +298,7 @@ void ExportCursor::dump(VPackBuilder& builder) { } catch (std::exception const& ex) { THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, ex.what()); } catch (...) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "internal error during ExportCursor::dump"); } builder.options = oldOptions; } diff --git a/arangod/Utils/Transaction.cpp b/arangod/Utils/Transaction.cpp index 59d2106d2d..fe4021c53b 100644 --- a/arangod/Utils/Transaction.cpp +++ b/arangod/Utils/Transaction.cpp @@ -649,7 +649,7 @@ DocumentDitch* Transaction::orderDitch(TRI_voc_cid_t cid) { TRI_transaction_collection_t* trxCollection = TRI_GetCollectionTransaction(_trx, cid, TRI_TRANSACTION_READ); if (trxCollection == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to determine transaction collection"); } TRI_ASSERT(trxCollection->_collection != nullptr); diff --git a/arangod/V8Server/v8-actions.cpp b/arangod/V8Server/v8-actions.cpp index 3da1254aac..5701d6e19c 100644 --- a/arangod/V8Server/v8-actions.cpp +++ b/arangod/V8Server/v8-actions.cpp @@ -407,7 +407,7 @@ static v8::Handle RequestCppToV8(v8::Isolate* isolate, if (rest::ContentType::JSON == request->contentType()) { auto httpreq = dynamic_cast(request); if (httpreq == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } std::string const& body = httpreq->body(); req->ForceSet(RequestBodyKey, TRI_V8_STD_STRING(body)); @@ -516,7 +516,7 @@ static v8::Handle RequestCppToV8(v8::Isolate* isolate, HttpRequest* httpRequest = dynamic_cast(request); if (httpRequest == nullptr) { // maybe we can just continue - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type"); } else { for (auto& it : httpRequest->cookieValues()) { cookiesObject->ForceSet(TRI_V8_STD_STRING(it.first), @@ -832,7 +832,7 @@ static TRI_action_result_t ExecuteActionVocbase( v8::TryCatch tryCatch; if (response == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response"); } TRI_GET_GLOBALS(); diff --git a/arangod/V8Server/v8-query.cpp b/arangod/V8Server/v8-query.cpp index 7f3d165a10..d1d72b5154 100644 --- a/arangod/V8Server/v8-query.cpp +++ b/arangod/V8Server/v8-query.cpp @@ -111,7 +111,7 @@ static void EdgesQuery(TRI_edge_direction_e direction, return "FILTER doc._from " + op + " @value || doc._to " + op + " @value"; } - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid edge index direction"); }; arangodb::LogicalCollection const* collection = diff --git a/arangod/V8Server/v8-replication.cpp b/arangod/V8Server/v8-replication.cpp index 70c77b7cef..b6c1a60996 100644 --- a/arangod/V8Server/v8-replication.cpp +++ b/arangod/V8Server/v8-replication.cpp @@ -433,7 +433,7 @@ static void JS_ConfigureApplierReplication( } if (vocbase->replicationApplier() == nullptr) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + 
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier"); } if (args.Length() == 0) { @@ -702,7 +702,7 @@ static void JS_StartApplierReplication( } if (vocbase->replicationApplier() == nullptr) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier"); } if (args.Length() > 2) { @@ -753,7 +753,7 @@ static void JS_ShutdownApplierReplication( } if (vocbase->replicationApplier() == nullptr) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier"); } int res = vocbase->replicationApplier()->shutdown(); @@ -786,7 +786,7 @@ static void JS_StateApplierReplication( } if (vocbase->replicationApplier() == nullptr) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier"); } std::shared_ptr builder = vocbase->replicationApplier()->toVelocyPack(); @@ -817,7 +817,7 @@ static void JS_ForgetApplierReplication( } if (vocbase->replicationApplier() == nullptr) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier"); } int res = vocbase->replicationApplier()->forget(); diff --git a/arangod/V8Server/v8-vocbase.cpp b/arangod/V8Server/v8-vocbase.cpp index 7613ae72f3..18dd22ff4c 100644 --- a/arangod/V8Server/v8-vocbase.cpp +++ b/arangod/V8Server/v8-vocbase.cpp @@ -286,7 +286,7 @@ static void JS_Transaction(v8::FunctionCallbackInfo const& args) { } if (params.IsEmpty()) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to decode function parameters"); } bool embed = false; @@ -375,7 +375,7 @@ static void JS_Transaction(v8::FunctionCallbackInfo const& args) { } catch (std::exception const& ex) { TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, ex.what()); } catch (...) 
{ - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "caught unknown exception during transaction"); } res = trx.commit(); @@ -2044,7 +2044,7 @@ static void JS_UseDatabase(v8::FunctionCallbackInfo const& args) { TRI_vocbase_t* vocbase = GetContextVocBase(isolate); if (vocbase == nullptr) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find database"); } if (vocbase->isDropped()) { @@ -2270,7 +2270,7 @@ static void CreateDatabaseCoordinator( } if (vocbase == nullptr) { - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find database"); } // now run upgrade and copy users into context diff --git a/arangod/V8Server/v8-vocindex.cpp b/arangod/V8Server/v8-vocindex.cpp index f81bf9eeb4..d87dd1688b 100644 --- a/arangod/V8Server/v8-vocindex.cpp +++ b/arangod/V8Server/v8-vocindex.cpp @@ -599,7 +599,7 @@ static void EnsureIndex(v8::FunctionCallbackInfo const& args, VPackSlice f = flds.at(i); if (!f.isString()) { // index attributes must be strings - TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL); + TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "index field names should be strings"); } indexKeys.emplace(f.copyString()); } diff --git a/arangod/VocBase/LogicalCollection.cpp b/arangod/VocBase/LogicalCollection.cpp index db92f4ee18..5270d855ef 100644 --- a/arangod/VocBase/LogicalCollection.cpp +++ b/arangod/VocBase/LogicalCollection.cpp @@ -244,7 +244,7 @@ static std::shared_ptr PrepareIndexFromSlice(VPackSlice info, switch (type) { case arangodb::Index::TRI_IDX_TYPE_UNKNOWN: { - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid index type"); } case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: { if (!isClusterConstructor) { diff --git a/arangod/Wal/RecoverState.cpp b/arangod/Wal/RecoverState.cpp index 517e34f2f4..1f06d4b4c5 100644 --- a/arangod/Wal/RecoverState.cpp +++ b/arangod/Wal/RecoverState.cpp @@ -52,7 +52,7 @@ template static inline T NumericValue(VPackSlice const& slice, char const* attribute) { if (!slice.isObject()) { LOG(ERR) << "invalid value type when looking for attribute '" << attribute << "': expecting object"; - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, "invalid attribute value: expecting object"); } VPackSlice v = slice.get(attribute); if (v.isString()) { @@ -63,7 +63,7 @@ static inline T NumericValue(VPackSlice const& slice, char const* attribute) { } LOG(ERR) << "invalid value for attribute '" << attribute << "'"; - THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL); + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, "invalid attribute value"); } /// @brief creates the recover state diff --git a/lib/Basics/Exceptions.cpp b/lib/Basics/Exceptions.cpp index 7010cd9620..41d645312a 100644 --- a/lib/Basics/Exceptions.cpp +++ b/lib/Basics/Exceptions.cpp @@ -108,6 +108,8 @@ char const* Exception::what() const throw() { return _errorMessage.c_str(); } void Exception::appendLocation () { if (_code == TRI_ERROR_INTERNAL) { _errorMessage += std::string(" (exception location: ") + _file + ":" + std::to_string(_line) + "). 
Please report this error to arangodb.com"; + } else if (_code == TRI_ERROR_OUT_OF_MEMORY) { + _errorMessage += std::string(" (exception location: ") + _file + ":" + std::to_string(_line) + ")."; } #ifdef ARANGODB_ENABLE_MAINTAINER_MODE diff --git a/lib/Basics/FixedSizeAllocator.h b/lib/Basics/FixedSizeAllocator.h new file mode 100644 index 0000000000..104233a4ab --- /dev/null +++ b/lib/Basics/FixedSizeAllocator.h @@ -0,0 +1,163 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Max Neunhoeffer +/// @author Jan Steemann +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_BASICS_FIXED_SIZE_ALLOCATOR_H +#define ARANGODB_BASICS_FIXED_SIZE_ALLOCATOR_H 1 + +#include "Basics/Common.h" +#include "Logger/Logger.h" + +namespace arangodb { + +class FixedSizeAllocator { + private: + + class MemoryBlock { + public: + MemoryBlock(MemoryBlock const&) = delete; + MemoryBlock& operator=(MemoryBlock const&) = delete; + + MemoryBlock(size_t itemSize, size_t nrItems) + : _itemSize(itemSize), _nrAlloc(nrItems), _nrUsed(0), _alloc(nullptr), _data(nullptr) { + + _alloc = new char[(itemSize * nrItems) + 64]; + + // adjust to cache line offset (assumed to be 64 bytes) + _data = reinterpret_cast( + (reinterpret_cast(_alloc) + 63) & ~((uintptr_t)0x3fu)); + } + + MemoryBlock(MemoryBlock&& other) + : _itemSize(other._itemSize), _nrAlloc(other._nrAlloc), _nrUsed(other._nrUsed), _alloc(other._alloc), _data(other._data) { + other._nrAlloc = 0; + other._nrUsed = 0; + other._alloc = nullptr; + other._data = nullptr; + } + + MemoryBlock& operator=(MemoryBlock&& other) { + if (this != &other) { + TRI_ASSERT(_itemSize == other._itemSize); + + delete [] _alloc; + _nrAlloc = other._nrAlloc; + _nrUsed = other._nrUsed; + _alloc = other._alloc; + _data = other._data; + + other._nrAlloc = 0; + other._nrUsed = 0; + other._alloc = nullptr; + other._data = nullptr; + } + + return *this; + } + + ~MemoryBlock() { + delete[] _alloc; + } + + void* next() { + TRI_ASSERT(_nrUsed < _nrAlloc); + return static_cast(_data + (_itemSize * _nrUsed++)); + } + + inline bool full() const { + return _nrUsed == _nrAlloc; + } + + size_t memoryUsage() const { + return (_data - _alloc) + _itemSize * _nrAlloc; + } + + private: + size_t const _itemSize; + size_t _nrAlloc; + size_t _nrUsed; + char* _alloc; + char* _data; + }; + + public: + FixedSizeAllocator(FixedSizeAllocator const&) = delete; + FixedSizeAllocator& operator=(FixedSizeAllocator const&) = delete; + + explicit FixedSizeAllocator(size_t itemSize) + : _itemSize(itemSize), _freelist(nullptr) { + _blocks.reserve(4); + } + + ~FixedSizeAllocator() {} + + void* allocate() { + if (_freelist != nullptr) { + void* element = _freelist; + 
_freelist = *reinterpret_cast(_freelist); + return element; + } + + if (_blocks.empty() || _blocks.back()->full()) { + allocateBlock(); + } + TRI_ASSERT(!_blocks.empty()); + TRI_ASSERT(!_blocks.back()->full()); + + return _blocks.back()->next(); + } + + void deallocateAll() { + _blocks.clear(); + _freelist = nullptr; + } + + void deallocate(void* value) noexcept { + *reinterpret_cast(value) = _freelist; + _freelist = value; + } + + size_t memoryUsage() const { + size_t total = 0; + for (auto const& it : _blocks) { + total += it->memoryUsage(); + } + return total; + } + + private: + void allocateBlock() { + size_t const size = 128 << (std::min)(size_t(8), _blocks.size()); + auto block = std::make_unique(_itemSize, size); + _blocks.emplace_back(block.get()); + block.release(); + } + + std::vector> _blocks; + size_t _itemSize; + void* _freelist; +}; + +} + +#endif From 5c868d05dc592071c63ac2690b6e3976eec340b3 Mon Sep 17 00:00:00 2001 From: jsteemann Date: Fri, 9 Dec 2016 23:27:45 +0100 Subject: [PATCH 02/12] ported velocypack compatibility fix from arangodb/velocypack --- .../velocypack/include/velocypack/Slice.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/3rdParty/velocypack/include/velocypack/Slice.h b/3rdParty/velocypack/include/velocypack/Slice.h index 7088ec6983..9b2d005760 100644 --- a/3rdParty/velocypack/include/velocypack/Slice.h +++ b/3rdParty/velocypack/include/velocypack/Slice.h @@ -468,14 +468,14 @@ class Slice { if (!isExternal()) { throw Exception(Exception::InvalidValueType, "Expecting type External"); } - return extractValue(); + return extractPointer(); } // returns the Slice managed by an External or the Slice itself if it's not // an External Slice resolveExternal() const { if (*_start == 0x1d) { - return Slice(extractValue()); + return Slice(extractPointer()); } return *this; } @@ -485,7 +485,7 @@ class Slice { Slice resolveExternals() const { char const* current = reinterpret_cast(_start); while (*current == 0x1d) { - current = Slice(current).extractValue(); + current = Slice(current).extractPointer(); } return Slice(current); } @@ -908,15 +908,14 @@ class Slice { } #endif - // extracts a value from the slice and converts it into a - // built-in type - template - T extractValue() const { + // extracts a pointer from the slice and converts it into a + // built-in pointer type + char const* extractPointer() const { union { - T value; - char binary[sizeof(T)]; + char const* value; + char binary[sizeof(char const*)]; }; - memcpy(&binary[0], _start + 1, sizeof(T)); + memcpy(&binary[0], _start + 1, sizeof(char const*)); return value; } }; From f2a6864db404e134e66bac6621efcb68eb0c0583 Mon Sep 17 00:00:00 2001 From: Alan Plum Date: Mon, 12 Dec 2016 02:29:38 +0100 Subject: [PATCH 03/12] Require at least one arg in route def --- js/server/modules/@arangodb/foxx/router/router.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/server/modules/@arangodb/foxx/router/router.js b/js/server/modules/@arangodb/foxx/router/router.js index 008f6cb4b0..518e40e823 100644 --- a/js/server/modules/@arangodb/foxx/router/router.js +++ b/js/server/modules/@arangodb/foxx/router/router.js @@ -106,7 +106,7 @@ const Router = module.exports = [['path', 'string'], ...repeat(Math.max(1, args.length - 2), ['handler', 'function']), ['name', 'string']], [['path', 'string'], ...repeat(Math.max(1, args.length - 1), ['handler', 'function'])], [...repeat(Math.max(1, args.length - 1), ['handler', 'function']), ['name', 'string']], - 
repeat(args.length, ['handler', 'function']) + repeat(Math.max(1, args.length - 1), ['handler', 'function']) ); const path = argv.path; const handler = argv.handler; @@ -130,7 +130,7 @@ ALL_METHODS.forEach(function (method) { [['path', 'string'], ...repeat(Math.max(1, args.length - 2), ['handler', 'function']), ['name', 'string']], [['path', 'string'], ...repeat(Math.max(1, args.length - 1), ['handler', 'function'])], [...repeat(Math.max(1, args.length - 1), ['handler', 'function']), ['name', 'string']], - repeat(args.length, ['handler', 'function']) + repeat(Math.max(1, args.length - 1), ['handler', 'function']) ); const path = argv.path; const handler = argv.handler; From cd98d53ec4ca03af0e07b1d5b7b736c1258c4b38 Mon Sep 17 00:00:00 2001 From: Alan Plum Date: Mon, 12 Dec 2016 02:30:05 +0100 Subject: [PATCH 04/12] Add swagger route to Foxx API --- js/apps/system/_api/foxx/APP/index.js | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/js/apps/system/_api/foxx/APP/index.js b/js/apps/system/_api/foxx/APP/index.js index 87a6675b1b..eabe89187e 100644 --- a/js/apps/system/_api/foxx/APP/index.js +++ b/js/apps/system/_api/foxx/APP/index.js @@ -8,6 +8,7 @@ const semver = require('semver'); const actions = require('@arangodb/actions'); const ArangoError = require('@arangodb').ArangoError; const errors = require('@arangodb').errors; +const swaggerJson = require('@arangodb/foxx/legacy/swagger').swaggerJson; const fm = require('@arangodb/foxx/manager'); const fmu = require('@arangodb/foxx/manager-utils'); const createRouter = require('@arangodb/foxx/router'); @@ -400,3 +401,14 @@ instanceRouter.get('/readme', (req, res) => { .description(dd` Fetches the service's README or README.md file's contents if any. `); + +instanceRouter.get('/swagger', (req, res) => { + swaggerJson(req, res, { + mount: req.service.mount + }); +}) +.response(200, joi.object(), `Service Swagger description.`) +.summary(`Swagger description`) +.description(dd` + Fetches the Swagger API description for the service at the given mount path. 
+`); From 9a81d2cb6308e3bd296bed78fce876538708ca3e Mon Sep 17 00:00:00 2001 From: Alan Plum Date: Mon, 12 Dec 2016 02:31:47 +0100 Subject: [PATCH 05/12] Add support for Swagger tags --- .../@arangodb/foxx/router/swagger-context.js | 27 +++++++++++++++++++ .../@arangodb/foxx/templates/dcRouter.js.tmpl | 3 +++ .../@arangodb/foxx/templates/ecRouter.js.tmpl | 4 +++ 3 files changed, 34 insertions(+) diff --git a/js/server/modules/@arangodb/foxx/router/swagger-context.js b/js/server/modules/@arangodb/foxx/router/swagger-context.js index 8144d0292d..79dd42b94c 100644 --- a/js/server/modules/@arangodb/foxx/router/swagger-context.js +++ b/js/server/modules/@arangodb/foxx/router/swagger-context.js @@ -49,6 +49,14 @@ const PARSED_JSON_MIME = (function (mime) { ])); }(MIME_JSON)); +const repeat = (times, value) => { + const arr = Array(times); + for (let i = 0; i < times; i++) { + arr[i] = value; + } + return arr; +}; + module.exports = exports = class SwaggerContext { constructor (path) { @@ -75,6 +83,7 @@ module.exports = exports = this._pathParams = new Map(); this._pathParamNames = []; this._pathTokens = tokenize(path, this); + this._tags = new Set(); } header (...args) { @@ -262,6 +271,18 @@ module.exports = exports = return this; } + tag (...tags) { + tags = check( + 'endpoint.tag', + tags, + [...repeat(Math.max(1, tags.length), ['tag', 'string'])] + ); + for (const tag of tags) { + this._tags.add(tag); + } + return this; + } + deprecated (...args) { const [flag] = check( 'endpoint.summary', @@ -284,6 +305,9 @@ module.exports = exports = for (const response of swaggerObj._responses.entries()) { this._responses.set(response[0], response[1]); } + for (const tag of swaggerObj._tags) { + this._tags.add(tag); + } if (!this._bodyParam && swaggerObj._bodyParam) { this._bodyParam = swaggerObj._bodyParam; } @@ -335,6 +359,9 @@ module.exports = exports = if (this._summary) { operation.summary = this._summary; } + if (this._tags) { + operation.tags = Array.from(this._tags); + } if (this._bodyParam) { operation.consumes = ( this._bodyParam.contentTypes diff --git a/js/server/modules/@arangodb/foxx/templates/dcRouter.js.tmpl b/js/server/modules/@arangodb/foxx/templates/dcRouter.js.tmpl index 1c3abd1f54..5e095d5de6 100644 --- a/js/server/modules/@arangodb/foxx/templates/dcRouter.js.tmpl +++ b/js/server/modules/@arangodb/foxx/templates/dcRouter.js.tmpl @@ -21,6 +21,9 @@ const router = createRouter(); module.exports = router; +router.tag('<%= document %>'); + + router.get(function (req, res) { res.send(<%= documents %>.all()); }, 'list') diff --git a/js/server/modules/@arangodb/foxx/templates/ecRouter.js.tmpl b/js/server/modules/@arangodb/foxx/templates/ecRouter.js.tmpl index 270101f539..37e485f390 100644 --- a/js/server/modules/@arangodb/foxx/templates/ecRouter.js.tmpl +++ b/js/server/modules/@arangodb/foxx/templates/ecRouter.js.tmpl @@ -20,6 +20,10 @@ const HTTP_CONFLICT = status('conflict'); const router = createRouter(); module.exports = router; + +router.tag('<%= document %>'); + + const New<%= model %> = Object.assign({}, <%= model %>, { schema: Object.assign({}, <%= model %>.schema, { _from: joi.string(), From 3e93980ae925c265ce27f7b2a6d8d7d87f525d45 Mon Sep 17 00:00:00 2001 From: Alan Plum Date: Mon, 12 Dec 2016 02:32:38 +0100 Subject: [PATCH 06/12] Generate unique Swagger operationIds --- .../modules/@arangodb/foxx/router/tree.js | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/js/server/modules/@arangodb/foxx/router/tree.js 
b/js/server/modules/@arangodb/foxx/router/tree.js index 6cd6a3815a..0d8cc6ac2d 100644 --- a/js/server/modules/@arangodb/foxx/router/tree.js +++ b/js/server/modules/@arangodb/foxx/router/tree.js @@ -36,6 +36,10 @@ const validation = require('@arangodb/foxx/router/validation'); const $_ROUTES = Symbol.for('@@routes'); // routes and child routers const $_MIDDLEWARE = Symbol.for('@@middleware'); // middleware +function ucFirst (str) { + return str[0].toUpperCase() + str.slice(1); +} + module.exports = class Tree { constructor (context, router) { @@ -135,11 +139,16 @@ module.exports = buildSwaggerPaths () { const paths = {}; + const ids = new Set(); for (const route of this.flatten()) { const parts = []; const swagger = new SwaggerContext(); let i = 0; + const names = []; for (const item of route) { + if (item.name) { + names.push(item.name); + } if (item.router) { swagger._merge(item, true); } else { @@ -164,10 +173,28 @@ module.exports = } const pathItem = paths[path]; const operation = swagger._buildOperation(); + if (names.length) { + operation.operationId = names + .map((name, i) => (i ? ucFirst(name) : name)) + .join(''); + } for (let method of swagger._methods) { method = method.toLowerCase(); if (!pathItem[method]) { - pathItem[method] = operation; + if (operation.operationId && swagger._methods.length > 1) { + const op = Object.assign({}, operation); + pathItem[method] = op; + if (ids.has(op.operationId)) { + let i = 2; + while (ids.has(op.operationId + i)) { + i++; + } + op.operationId += i; + } + ids.add(op.operationId); + } else { + pathItem[method] = operation; + } } } } From ad4ba248a52683ac9b4314eb5590405171c648e3 Mon Sep 17 00:00:00 2001 From: Jan Steemann Date: Mon, 12 Dec 2016 08:49:59 +0100 Subject: [PATCH 07/12] try to fix travis build --- arangod/Cluster/ClusterInfo.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 232e2559a1..31555f7125 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -2004,8 +2004,8 @@ void ClusterInfo::loadServers() { } AgencyCommResult result = _agency.sendTransactionWithFailover( - AgencyReadTransaction({AgencyCommManager::path(prefixServers), - AgencyCommManager::path(mapUniqueToShortId)})); + AgencyReadTransaction(std::vector({AgencyCommManager::path(prefixServers), + AgencyCommManager::path(mapUniqueToShortId)}))); if (result.successful()) { From 4f46fbe239cce8e51dfb80b75e41869e123fb1fb Mon Sep 17 00:00:00 2001 From: jsteemann Date: Mon, 12 Dec 2016 09:09:08 +0100 Subject: [PATCH 08/12] cppcheck --- arangod/Agency/MoveShard.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arangod/Agency/MoveShard.cpp b/arangod/Agency/MoveShard.cpp index e5effed513..4ac4a28633 100644 --- a/arangod/Agency/MoveShard.cpp +++ b/arangod/Agency/MoveShard.cpp @@ -76,7 +76,7 @@ bool MoveShard::create() { _jb->openObject(); // Lookup from server - if (_from.find("DBServer") == 0) { + if (_from.compare("DBServer") == 0) { try { _from = uuidLookup(_snapshot, _from); } catch (...) { @@ -84,7 +84,7 @@ bool MoveShard::create() { "MoveShard: From server " << _from << " does not exist"; } } - if (_to.find("DBServer") == 0) { + if (_to.compare("DBServer") == 0) { try { _to = uuidLookup(_snapshot, _to); } catch (...) 
{ From e9322eea8d01df183b0d42566824036927565b9c Mon Sep 17 00:00:00 2001 From: jsteemann Date: Mon, 12 Dec 2016 10:10:58 +0100 Subject: [PATCH 09/12] cppcheck --- arangod/Agency/MoveShard.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arangod/Agency/MoveShard.cpp b/arangod/Agency/MoveShard.cpp index 4ac4a28633..8e65e722b4 100644 --- a/arangod/Agency/MoveShard.cpp +++ b/arangod/Agency/MoveShard.cpp @@ -50,7 +50,7 @@ MoveShard::MoveShard(Node const& snapshot, Agent* agent, } } } catch (std::exception const& e) { - LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << __FILE__ << __LINE__; + LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << ": " << __FILE__ << ":" << __LINE__; finish("Shards/" + _shard, false, e.what()); } } @@ -232,7 +232,7 @@ bool MoveShard::start() { try { todo.add(_jb->slice()[0].get(_agencyPrefix + toDoPrefix + _jobId)); } catch (std::exception const& e) { - LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << __FILE__ << __LINE__; + LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << ": " << __FILE__ << ":" << __LINE__; } } todo.close(); From ec18efc80e3ddb09dc9daa288d7a8204ce6103c0 Mon Sep 17 00:00:00 2001 From: Kaveh Vahedipour Date: Mon, 12 Dec 2016 10:29:41 +0100 Subject: [PATCH 10/12] moveShard jobs running --- arangod/Agency/CleanOutServer.cpp | 10 ++++ arangod/Agency/MoveShard.cpp | 81 ++++++++++++++++++++++++------- 2 files changed, 73 insertions(+), 18 deletions(-) diff --git a/arangod/Agency/CleanOutServer.cpp b/arangod/Agency/CleanOutServer.cpp index c8043511b2..0faff8e964 100644 --- a/arangod/Agency/CleanOutServer.cpp +++ b/arangod/Agency/CleanOutServer.cpp @@ -121,6 +121,16 @@ JOB_STATUS CleanOutServer::status() { bool CleanOutServer::create() { // Only through shrink cluster + // Lookup server + if (_server.find("DBServer") == 0) { + try { + _server = uuidLookup(_snapshot, _server); + } catch (...) { + LOG_TOPIC(ERR, Logger::AGENCY) << + "MoveShard: To server " << _server << " does not exist"; + } + } + LOG_TOPIC(INFO, Logger::AGENCY) << "Todo: Clean out server " + _server + " for shrinkage"; diff --git a/arangod/Agency/MoveShard.cpp b/arangod/Agency/MoveShard.cpp index e5effed513..065bf9e995 100644 --- a/arangod/Agency/MoveShard.cpp +++ b/arangod/Agency/MoveShard.cpp @@ -39,6 +39,7 @@ MoveShard::MoveShard(Node const& snapshot, Agent* agent, _shard(shard), _from(from), _to(to) { + try { JOB_STATUS js = status(); @@ -58,6 +59,27 @@ MoveShard::MoveShard(Node const& snapshot, Agent* agent, MoveShard::~MoveShard() {} bool MoveShard::create() { + + // Lookup from server + if (_from.find("DBServer") == 0) { + try { + _from = uuidLookup(_snapshot, _from); + } catch (...) { + LOG_TOPIC(ERR, Logger::AGENCY) << + "MoveShard: From server " << _from << " does not exist"; + } + } + + // Lookup to Server + if (_to.find("DBServer") == 0) { + try { + _to = uuidLookup(_snapshot, _to); + } catch (...) { + LOG_TOPIC(ERR, Logger::AGENCY) << + "MoveShard: To server " << _to << " does not exist"; + } + } + LOG_TOPIC(INFO, Logger::AGENCY) << "Todo: Move shard " + _shard + " from " + _from + " to " << _to; @@ -75,24 +97,6 @@ bool MoveShard::create() { _jb->openArray(); _jb->openObject(); - // Lookup from server - if (_from.find("DBServer") == 0) { - try { - _from = uuidLookup(_snapshot, _from); - } catch (...) { - LOG_TOPIC(ERR, Logger::AGENCY) << - "MoveShard: From server " << _from << " does not exist"; - } - } - if (_to.find("DBServer") == 0) { - try { - _to = uuidLookup(_snapshot, _to); - } catch (...) 
{ - LOG_TOPIC(ERR, Logger::AGENCY) << - "MoveShard: To server " << _to << " does not exist"; - } - } - if (_from == _to) { path = _agencyPrefix + failedPrefix + _jobId; _jb->add("timeFinished", VPackValue(now)); @@ -138,6 +142,26 @@ bool MoveShard::create() { } bool MoveShard::start() { + + // Lookup from server + if (_from.find("DBServer") == 0) { + try { + _from = uuidLookup(_snapshot, _from); + } catch (...) { + LOG_TOPIC(ERR, Logger::AGENCY) << + "MoveShard: From server " << _from << " does not exist"; + } + } + + // Lookup to Server + if (_to.find("DBServer") == 0) { + try { + _to = uuidLookup(_snapshot, _to); + } catch (...) { + LOG_TOPIC(ERR, Logger::AGENCY) << + "MoveShard: To server " << _to << " does not exist"; + } + } // Are we distributeShardsLiking other shard? // Invoke moveShard there @@ -355,6 +379,27 @@ JOB_STATUS MoveShard::status() { _to = _snapshot(pos[status] + _jobId + "/toServer").getString(); _shard = _snapshot(pos[status] + _jobId + "/shards").slice()[0].copyString(); + + // Lookup from server + if (_from.find("DBServer") == 0) { + try { + _from = uuidLookup(_snapshot, _from); + } catch (...) { + LOG_TOPIC(ERR, Logger::AGENCY) << + "MoveShard: From server " << _from << " does not exist"; + } + } + + // Lookup to Server + if (_to.find("DBServer") == 0) { + try { + _to = uuidLookup(_snapshot, _to); + } catch (...) { + LOG_TOPIC(ERR, Logger::AGENCY) << + "MoveShard: To server " << _to << " does not exist"; + } + } + } catch (std::exception const& e) { std::string err = std::string("Failed to find job ") + _jobId + " in agency: " + e.what(); From e68ba685d598a632964a5f8cf2e7c54c6bf29843 Mon Sep 17 00:00:00 2001 From: jsteemann Date: Mon, 12 Dec 2016 11:53:30 +0100 Subject: [PATCH 11/12] fix VS warning --- lib/Basics/IndexBucket.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Basics/IndexBucket.h b/lib/Basics/IndexBucket.h index 3644ff01c7..41adfb68d9 100644 --- a/lib/Basics/IndexBucket.h +++ b/lib/Basics/IndexBucket.h @@ -134,7 +134,7 @@ struct IndexBucket { } #endif - _nrAlloc = numberElements; + _nrAlloc = static_cast(numberElements); } catch (...) { deallocateTempfile(); TRI_ASSERT(_file == -1); From 27099a1f7300b4dc0404d4e705d172a90ce6caed Mon Sep 17 00:00:00 2001 From: Alan Plum Date: Mon, 12 Dec 2016 13:12:35 +0100 Subject: [PATCH 12/12] Clarify the default value of req.body See #2215. --- Documentation/Books/Manual/Foxx/Router/Endpoints.mdpp | 2 ++ Documentation/Books/Manual/Foxx/Router/Request.mdpp | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/Books/Manual/Foxx/Router/Endpoints.mdpp b/Documentation/Books/Manual/Foxx/Router/Endpoints.mdpp index f04f57f35e..940660382c 100644 --- a/Documentation/Books/Manual/Foxx/Router/Endpoints.mdpp +++ b/Documentation/Books/Manual/Foxx/Router/Endpoints.mdpp @@ -115,6 +115,8 @@ body Defines the request body recognized by the endpoint. There can only be one request body definition per endpoint. The definition will also be shown in the route details in the API documentation. +In the absence of a request body definition, the request object's *body* property will be initialized to the unprocessed *rawBody* buffer. + If the endpoint is a child router, all routes of that router will use this body definition unless overridden. If the endpoint is a middleware, the request body will only be parsed once (i.e. the MIME types of the route matching the same request will be ignored but the body will still be validated again). 
**Arguments** diff --git a/Documentation/Books/Manual/Foxx/Router/Request.mdpp b/Documentation/Books/Manual/Foxx/Router/Request.mdpp index 9bdb4ddcef..6dcaeb6b3f 100644 --- a/Documentation/Books/Manual/Foxx/Router/Request.mdpp +++ b/Documentation/Books/Manual/Foxx/Router/Request.mdpp @@ -19,7 +19,7 @@ The request object specifies the following properties: * **body**: `any` - The processed and validated request body for the current route. + The processed and validated request body for the current route. If no body has been defined for the current route, the value will be identical to *rawBody*. For details on how request bodies can be processed and validated by Foxx see the [body method of the endpoint object](Endpoints.md#body).
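
Taken together, patches 05, 06 and 12 change how Foxx routes surface in the generated Swagger documentation. The following is a minimal, hypothetical usage sketch and is not part of the patches themselves: the service paths, route names, and tag value are invented for illustration, and the comments only restate behaviour introduced above.

```js
'use strict';
// Hypothetical Foxx service illustrating the new tag() method, named
// routes (used for Swagger operationIds) and the documented req.body
// fallback. All names below are examples, not taken from the patches.
const createRouter = require('@arangodb/foxx/router');
const router = createRouter();
module.exports = router;

// Patch 05: all routes registered on this router inherit the "users"
// tag and are grouped under it in the generated Swagger docs.
router.tag('users');

// Patch 06: the route name ("listUsers") is joined camelCase-style into
// a Swagger operationId; when an endpoint spans several HTTP methods,
// numeric suffixes (2, 3, ...) keep the generated ids unique.
router.get('/users', function (req, res) {
  res.send({users: []});
}, 'listUsers');

// Patch 12: no body definition is given here, so req.body is the
// unprocessed rawBody buffer rather than a parsed object.
router.post('/users/echo', function (req, res) {
  res.send({bytes: req.body.length});
}, 'echoUser');
```

Under these assumptions, the generated Swagger document would list both operations under a single "users" tag, each with an operationId derived from its route name.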