1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

This commit is contained in:
Simon Grätzer 2016-12-12 13:21:51 +01:00
commit 3a4a07f13d
44 changed files with 452 additions and 182 deletions

View File

@ -468,14 +468,14 @@ class Slice {
if (!isExternal()) {
throw Exception(Exception::InvalidValueType, "Expecting type External");
}
return extractValue<char const*>();
return extractPointer();
}
// returns the Slice managed by an External or the Slice itself if it's not
// an External
Slice resolveExternal() const {
if (*_start == 0x1d) {
return Slice(extractValue<char const*>());
return Slice(extractPointer());
}
return *this;
}
@ -485,7 +485,7 @@ class Slice {
Slice resolveExternals() const {
char const* current = reinterpret_cast<char const*>(_start);
while (*current == 0x1d) {
current = Slice(current).extractValue<char const*>();
current = Slice(current).extractPointer();
}
return Slice(current);
}
@ -908,15 +908,14 @@ class Slice {
}
#endif
// extracts a value from the slice and converts it into a
// built-in type
template <typename T>
T extractValue() const {
// extracts a pointer from the slice and converts it into a
// built-in pointer type
char const* extractPointer() const {
union {
T value;
char binary[sizeof(T)];
char const* value;
char binary[sizeof(char const*)];
};
memcpy(&binary[0], _start + 1, sizeof(T));
memcpy(&binary[0], _start + 1, sizeof(char const*));
return value;
}
};

View File

@ -115,6 +115,8 @@ body
Defines the request body recognized by the endpoint. There can only be one request body definition per endpoint. The definition will also be shown in the route details in the API documentation.
In the absence of a request body definition, the request object's *body* property will be initialized to the unprocessed *rawBody* buffer.
If the endpoint is a child router, all routes of that router will use this body definition unless overridden. If the endpoint is a middleware, the request body will only be parsed once (i.e. the MIME types of the route matching the same request will be ignored but the body will still be validated again).
**Arguments**

View File

@ -19,7 +19,7 @@ The request object specifies the following properties:
* **body**: `any`
The processed and validated request body for the current route.
The processed and validated request body for the current route. If no body has been defined for the current route, the value will be identical to *rawBody*.
For details on how request bodies can be processed and validated by Foxx see the [body method of the endpoint object](Endpoints.md#body).

View File

@ -121,6 +121,16 @@ JOB_STATUS CleanOutServer::status() {
bool CleanOutServer::create() { // Only through shrink cluster
// Lookup server
if (_server.find("DBServer") == 0) {
try {
_server = uuidLookup(_snapshot, _server);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: To server " << _server << " does not exist";
}
}
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Todo: Clean out server " + _server + " for shrinkage";

View File

@ -39,6 +39,7 @@ MoveShard::MoveShard(Node const& snapshot, Agent* agent,
_shard(shard),
_from(from),
_to(to) {
try {
JOB_STATUS js = status();
@ -50,7 +51,7 @@ MoveShard::MoveShard(Node const& snapshot, Agent* agent,
}
}
} catch (std::exception const& e) {
LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << __FILE__ << __LINE__;
LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << ": " << __FILE__ << ":" << __LINE__;
finish("Shards/" + _shard, false, e.what());
}
}
@ -58,6 +59,26 @@ MoveShard::MoveShard(Node const& snapshot, Agent* agent,
MoveShard::~MoveShard() {}
bool MoveShard::create() {
// Lookup from server
if (_from.compare("DBServer") == 0) {
try {
_from = uuidLookup(_snapshot, _from);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: From server " << _from << " does not exist";
}
}
// Lookup to Server
if (_to.find("DBServer") == 0) {
try {
_to = uuidLookup(_snapshot, _to);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: To server " << _to << " does not exist";
}
}
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Todo: Move shard " + _shard + " from " + _from + " to " << _to;
@ -75,24 +96,6 @@ bool MoveShard::create() {
_jb->openArray();
_jb->openObject();
// Lookup from server
if (_from.find("DBServer") == 0) {
try {
_from = uuidLookup(_snapshot, _from);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: From server " << _from << " does not exist";
}
}
if (_to.find("DBServer") == 0) {
try {
_to = uuidLookup(_snapshot, _to);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: To server " << _to << " does not exist";
}
}
if (_from == _to) {
path = _agencyPrefix + failedPrefix + _jobId;
_jb->add("timeFinished", VPackValue(now));
@ -138,6 +141,26 @@ bool MoveShard::create() {
}
bool MoveShard::start() {
// Lookup from server
if (_from.find("DBServer") == 0) {
try {
_from = uuidLookup(_snapshot, _from);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: From server " << _from << " does not exist";
}
}
// Lookup to Server
if (_to.find("DBServer") == 0) {
try {
_to = uuidLookup(_snapshot, _to);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: To server " << _to << " does not exist";
}
}
// Are we distributeShardsLiking other shard?
// Invoke moveShard there
@ -232,7 +255,7 @@ bool MoveShard::start() {
try {
todo.add(_jb->slice()[0].get(_agencyPrefix + toDoPrefix + _jobId));
} catch (std::exception const& e) {
LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << __FILE__ << __LINE__;
LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << ": " << __FILE__ << ":" << __LINE__;
}
}
todo.close();
@ -355,6 +378,27 @@ JOB_STATUS MoveShard::status() {
_to = _snapshot(pos[status] + _jobId + "/toServer").getString();
_shard =
_snapshot(pos[status] + _jobId + "/shards").slice()[0].copyString();
// Lookup from server
if (_from.find("DBServer") == 0) {
try {
_from = uuidLookup(_snapshot, _from);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: From server " << _from << " does not exist";
}
}
// Lookup to Server
if (_to.find("DBServer") == 0) {
try {
_to = uuidLookup(_snapshot, _to);
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"MoveShard: To server " << _to << " does not exist";
}
}
} catch (std::exception const& e) {
std::string err =
std::string("Failed to find job ") + _jobId + " in agency: " + e.what();

View File

@ -84,7 +84,7 @@ V8Expression* Executor::generateExpression(AstNode const* node) {
v8::Handle<v8::Script> compiled = v8::Script::Compile(
TRI_V8_STD_STRING((*_buffer)), TRI_V8_ASCII_STRING("--script--"));
if (! compiled.IsEmpty()) {
if (!compiled.IsEmpty()) {
v8::Handle<v8::Value> func(compiled->Run());
// exit early if an error occurred
@ -108,7 +108,7 @@ V8Expression* Executor::generateExpression(AstNode const* node) {
HandleV8Error(tryCatch, empty, _buffer, true);
// well we're almost sure we never reach this since the above call should throw:
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to compile AQL script code");
}
}
@ -133,7 +133,7 @@ int Executor::executeExpression(Query* query, AstNode const* node,
v8::Handle<v8::Script> compiled = v8::Script::Compile(
TRI_V8_STD_STRING((*_buffer)), TRI_V8_ASCII_STRING("--script--"));
if (! compiled.IsEmpty()) {
if (!compiled.IsEmpty()) {
v8::Handle<v8::Value> func(compiled->Run());
@ -175,7 +175,7 @@ int Executor::executeExpression(Query* query, AstNode const* node,
HandleV8Error(tryCatch, empty, _buffer, true);
// well we're almost sure we never reach this since the above call should throw:
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to compile AQL script code");
}
}

View File

@ -4010,7 +4010,7 @@ AqlValue Functions::Fulltext(arangodb::aql::Query* query,
TRI_QueryFulltextIndex(fulltextIndex->internals(), ft);
if (queryResult == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
}
TRI_ASSERT(trx->hasDitch(cid));

View File

@ -499,7 +499,7 @@ void RestAqlHandler::getInfoQuery(std::string const& operation,
auto block = static_cast<BlockWithClients*>(query->engine()->root());
if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
}
number = block->remainingForShard(shardId);
}
@ -516,7 +516,7 @@ void RestAqlHandler::getInfoQuery(std::string const& operation,
auto block = static_cast<BlockWithClients*>(query->engine()->root());
if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
}
hasMore = block->hasMoreForShard(shardId);
}
@ -719,7 +719,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
auto block = static_cast<BlockWithClients*>(query->engine()->root());
if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
}
items.reset(block->getSomeForShard(atLeast, atMost, shardId));
}
@ -755,7 +755,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
static_cast<BlockWithClients*>(query->engine()->root());
if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
}
skipped = block->skipSomeForShard(atLeast, atMost, shardId);
}
@ -783,7 +783,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
static_cast<BlockWithClients*>(query->engine()->root());
if (block->getPlanNode()->getType() != ExecutionNode::SCATTER &&
block->getPlanNode()->getType() != ExecutionNode::DISTRIBUTE) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unexpected node type");
}
exhausted = block->skipForShard(number, shardId);
}

View File

@ -203,7 +203,7 @@ void Scopes::replaceVariable(Variable* variable) {
}
}
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find AQL variable in scopes");
}
/// @brief checks whether a variable exists in any scope

View File

@ -60,7 +60,7 @@ static AstNode* createGlobalCondition(Ast* ast, AstNode const* condition) {
type = NODE_TYPE_OPERATOR_BINARY_NIN;
break;
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unsupported operator type");
}
auto quantifier = condition->getMemberUnchecked(2);
TRI_ASSERT(quantifier->type == NODE_TYPE_QUANTIFIER);
@ -69,7 +69,7 @@ static AstNode* createGlobalCondition(Ast* ast, AstNode const* condition) {
if (val == Quantifier::NONE) {
auto it = Ast::NegatedOperators.find(type);
if (it == Ast::NegatedOperators.end()) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unsupported operator type");
}
type = it->second;
}

View File

@ -2004,8 +2004,8 @@ void ClusterInfo::loadServers() {
}
AgencyCommResult result = _agency.sendTransactionWithFailover(
AgencyReadTransaction({AgencyCommManager::path(prefixServers),
AgencyCommManager::path(mapUniqueToShortId)}));
AgencyReadTransaction(std::vector<std::string>({AgencyCommManager::path(prefixServers),
AgencyCommManager::path(mapUniqueToShortId)})));
if (result.successful()) {

View File

@ -1641,7 +1641,7 @@ static void Return_PrepareClusterCommResultForJS(
// FIXME HANDLE VPP
auto httpRequest = std::dynamic_pointer_cast<HttpRequest>(res.answer);
if (httpRequest == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
// The headers:

View File

@ -85,7 +85,7 @@ FulltextIndex::FulltextIndex(TRI_idx_iid_t iid,
_sparse = true;
if (_fields.size() != 1) {
// We need exactly 1 attribute
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "fulltext index definition should have exactly one attribute");
}
auto& attribute = _fields[0];
_attr.reserve(attribute.size());

View File

@ -26,6 +26,7 @@
#include "Aql/AstNode.h"
#include "Aql/SortCondition.h"
#include "Basics/Exceptions.h"
#include "Basics/FixedSizeAllocator.h"
#include "Basics/VelocyPackHelper.h"
#include "Indexes/IndexLookupContext.h"
#include "Indexes/SimpleAttributeEqualityMatcher.h"
@ -432,13 +433,6 @@ HashIndex::UniqueArray::UniqueArray(
/// @brief destroy the unique array
HashIndex::UniqueArray::~UniqueArray() {
if (_hashArray != nullptr) {
auto cb = [this](HashIndexElement* element) -> bool {
element->free(); return true;
};
_hashArray->invokeOnAllElements(cb);
}
delete _hashArray;
delete _hashElement;
delete _isEqualElElByKey;
@ -460,13 +454,6 @@ HashIndex::MultiArray::MultiArray(size_t numPaths,
/// @brief destroy the multi array
HashIndex::MultiArray::~MultiArray() {
if (_hashArray != nullptr) {
auto cb = [this](HashIndexElement* element) -> bool {
element->free(); return true;
};
_hashArray->invokeOnAllElements(cb);
}
delete _hashArray;
delete _hashElement;
delete _isEqualElElByKey;
@ -474,7 +461,7 @@ HashIndex::MultiArray::~MultiArray() {
HashIndex::HashIndex(TRI_idx_iid_t iid, LogicalCollection* collection,
VPackSlice const& info)
: PathBasedIndex(iid, collection, info, false), _uniqueArray(nullptr) {
: PathBasedIndex(iid, collection, info, sizeof(TRI_voc_rid_t) + sizeof(uint32_t), false), _uniqueArray(nullptr) {
uint32_t indexBuckets = 1;
if (collection != nullptr) {
@ -650,7 +637,7 @@ int HashIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (res != TRI_ERROR_NO_ERROR) {
for (auto& hashElement : elements) {
hashElement->free();
_allocator->deallocate(hashElement);
}
return res;
}
@ -668,7 +655,7 @@ int HashIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (result != TRI_ERROR_NO_ERROR) {
res = result;
}
hashElement->free();
_allocator->deallocate(hashElement);
}
return res;
@ -686,10 +673,11 @@ int HashIndex::batchInsert(arangodb::Transaction* trx,
int HashIndex::unload() {
if (_unique) {
_uniqueArray->_hashArray->truncate([](HashIndexElement* element) -> bool { element->free(); return true; });
_uniqueArray->_hashArray->truncate([](HashIndexElement*) -> bool { return true; });
} else {
_multiArray->_hashArray->truncate([](HashIndexElement* element) -> bool { element->free(); return true; });
_multiArray->_hashArray->truncate([](HashIndexElement*) -> bool { return true; });
}
_allocator->deallocateAll();
return TRI_ERROR_NO_ERROR;
}
@ -751,7 +739,7 @@ int HashIndex::insertUnique(arangodb::Transaction* trx, TRI_voc_rid_t revisionId
if (res != TRI_ERROR_NO_ERROR) {
for (auto& it : elements) {
// free all elements to prevent leak
it->free();
_allocator->deallocate(it);
}
return res;
@ -775,7 +763,7 @@ int HashIndex::insertUnique(arangodb::Transaction* trx, TRI_voc_rid_t revisionId
if (res != TRI_ERROR_NO_ERROR) {
for (size_t j = i; j < n; ++j) {
// Free all elements that are not yet in the index
elements[j]->free();
_allocator->deallocate(elements[j]);
}
// Already indexed elements will be removed by the rollback
break;
@ -796,7 +784,7 @@ int HashIndex::batchInsertUnique(arangodb::Transaction* trx,
if (res != TRI_ERROR_NO_ERROR) {
for (auto& it : elements) {
// free all elements to prevent leak
it->free();
_allocator->deallocate(it);
}
return res;
}
@ -823,7 +811,7 @@ int HashIndex::batchInsertUnique(arangodb::Transaction* trx,
if (res != TRI_ERROR_NO_ERROR) {
for (auto& it : elements) {
// free all elements to prevent leak
it->free();
_allocator->deallocate(it);
}
}
@ -837,7 +825,7 @@ int HashIndex::insertMulti(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (res != TRI_ERROR_NO_ERROR) {
for (auto& hashElement : elements) {
hashElement->free();
_allocator->deallocate(hashElement);
}
return res;
}
@ -855,7 +843,7 @@ int HashIndex::insertMulti(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (found != nullptr) {
// already got the exact same index entry. now free our local element...
element->free();
_allocator->deallocate(element);
}
};
@ -875,7 +863,7 @@ int HashIndex::insertMulti(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (res != TRI_ERROR_NO_ERROR) {
for (size_t j = i; j < n; ++j) {
// Free all elements that are not yet in the index
elements[j]->free();
_allocator->deallocate(elements[j]);
}
for (size_t j = 0; j < i; ++j) {
// Remove all already indexed elements and free them
@ -903,7 +891,7 @@ int HashIndex::batchInsertMulti(arangodb::Transaction* trx,
// Filling the elements failed for some reason. Assume loading as failed
for (auto& el : elements) {
// Free all elements that are not yet in the index
el->free();
_allocator->deallocate(el);
}
return res;
}
@ -943,7 +931,7 @@ int HashIndex::removeUniqueElement(arangodb::Transaction* trx,
}
return TRI_ERROR_INTERNAL;
}
old->free();
_allocator->deallocate(old);
return TRI_ERROR_NO_ERROR;
}
@ -963,7 +951,7 @@ int HashIndex::removeMultiElement(arangodb::Transaction* trx,
}
return TRI_ERROR_INTERNAL;
}
old->free();
_allocator->deallocate(old);
return TRI_ERROR_NO_ERROR;
}

View File

@ -35,26 +35,13 @@ HashIndexElement::HashIndexElement(TRI_voc_rid_t revisionId, std::vector<std::pa
}
}
HashIndexElement* HashIndexElement::create(TRI_voc_rid_t revisionId, std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values) {
HashIndexElement* HashIndexElement::initialize(HashIndexElement* element,
TRI_voc_rid_t revisionId,
std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values) {
TRI_ASSERT(!values.empty());
void* space = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, baseMemoryUsage(values.size()), false);
if (space == nullptr) {
return nullptr;
}
try {
return new (space) HashIndexElement(revisionId, values);
} catch (...) {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, space);
return nullptr;
}
return new (element) HashIndexElement(revisionId, values);
}
void HashIndexElement::free() {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, this);
}
/// @brief velocypack sub-object (for indexes, as part of IndexElement,
/// if offset is non-zero, then it is an offset into the VelocyPack data in
/// the datafile or WAL file. If offset is 0, then data contains the actual data
@ -126,26 +113,13 @@ SkiplistIndexElement::SkiplistIndexElement(TRI_voc_rid_t revisionId, std::vector
}
}
SkiplistIndexElement* SkiplistIndexElement::create(TRI_voc_rid_t revisionId, std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values) {
SkiplistIndexElement* SkiplistIndexElement::initialize(SkiplistIndexElement* element,
TRI_voc_rid_t revisionId,
std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values) {
TRI_ASSERT(!values.empty());
void* space = TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, baseMemoryUsage(values.size()), false);
if (space == nullptr) {
return nullptr;
}
try {
return new (space) SkiplistIndexElement(revisionId, values);
} catch (...) {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, space);
return nullptr;
}
return new (element) SkiplistIndexElement(revisionId, values);
}
void SkiplistIndexElement::free() {
TRI_Free(TRI_UNKNOWN_MEM_ZONE, this);
}
/// @brief velocypack sub-object (for indexes, as part of IndexElement,
/// if offset is non-zero, then it is an offset into the VelocyPack data in
/// the datafile or WAL file. If offset is 0, then data contains the actual data

View File

@ -140,9 +140,9 @@ struct HashIndexElement {
static uint64_t hash(std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values);
/// @brief allocate a new index element from a vector of slices
static HashIndexElement* create(TRI_voc_rid_t revisionId, std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values);
void free();
static HashIndexElement* initialize(HashIndexElement* memory,
TRI_voc_rid_t revisionId,
std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values);
private:
inline IndexElementValue* subObject(size_t position) {
@ -188,9 +188,9 @@ struct SkiplistIndexElement {
arangodb::velocypack::Slice slice(IndexLookupContext* context, size_t position) const;
/// @brief allocate a new index element from a vector of slices
static SkiplistIndexElement* create(TRI_voc_rid_t revisionId, std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values);
void free();
static SkiplistIndexElement* initialize(SkiplistIndexElement* element,
TRI_voc_rid_t revisionId,
std::vector<std::pair<arangodb::velocypack::Slice, uint32_t>> const& values);
private:
inline IndexElementValue* subObject(size_t position) {

View File

@ -23,6 +23,7 @@
#include "PathBasedIndex.h"
#include "Aql/AstNode.h"
#include "Basics/FixedSizeAllocator.h"
#include "Basics/VelocyPackHelper.h"
#include "Logger/Logger.h"
@ -53,7 +54,7 @@ arangodb::aql::AstNode const* PathBasedIndex::PermutationState::getValue()
/// @brief create the index
PathBasedIndex::PathBasedIndex(TRI_idx_iid_t iid,
arangodb::LogicalCollection* collection,
VPackSlice const& info, bool allowPartialIndex)
VPackSlice const& info, size_t baseSize, bool allowPartialIndex)
: Index(iid, collection, info),
_useExpansion(false),
_allowPartialIndex(allowPartialIndex) {
@ -69,10 +70,14 @@ PathBasedIndex::PathBasedIndex(TRI_idx_iid_t iid,
break;
}
}
_allocator.reset(new FixedSizeAllocator(baseSize + sizeof(IndexElementValue) * numPaths()));
}
/// @brief destroy the index
PathBasedIndex::~PathBasedIndex() {}
PathBasedIndex::~PathBasedIndex() {
_allocator->deallocateAll();
}
/// @brief whether or not the index is implicitly unique
/// this can be the case if the index is not declared as unique, but contains a
@ -121,14 +126,16 @@ int PathBasedIndex::fillElement(std::vector<T*>& elements,
if (slices.size() == n) {
// if shapes.size() != n, then the value is not inserted into the index
// because of index sparsity!
T* element = T::create(revisionId, slices);
T* element = static_cast<T*>(_allocator->allocate());
TRI_ASSERT(element != nullptr);
element = T::initialize(element, revisionId, slices);
if (element == nullptr) {
return TRI_ERROR_OUT_OF_MEMORY;
}
TRI_IF_FAILURE("FillElementOOM") {
// clean up manually
element->free();
_allocator->deallocate(element);
return TRI_ERROR_OUT_OF_MEMORY;
}
@ -139,7 +146,7 @@ int PathBasedIndex::fillElement(std::vector<T*>& elements,
elements.emplace_back(element);
} catch (...) {
element->free();
_allocator->deallocate(element);
return TRI_ERROR_OUT_OF_MEMORY;
}
}
@ -155,14 +162,16 @@ int PathBasedIndex::fillElement(std::vector<T*>& elements,
for (auto& info : toInsert) {
TRI_ASSERT(info.size() == n);
T* element = T::create(revisionId, info);
T* element = static_cast<T*>(_allocator->allocate());
TRI_ASSERT(element != nullptr);
element = T::initialize(element, revisionId, info);
if (element == nullptr) {
return TRI_ERROR_OUT_OF_MEMORY;
}
TRI_IF_FAILURE("FillElementOOM") {
// clean up manually
element->free();
_allocator->deallocate(element);
return TRI_ERROR_OUT_OF_MEMORY;
}
@ -173,7 +182,7 @@ int PathBasedIndex::fillElement(std::vector<T*>& elements,
elements.emplace_back(element);
} catch (...) {
element->free();
_allocator->deallocate(element);
return TRI_ERROR_OUT_OF_MEMORY;
}
}

View File

@ -34,6 +34,8 @@ namespace aql {
enum AstNodeType : uint32_t;
}
class FixedSizeAllocator;
class PathBasedIndex : public Index {
protected:
struct PermutationState {
@ -61,7 +63,7 @@ class PathBasedIndex : public Index {
PathBasedIndex() = delete;
PathBasedIndex(TRI_idx_iid_t, arangodb::LogicalCollection*,
arangodb::velocypack::Slice const&, bool allowPartialIndex);
arangodb::velocypack::Slice const&, size_t baseSize, bool allowPartialIndex);
~PathBasedIndex();
@ -105,6 +107,8 @@ class PathBasedIndex : public Index {
std::vector<std::pair<VPackSlice, uint32_t>>& sliceStack);
protected:
std::unique_ptr<FixedSizeAllocator> _allocator;
/// @brief the attribute paths
std::vector<std::vector<std::string>> _paths;

View File

@ -25,6 +25,7 @@
#include "Aql/AstNode.h"
#include "Aql/SortCondition.h"
#include "Basics/AttributeNameParser.h"
#include "Basics/FixedSizeAllocator.h"
#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
#include "Indexes/IndexLookupContext.h"
@ -206,7 +207,7 @@ IndexLookupResult RocksDBIterator::next() {
RocksDBIndex::RocksDBIndex(TRI_idx_iid_t iid,
arangodb::LogicalCollection* collection,
arangodb::velocypack::Slice const& info)
: PathBasedIndex(iid, collection, info, true),
: PathBasedIndex(iid, collection, info, 0, true),
_db(RocksDBFeature::instance()->db()) {}
/// @brief destroy the index
@ -246,7 +247,7 @@ int RocksDBIndex::insert(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
// make sure we clean up before we leave this method
auto cleanup = [this, &elements] {
for (auto& it : elements) {
it->free();
_allocator->deallocate(it);
}
};
@ -402,7 +403,7 @@ int RocksDBIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
// make sure we clean up before we leave this method
auto cleanup = [this, &elements] {
for (auto& it : elements) {
it->free();
_allocator->deallocate(it);
}
};

View File

@ -25,6 +25,7 @@
#include "Aql/AstNode.h"
#include "Aql/SortCondition.h"
#include "Basics/AttributeNameParser.h"
#include "Basics/FixedSizeAllocator.h"
#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
#include "Indexes/IndexLookupContext.h"
@ -715,12 +716,12 @@ void SkiplistIterator2::initNextInterval() {
SkiplistIndex::SkiplistIndex(TRI_idx_iid_t iid,
arangodb::LogicalCollection* collection,
VPackSlice const& info)
: PathBasedIndex(iid, collection, info, true),
: PathBasedIndex(iid, collection, info, sizeof(TRI_voc_rid_t), true),
CmpElmElm(this),
CmpKeyElm(this),
_skiplistIndex(nullptr) {
_skiplistIndex =
new TRI_Skiplist(CmpElmElm, CmpKeyElm, [this](SkiplistIndexElement* element) { element->free(); }, _unique, _useExpansion);
new TRI_Skiplist(CmpElmElm, CmpKeyElm, [this](SkiplistIndexElement* element) { _allocator->deallocate(element); }, _unique, _useExpansion);
}
/// @brief destroy the skiplist index
@ -761,7 +762,7 @@ int SkiplistIndex::insert(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (res != TRI_ERROR_NO_ERROR) {
for (auto& element : elements) {
// free all elements to prevent leak
element->free();
_allocator->deallocate(element);
}
return res;
}
@ -779,7 +780,7 @@ int SkiplistIndex::insert(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (res != TRI_ERROR_NO_ERROR) {
// Note: this element is freed already
for (size_t j = i; j < count; ++j) {
elements[j]->free();
_allocator->deallocate(elements[j]);
}
for (size_t j = 0; j < i; ++j) {
_skiplistIndex->remove(&context, elements[j]);
@ -812,7 +813,7 @@ int SkiplistIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
if (res != TRI_ERROR_NO_ERROR) {
for (auto& element : elements) {
// free all elements to prevent leak
element->free();
_allocator->deallocate(element);
}
return res;
}
@ -833,7 +834,7 @@ int SkiplistIndex::remove(arangodb::Transaction* trx, TRI_voc_rid_t revisionId,
res = result;
}
elements[i]->free();
_allocator->deallocate(elements[i]);
}
return res;

View File

@ -72,7 +72,7 @@ RestStatus RestBatchHandler::executeHttp() {
if (httpResponse == nullptr) {
std::cout << "please fix this for vpack" << std::endl;
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type");
}
HttpRequest const* httpRequest =
@ -80,7 +80,7 @@ RestStatus RestBatchHandler::executeHttp() {
if (httpRequest == nullptr) {
std::cout << "please fix this for vpack" << std::endl;
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
// extract the request type
@ -290,7 +290,7 @@ bool RestBatchHandler::getBoundaryBody(std::string* result) {
HttpRequest const* req = dynamic_cast<HttpRequest const*>(_request.get());
if (req == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
std::string const& bodyStr = req->body();

View File

@ -276,7 +276,7 @@ int RestImportHandler::handleSingleDocument(SingleCollectionTransaction& trx,
bool RestImportHandler::createFromJson(std::string const& type) {
if (_request == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request");
}
RestImportResult result;
@ -319,7 +319,7 @@ bool RestImportHandler::createFromJson(std::string const& type) {
linewise = true;
if (_response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response");
}
// auto detect import type by peeking at first non-whitespace character
@ -328,7 +328,7 @@ bool RestImportHandler::createFromJson(std::string const& type) {
HttpRequest* req = dynamic_cast<HttpRequest*>(_request.get());
if (req == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
std::string const& body = req->body();
@ -388,7 +388,7 @@ bool RestImportHandler::createFromJson(std::string const& type) {
HttpRequest* req = dynamic_cast<HttpRequest*>(_request.get());
if (req == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
// each line is a separate JSON document
@ -529,7 +529,7 @@ bool RestImportHandler::createFromJson(std::string const& type) {
bool RestImportHandler::createFromVPack(std::string const& type) {
if (_request == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request");
}
RestImportResult result;
@ -637,7 +637,7 @@ bool RestImportHandler::createFromVPack(std::string const& type) {
bool RestImportHandler::createFromKeyValueList() {
if (_request == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request");
}
RestImportResult result;
@ -679,7 +679,7 @@ bool RestImportHandler::createFromKeyValueList() {
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(_request.get());
if (httpRequest == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
std::string const& bodyStr = httpRequest->body();

View File

@ -40,7 +40,7 @@ RestStatus RestPleaseUpgradeHandler::execute() {
auto response = dynamic_cast<HttpResponse*>(_response.get());
if (response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type");
}
resetResponse(rest::ResponseCode::OK);

View File

@ -744,7 +744,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
useVpp = true;
}
if (_request == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request");
}
// First check the DBserver component of the body json:
@ -783,7 +783,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
if (!useVpp) {
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(_request.get());
if (httpRequest == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
// Send a synchronous request to that shard using ClusterComm:
@ -833,7 +833,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
if (!useVpp) {
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response.get());
if (_response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type");
}
httpResponse->body().swap(&(res->result->getBody()));
} else {
@ -1014,7 +1014,7 @@ void RestReplicationHandler::handleCommandLoggerFollow() {
dynamic_cast<HttpResponse*>(_response.get());
if (httpResponse == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type");
}
if (length > 0) {
@ -1086,7 +1086,7 @@ void RestReplicationHandler::handleCommandDetermineOpenTransactions() {
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response.get());
if (_response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type");
}
_response->setContentType(rest::ContentType::DUMP);
@ -2132,7 +2132,7 @@ int RestReplicationHandler::processRestoreDataBatch(
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(_request.get());
if (httpRequest == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
std::string const& bodyStr = httpRequest->body();
@ -2466,12 +2466,12 @@ void RestReplicationHandler::handleCommandRestoreDataCoordinator() {
VPackBuilder builder;
if (_request == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(_request.get());
if (httpRequest == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
std::string const& bodyStr = httpRequest->body();
@ -3099,7 +3099,7 @@ void RestReplicationHandler::handleCommandDump() {
auto response = dynamic_cast<HttpResponse*>(_response.get());
if (response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response type");
}
response->setContentType(rest::ContentType::DUMP);

View File

@ -287,7 +287,7 @@ void RestSimpleHandler::lookupByKeys(VPackSlice const& slice) {
auto response = _response.get();
if (response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response");
}
try {

View File

@ -46,7 +46,7 @@ RestStatus RestUploadHandler::execute() {
HttpRequest* request = dynamic_cast<HttpRequest*>(_request.get());
if (request == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
// extract the request type
@ -152,7 +152,7 @@ bool RestUploadHandler::parseMultiPart(char const*& body, size_t& length) {
HttpRequest* request = dynamic_cast<HttpRequest*>(_request.get());
if (request == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
std::string const& bodyStr = request->body();

View File

@ -159,7 +159,7 @@ void VelocyPackCursor::dump(VPackBuilder& builder) {
} catch (std::exception const& ex) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, ex.what());
} catch (...) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "internal error during VPackCursor::dump");
}
}
@ -298,7 +298,7 @@ void ExportCursor::dump(VPackBuilder& builder) {
} catch (std::exception const& ex) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, ex.what());
} catch (...) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "internal error during ExportCursor::dump");
}
builder.options = oldOptions;
}

View File

@ -649,7 +649,7 @@ DocumentDitch* Transaction::orderDitch(TRI_voc_cid_t cid) {
TRI_transaction_collection_t* trxCollection = TRI_GetCollectionTransaction(_trx, cid, TRI_TRANSACTION_READ);
if (trxCollection == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to determine transaction collection");
}
TRI_ASSERT(trxCollection->_collection != nullptr);

View File

@ -407,7 +407,7 @@ static v8::Handle<v8::Object> RequestCppToV8(v8::Isolate* isolate,
if (rest::ContentType::JSON == request->contentType()) {
auto httpreq = dynamic_cast<HttpRequest*>(request);
if (httpreq == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
}
std::string const& body = httpreq->body();
req->ForceSet(RequestBodyKey, TRI_V8_STD_STRING(body));
@ -516,7 +516,7 @@ static v8::Handle<v8::Object> RequestCppToV8(v8::Isolate* isolate,
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(request);
if (httpRequest == nullptr) {
// maybe we can just continue
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request type");
} else {
for (auto& it : httpRequest->cookieValues()) {
cookiesObject->ForceSet(TRI_V8_STD_STRING(it.first),
@ -832,7 +832,7 @@ static TRI_action_result_t ExecuteActionVocbase(
v8::TryCatch tryCatch;
if (response == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid response");
}
TRI_GET_GLOBALS();

View File

@ -111,7 +111,7 @@ static void EdgesQuery(TRI_edge_direction_e direction,
return "FILTER doc._from " + op + " @value || doc._to " + op + " @value";
}
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid edge index direction");
};
arangodb::LogicalCollection const* collection =

View File

@ -433,7 +433,7 @@ static void JS_ConfigureApplierReplication(
}
if (vocbase->replicationApplier() == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier");
}
if (args.Length() == 0) {
@ -702,7 +702,7 @@ static void JS_StartApplierReplication(
}
if (vocbase->replicationApplier() == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier");
}
if (args.Length() > 2) {
@ -753,7 +753,7 @@ static void JS_ShutdownApplierReplication(
}
if (vocbase->replicationApplier() == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier");
}
int res = vocbase->replicationApplier()->shutdown();
@ -786,7 +786,7 @@ static void JS_StateApplierReplication(
}
if (vocbase->replicationApplier() == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier");
}
std::shared_ptr<VPackBuilder> builder = vocbase->replicationApplier()->toVelocyPack();
@ -817,7 +817,7 @@ static void JS_ForgetApplierReplication(
}
if (vocbase->replicationApplier() == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find replicationApplier");
}
int res = vocbase->replicationApplier()->forget();

View File

@ -286,7 +286,7 @@ static void JS_Transaction(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
if (params.IsEmpty()) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to decode function parameters");
}
bool embed = false;
@ -375,7 +375,7 @@ static void JS_Transaction(v8::FunctionCallbackInfo<v8::Value> const& args) {
} catch (std::exception const& ex) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, ex.what());
} catch (...) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "caught unknown exception during transaction");
}
res = trx.commit();
@ -2044,7 +2044,7 @@ static void JS_UseDatabase(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_vocbase_t* vocbase = GetContextVocBase(isolate);
if (vocbase == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find database");
}
if (vocbase->isDropped()) {
@ -2270,7 +2270,7 @@ static void CreateDatabaseCoordinator(
}
if (vocbase == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to find database");
}
// now run upgrade and copy users into context

View File

@ -599,7 +599,7 @@ static void EnsureIndex(v8::FunctionCallbackInfo<v8::Value> const& args,
VPackSlice f = flds.at(i);
if (!f.isString()) {
// index attributes must be strings
TRI_V8_THROW_EXCEPTION(TRI_ERROR_INTERNAL);
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "index field names should be strings");
}
indexKeys.emplace(f.copyString());
}

View File

@ -244,7 +244,7 @@ static std::shared_ptr<Index> PrepareIndexFromSlice(VPackSlice info,
switch (type) {
case arangodb::Index::TRI_IDX_TYPE_UNKNOWN: {
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid index type");
}
case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: {
if (!isClusterConstructor) {

View File

@ -52,7 +52,7 @@ template <typename T>
static inline T NumericValue(VPackSlice const& slice, char const* attribute) {
if (!slice.isObject()) {
LOG(ERR) << "invalid value type when looking for attribute '" << attribute << "': expecting object";
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, "invalid attribute value: expecting object");
}
VPackSlice v = slice.get(attribute);
if (v.isString()) {
@ -63,7 +63,7 @@ static inline T NumericValue(VPackSlice const& slice, char const* attribute) {
}
LOG(ERR) << "invalid value for attribute '" << attribute << "'";
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, "invalid attribute value");
}
/// @brief creates the recover state

View File

@ -8,6 +8,7 @@ const semver = require('semver');
const actions = require('@arangodb/actions');
const ArangoError = require('@arangodb').ArangoError;
const errors = require('@arangodb').errors;
const swaggerJson = require('@arangodb/foxx/legacy/swagger').swaggerJson;
const fm = require('@arangodb/foxx/manager');
const fmu = require('@arangodb/foxx/manager-utils');
const createRouter = require('@arangodb/foxx/router');
@ -400,3 +401,14 @@ instanceRouter.get('/readme', (req, res) => {
.description(dd`
Fetches the service's README or README.md file's contents if any.
`);
instanceRouter.get('/swagger', (req, res) => {
swaggerJson(req, res, {
mount: req.service.mount
});
})
.response(200, joi.object(), `Service Swagger description.`)
.summary(`Swagger description`)
.description(dd`
Fetches the Swagger API description for the service at the given mount path.
`);

View File

@ -106,7 +106,7 @@ const Router = module.exports =
[['path', 'string'], ...repeat(Math.max(1, args.length - 2), ['handler', 'function']), ['name', 'string']],
[['path', 'string'], ...repeat(Math.max(1, args.length - 1), ['handler', 'function'])],
[...repeat(Math.max(1, args.length - 1), ['handler', 'function']), ['name', 'string']],
repeat(args.length, ['handler', 'function'])
repeat(Math.max(1, args.length - 1), ['handler', 'function'])
);
const path = argv.path;
const handler = argv.handler;
@ -130,7 +130,7 @@ ALL_METHODS.forEach(function (method) {
[['path', 'string'], ...repeat(Math.max(1, args.length - 2), ['handler', 'function']), ['name', 'string']],
[['path', 'string'], ...repeat(Math.max(1, args.length - 1), ['handler', 'function'])],
[...repeat(Math.max(1, args.length - 1), ['handler', 'function']), ['name', 'string']],
repeat(args.length, ['handler', 'function'])
repeat(Math.max(1, args.length - 1), ['handler', 'function'])
);
const path = argv.path;
const handler = argv.handler;

View File

@ -49,6 +49,14 @@ const PARSED_JSON_MIME = (function (mime) {
]));
}(MIME_JSON));
// Builds an array of length `times` with every slot set to `value`.
// Note that all elements reference the *same* value object.
const repeat = (times, value) => new Array(times).fill(value);
module.exports = exports =
class SwaggerContext {
constructor (path) {
@ -75,6 +83,7 @@ module.exports = exports =
this._pathParams = new Map();
this._pathParamNames = [];
this._pathTokens = tokenize(path, this);
this._tags = new Set();
}
header (...args) {
@ -262,6 +271,18 @@ module.exports = exports =
return this;
}
// Adds one or more tags to this endpoint. Tags are collected in a Set
// (so duplicates are ignored) and later emitted as the operation's
// `tags` array when the Swagger/OpenAPI description is built.
// Returns `this` so calls can be chained.
tag (...tags) {
  // validate that every positional argument is a string
  tags = check(
    'endpoint.tag',
    tags,
    [...repeat(Math.max(1, tags.length), ['tag', 'string'])]
  );
  for (const tag of tags) {
    this._tags.add(tag);
  }
  return this;
}
deprecated (...args) {
const [flag] = check(
'endpoint.summary',
@ -284,6 +305,9 @@ module.exports = exports =
for (const response of swaggerObj._responses.entries()) {
this._responses.set(response[0], response[1]);
}
for (const tag of swaggerObj._tags) {
this._tags.add(tag);
}
if (!this._bodyParam && swaggerObj._bodyParam) {
this._bodyParam = swaggerObj._bodyParam;
}
@ -335,6 +359,9 @@ module.exports = exports =
if (this._summary) {
operation.summary = this._summary;
}
if (this._tags) {
operation.tags = Array.from(this._tags);
}
if (this._bodyParam) {
operation.consumes = (
this._bodyParam.contentTypes

View File

@ -36,6 +36,10 @@ const validation = require('@arangodb/foxx/router/validation');
const $_ROUTES = Symbol.for('@@routes'); // routes and child routers
const $_MIDDLEWARE = Symbol.for('@@middleware'); // middleware
// Upper-cases the first character of `str`, leaving the rest untouched.
// `str` is expected to be non-empty (str[0] would be undefined otherwise).
function ucFirst (str) {
  const head = str[0];
  const tail = str.slice(1);
  return head.toUpperCase() + tail;
}
module.exports =
class Tree {
constructor (context, router) {
@ -135,11 +139,16 @@ module.exports =
buildSwaggerPaths () {
const paths = {};
const ids = new Set();
for (const route of this.flatten()) {
const parts = [];
const swagger = new SwaggerContext();
let i = 0;
const names = [];
for (const item of route) {
if (item.name) {
names.push(item.name);
}
if (item.router) {
swagger._merge(item, true);
} else {
@ -164,10 +173,28 @@ module.exports =
}
const pathItem = paths[path];
const operation = swagger._buildOperation();
if (names.length) {
operation.operationId = names
.map((name, i) => (i ? ucFirst(name) : name))
.join('');
}
for (let method of swagger._methods) {
method = method.toLowerCase();
if (!pathItem[method]) {
pathItem[method] = operation;
if (operation.operationId && swagger._methods.length > 1) {
const op = Object.assign({}, operation);
pathItem[method] = op;
if (ids.has(op.operationId)) {
let i = 2;
while (ids.has(op.operationId + i)) {
i++;
}
op.operationId += i;
}
ids.add(op.operationId);
} else {
pathItem[method] = operation;
}
}
}
}

View File

@ -21,6 +21,9 @@ const router = createRouter();
module.exports = router;
router.tag('<%= document %>');
router.get(function (req, res) {
res.send(<%= documents %>.all());
}, 'list')

View File

@ -20,6 +20,10 @@ const HTTP_CONFLICT = status('conflict');
const router = createRouter();
module.exports = router;
router.tag('<%= document %>');
const New<%= model %> = Object.assign({}, <%= model %>, {
schema: Object.assign({}, <%= model %>.schema, {
_from: joi.string(),

View File

@ -108,6 +108,8 @@ char const* Exception::what() const throw() { return _errorMessage.c_str(); }
void Exception::appendLocation () {
if (_code == TRI_ERROR_INTERNAL) {
_errorMessage += std::string(" (exception location: ") + _file + ":" + std::to_string(_line) + "). Please report this error to arangodb.com";
} else if (_code == TRI_ERROR_OUT_OF_MEMORY) {
_errorMessage += std::string(" (exception location: ") + _file + ":" + std::to_string(_line) + ").";
}
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE

View File

@ -0,0 +1,163 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_BASICS_FIXED_SIZE_ALLOCATOR_H
#define ARANGODB_BASICS_FIXED_SIZE_ALLOCATOR_H 1
#include "Basics/Common.h"
#include "Logger/Logger.h"
namespace arangodb {
class FixedSizeAllocator {
 private:
  /// @brief a contiguous chunk of memory from which fixed-size items
  /// are handed out one after another. memory is only reclaimed when
  /// the whole block is destroyed
  class MemoryBlock {
   public:
    MemoryBlock(MemoryBlock const&) = delete;
    MemoryBlock& operator=(MemoryBlock const&) = delete;

    /// @brief creates room for nrItems items of itemSize bytes each.
    /// 64 spare bytes are allocated so the usable region can be aligned
    /// to a 64-byte boundary (assumed cache line size)
    MemoryBlock(size_t itemSize, size_t nrItems)
        : _itemSize(itemSize),
          _nrAlloc(nrItems),
          _nrUsed(0),
          _alloc(nullptr),
          _data(nullptr) {
      _alloc = new char[(itemSize * nrItems) + 64];

      // adjust to cache line offset (assumed to be 64 bytes)
      _data = reinterpret_cast<char*>(
          (reinterpret_cast<uintptr_t>(_alloc) + 63) & ~((uintptr_t)0x3fu));
    }

    /// @brief move-construct, leaving the other block empty
    MemoryBlock(MemoryBlock&& other)
        : _itemSize(other._itemSize),
          _nrAlloc(other._nrAlloc),
          _nrUsed(other._nrUsed),
          _alloc(other._alloc),
          _data(other._data) {
      other._nrAlloc = 0;
      other._nrUsed = 0;
      other._alloc = nullptr;
      other._data = nullptr;
    }

    /// @brief move-assign, freeing our own memory and leaving the other
    /// block empty. both blocks must use the same item size
    MemoryBlock& operator=(MemoryBlock&& other) {
      if (this != &other) {
        TRI_ASSERT(_itemSize == other._itemSize);

        delete[] _alloc;

        _nrAlloc = other._nrAlloc;
        _nrUsed = other._nrUsed;
        _alloc = other._alloc;
        _data = other._data;

        other._nrAlloc = 0;
        other._nrUsed = 0;
        other._alloc = nullptr;
        other._data = nullptr;
      }
      return *this;
    }

    ~MemoryBlock() { delete[] _alloc; }

    /// @brief hands out a pointer to the next unused item in the block.
    /// the caller must have checked full() beforehand
    void* next() {
      TRI_ASSERT(_nrUsed < _nrAlloc);
      return static_cast<void*>(_data + (_itemSize * _nrUsed++));
    }

    /// @brief whether or not all items of the block have been handed out
    inline bool full() const { return _nrUsed == _nrAlloc; }

    /// @brief total number of bytes held by this block, including the
    /// alignment padding in front of _data
    size_t memoryUsage() const {
      return static_cast<size_t>(_data - _alloc) + _itemSize * _nrAlloc;
    }

   private:
    size_t const _itemSize;  // size of a single item, in bytes
    size_t _nrAlloc;         // total number of items in the block
    size_t _nrUsed;          // number of items handed out so far
    char* _alloc;            // raw (unaligned) allocation
    char* _data;             // start of the aligned, usable region
  };

 public:
  FixedSizeAllocator(FixedSizeAllocator const&) = delete;
  FixedSizeAllocator& operator=(FixedSizeAllocator const&) = delete;

  /// @brief creates an allocator handing out items of itemSize bytes.
  /// itemSize must be at least sizeof(void*), because deallocated items
  /// are linked into an intrusive freelist stored inside the items
  explicit FixedSizeAllocator(size_t itemSize)
      : _itemSize(itemSize), _freelist(nullptr) {
    TRI_ASSERT(itemSize >= sizeof(void*));
    _blocks.reserve(4);
  }

  ~FixedSizeAllocator() = default;

  /// @brief returns a pointer to an unused item, either recycled from
  /// the freelist or carved out of the most recent memory block
  void* allocate() {
    if (_freelist != nullptr) {
      // recycle a previously deallocated item. the first bytes of each
      // freed item store the pointer to the next freelist entry
      void* element = _freelist;
      _freelist = *reinterpret_cast<void**>(_freelist);
      return element;
    }

    if (_blocks.empty() || _blocks.back()->full()) {
      // all blocks are full. allocate a new one
      allocateBlock();
    }

    TRI_ASSERT(!_blocks.empty());
    TRI_ASSERT(!_blocks.back()->full());

    return _blocks.back()->next();
  }

  /// @brief releases all items at once by dropping every memory block
  void deallocateAll() {
    _blocks.clear();
    _freelist = nullptr;
  }

  /// @brief returns a single item to the allocator by pushing it onto
  /// the intrusive freelist; allocate() will recycle it later
  void deallocate(void* value) noexcept {
    *reinterpret_cast<void**>(value) = _freelist;
    _freelist = value;
  }

  /// @brief total number of bytes allocated by all memory blocks
  size_t memoryUsage() const {
    size_t total = 0;
    for (auto const& it : _blocks) {
      total += it->memoryUsage();
    }
    return total;
  }

 private:
  /// @brief allocates an additional memory block. block capacities grow
  /// exponentially with the number of blocks (128, 256, ... items),
  /// capped at 128 << 8 = 32768 items per block
  void allocateBlock() {
    size_t const size = 128 << (std::min)(size_t(8), _blocks.size());
    // move the unique_ptr into the vector directly instead of emplacing
    // the raw pointer and calling release() afterwards; this avoids any
    // transient double ownership of the block
    _blocks.emplace_back(std::make_unique<MemoryBlock>(_itemSize, size));
  }

  std::vector<std::unique_ptr<MemoryBlock>> _blocks;  // all blocks allocated so far
  size_t _itemSize;                                   // size of a single item, in bytes
  void* _freelist;                                    // head of the intrusive freelist
};
}
#endif

View File

@ -134,7 +134,7 @@ struct IndexBucket {
}
#endif
_nrAlloc = numberElements;
_nrAlloc = static_cast<IndexType>(numberElements);
} catch (...) {
deallocateTempfile();
TRI_ASSERT(_file == -1);