
Merge branch 'engine-api' of https://github.com/arangodb/arangodb into engine-api

# Conflicts:
#	arangod/CMakeLists.txt
#	arangod/RocksDBEngine/RocksDBIndexFactory.cpp
Simon Grätzer 2017-03-27 18:30:38 +02:00
commit 41e043cf40
67 changed files with 2510 additions and 1362 deletions

View File

@@ -4,6 +4,19 @@ import os
import json
#import MarkdownPP
RESET = '\033[0m'
def make_std_color(No):
# defined for 1 through 7
return '\033[3' + No + 'm'
def make_color(No):
# defined for 1 through 255
return '\033[38;5;' + No + 'm'
WRN_COLOR = make_std_color('3')
ERR_COLOR = make_std_color('1')
STD_COLOR = make_color('8')
################################################################################
### @brief length of the swagger definition namespace
################################################################################
@@ -31,8 +44,7 @@ def getReference(name, source, verb):
try:
ref = name['$ref'][defLen:]
except Exception as x:
print >>sys.stderr, "No reference in: "
print >>sys.stderr, name
print >>sys.stderr, "%sNo reference in: %s%s" % (ERR_COLOR, name, RESET)
raise
if not ref in swagger['definitions']:
fn = ''
@@ -40,7 +52,7 @@ def getReference(name, source, verb):
fn = swagger['paths'][route][verb]['x-filename']
else:
fn = swagger['definitions'][source]['x-filename']
print >> sys.stderr, json.dumps(swagger['definitions'], indent=4, separators=(', ',': '), sort_keys=True)
print >> sys.stderr, STD_COLOR + json.dumps(swagger['definitions'], indent=4, separators=(', ',': '), sort_keys=True) + RESET
raise Exception("invalid reference: " + ref + " in " + fn)
return ref
@@ -85,8 +97,8 @@ def unwrapPostJson(reference, layer):
try:
subStructRef = getReference(thisParam['items'], reference, None)
except:
print >>sys.stderr, "while analyzing: " + param
print >>sys.stderr, thisParam
print >>sys.stderr, ERR_COLOR + "while analyzing: " + param + RESET
print >>sys.stderr, "%s%s%s" % (WRN_COLOR, thisParam, RESET)
rc += "\n" + unwrapPostJson(subStructRef, layer + 1)
else:
rc += ' ' * layer + " - **" + param + "**: " + TrimThisParam(thisParam['description'], layer) + '\n'
@@ -122,8 +134,8 @@ def getRestReplyBodyParam(param):
try:
rc += unwrapPostJson(getReference(thisVerb['responses'][param]['schema'], route, verb), 0)
except Exception:
print >>sys.stderr,"failed to search " + param + " in: "
print >>sys.stderr,json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True)
print >>sys.stderr, ERR_COLOR + "failed to search " + param + " in: " + RESET
print >>sys.stderr, WRN_COLOR + json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True) + RESET
raise
return rc + "\n"
@@ -273,14 +285,14 @@ def replaceCode(lines, blockName):
(verb,route) = headerMatch.group(1).split(',')[0].split(' ')
verb = verb.lower()
except:
print >> sys.stderr, "failed to parse header from: " + headerMatch.group(1) + " while analysing " + blockName
print >> sys.stderr, ERR_COLOR + "failed to parse header from: " + headerMatch.group(1) + " while analysing " + blockName + RESET
raise
try:
thisVerb = swagger['paths'][route][verb]
except:
print >> sys.stderr, "failed to locate route in the swagger json: [" + verb + " " + route + "]" + " while analysing " + blockName
print >> sys.stderr, lines
print >> sys.stderr, ERR_COLOR + "failed to locate route in the swagger json: [" + verb + " " + route + "]" + " while analysing " + blockName + RESET
print >> sys.stderr, WRN_COLOR + lines + RESET
raise
for (oneRX, repl) in RX:
@@ -395,7 +407,7 @@ def walk_on_files(inDirPath, outDirPath):
mdpp.close()
md.close()
findStartCode(md, outFileFull)
print "Processed %d files, skipped %d" % (count, skipped)
print STD_COLOR + "Processed %d files, skipped %d" % (count, skipped) + RESET
def findStartCode(fd,full_path):
inFD = open(full_path, "r")
@@ -422,7 +434,7 @@ def findStartCode(fd,full_path):
try:
textFile = replaceCodeFullFile(textFile)
except:
print >>sys.stderr, "while parsing :\n" + textFile
print >>sys.stderr, ERR_COLOR + "while parsing : " + full_path + RESET
raise
#print "9" * 80
#print textFile
@@ -438,10 +450,10 @@ def replaceText(text, pathOfFile, searchText):
#print '7'*80
global dokuBlocks
if not searchText in dokuBlocks[0]:
print >> sys.stderr, "Failed to locate the docublock '%s' for replacing it into the file '%s'\n have:" % (searchText, pathOfFile)
print >> sys.stderr, dokuBlocks[0].keys()
print >> sys.stderr, '*' * 80
print >> sys.stderr, text
print >> sys.stderr, "%sFailed to locate the docublock '%s' for replacing it into the file '%s'\n have:%s" % (ERR_COLOR, searchText, pathOfFile, RESET)
print >> sys.stderr, "%s%s%s" % (WRN_COLOR, dokuBlocks[0].keys(), RESET)
print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET
print >> sys.stderr, WRN_COLOR + text + RESET
exit(1)
#print '7'*80
#print dokuBlocks[0][searchText]
@@ -453,22 +465,22 @@ def replaceTextInline(text, pathOfFile, searchText):
''' reads the mdpp and generates the md '''
global dokuBlocks
if not searchText in dokuBlocks[1]:
print >> sys.stderr, "Failed to locate the inline docublock '%s' for replacing it into the file '%s'\n have:" % (searchText, pathOfFile)
print >> sys.stderr, dokuBlocks[1].keys()
print >> sys.stderr, '*' * 80
print >> sys.stderr, text
print >> sys.stderr, "%sFailed to locate the inline docublock '%s' for replacing it into the file '%s'\n have: %s" % (ERR_COLOR, searchText, pathOfFile, RESET)
print >> sys.stderr, "%s%s%s" %(WRN_COLOR, dokuBlocks[1].keys(), RESET)
print >> sys.stderr, ERR_COLOR + '*' * 80 + RESET
print >> sys.stderr, WRN_COLOR + text + RESET
exit(1)
rePattern = r'(?s)\s*@startDocuBlockInline\s+'+ searchText +'\s.*?@endDocuBlock\s' + searchText
# (?s) is equivalent to flags=re.DOTALL but works in Python 2.6
match = re.search(rePattern, text)
if (match == None):
print >> sys.stderr, "failed to match with '%s' for %s in file %s in: \n%s" % (rePattern, searchText, pathOfFile, text)
print >> sys.stderr, "%sfailed to match with '%s' for %s in file %s in: \n%s" % (ERR_COLOR, rePattern, searchText, pathOfFile, text, RESET)
exit(1)
subtext = match.group(0)
if (len(re.findall('@startDocuBlock', subtext)) > 1):
print >> sys.stderr, "failed to snap with '%s' on end docublock for %s in %s our match is:\n%s" % (rePattern, searchText, pathOfFile, subtext)
print >> sys.stderr, "%sfailed to snap with '%s' on end docublock for %s in %s our match is:\n%s" % (ERR_COLOR, rePattern, searchText, pathOfFile, subtext, RESET)
exit(1)
return re.sub(rePattern, dokuBlocks[1][searchText], text)
@@ -495,7 +507,7 @@ def readStartLine(line):
try:
thisBlockName = SEARCH_START.search(line).group(1).strip()
except:
print >> sys.stderr, "failed to read startDocuBlock: [" + line + "]"
print >> sys.stderr, ERR_COLOR + "failed to read startDocuBlock: [" + line + "]" + RESET
exit(1)
dokuBlocks[thisBlockType][thisBlockName] = ""
return STATE_SEARCH_END
@@ -525,10 +537,10 @@ def loadDokuBlocks():
if blockFilter != None:
remainBlocks= {}
print "filtering blocks"
print STD_COLOR + "filtering blocks" + RESET
for oneBlock in dokuBlocks[0]:
if blockFilter.match(oneBlock) != None:
print "found block %s" % oneBlock
print "%sfound block %s%s" % (STD_COLOR, oneBlock, RESET)
#print dokuBlocks[0][oneBlock]
remainBlocks[oneBlock] = dokuBlocks[0][oneBlock]
dokuBlocks[0] = remainBlocks
@@ -541,14 +553,14 @@ def loadDokuBlocks():
#print dokuBlocks[0][oneBlock]
#print "6"*80
except:
print >>sys.stderr, "while parsing :\n" + oneBlock
print >>sys.stderr, ERR_COLOR + "while parsing :\n" + oneBlock + RESET
raise
for oneBlock in dokuBlocks[1]:
try:
dokuBlocks[1][oneBlock] = replaceCode(dokuBlocks[1][oneBlock], oneBlock)
except:
print >>sys.stderr, "while parsing :\n" + oneBlock
print >>sys.stderr, WRN_COLOR + "while parsing :\n" + oneBlock + RESET
raise
@@ -560,15 +572,15 @@ if __name__ == '__main__':
outDir = sys.argv[2]
swaggerJson = sys.argv[3]
if len(sys.argv) > 4 and sys.argv[4].strip() != '':
print "filtering " + sys.argv[4]
print STD_COLOR + "filtering " + sys.argv[4] + RESET
fileFilter = re.compile(sys.argv[4])
if len(sys.argv) > 5 and sys.argv[5].strip() != '':
print "filtering Docublocks: " + sys.argv[5]
print STD_COLOR + "filtering Docublocks: " + sys.argv[5] + RESET
blockFilter = re.compile(sys.argv[5])
f=open(swaggerJson, 'rU')
swagger= json.load(f)
f.close()
loadDokuBlocks()
print "loaded %d / %d docu blocks" % (len(dokuBlocks[0]), len(dokuBlocks[1]))
print "%sloaded %d / %d docu blocks%s" % (STD_COLOR, len(dokuBlocks[0]), len(dokuBlocks[1]), RESET)
#print dokuBlocks[0].keys()
walk_on_files(inDir, outDir)
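The color helpers at the top of this script emit standard ANSI escape sequences: '\033[3Nm' selects one of the eight basic foreground colors (1 through 7), '\033[38;5;Nm' selects from the 256-color palette, and '\033[0m' (RESET) restores the defaults. A minimal standalone sketch of the same scheme, written in C++ rather than the script's Python:

#include <iostream>

// Minimal sketch of the ANSI escapes wrapped by make_std_color/make_color:
// "\033[31m".."\033[37m" are the basic foreground colors,
// "\033[38;5;Nm" picks from the 256-color palette, "\033[0m" resets.
int main() {
  std::cout << "\033[31m" << "error text" << "\033[0m\n";       // basic color 1 (red)
  std::cout << "\033[33m" << "warning text" << "\033[0m\n";     // basic color 3 (yellow)
  std::cout << "\033[38;5;8m" << "status text" << "\033[0m\n";  // palette color 8 (gray)
  return 0;
}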

View File

@@ -38,6 +38,7 @@
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::aql;

View File

@@ -218,6 +218,8 @@ SET(ARANGOD_SOURCES
GeneralServer/RestHandlerFactory.cpp
GeneralServer/RestStatus.cpp
GeneralServer/VppCommTask.cpp
Graph/BreadthFirstEnumerator.cpp
Graph/NeighborsEnumerator.cpp
Indexes/Index.cpp
Indexes/IndexIterator.cpp
Indexes/SimpleAttributeEqualityMatcher.cpp
@@ -337,6 +339,7 @@ SET(ARANGOD_SOURCES
VocBase/SingleServerTraverser.cpp
VocBase/TransactionManager.cpp
VocBase/Traverser.cpp
VocBase/TraverserCache.cpp
VocBase/TraverserOptions.cpp
VocBase/modes.cpp
VocBase/replication-applier.cpp
@@ -438,7 +441,7 @@ set(ARANGOD_SOURCES
RocksDBEngine/RocksDBEntry.cpp
RocksDBEngine/RocksDBIndexFactory.cpp
RocksDBEngine/RocksDBPrimaryIndex.cpp
RocksDBEngine/RocksDBEdgeIndex.cpp
RocksDBEngine/RocksDBPrimaryMockIndex.cpp
RocksDBEngine/RocksDBTransactionCollection.cpp
RocksDBEngine/RocksDBTransactionState.cpp
RocksDBEngine/RocksDBTypes.cpp

View File

@@ -79,19 +79,23 @@ Table::Table(uint32_t logSize)
_size(static_cast<uint64_t>(1) << _logSize),
_shift(32 - _logSize),
_mask((_size - 1) << _shift),
_buckets(new GenericBucket[_size]),
_buffer(new uint8_t[(_size * BUCKET_SIZE) + Table::padding]),
_buckets(reinterpret_cast<GenericBucket*>(
reinterpret_cast<uint64_t>((_buffer.get() + 63)) &
~(static_cast<uint64_t>(0x3fU)))),
_auxiliary(nullptr),
_bucketClearer(defaultClearer),
_slotsTotal(_size),
_slotsUsed(0) {
_slotsUsed(static_cast<uint64_t>(0)) {
_state.lock();
_state.toggleFlag(State::Flag::disabled);
memset(_buckets.get(), 0, BUCKET_SIZE * _size);
memset(_buckets, 0, BUCKET_SIZE * _size);
_state.unlock();
}
uint64_t Table::allocationSize(uint32_t logSize) {
return sizeof(Table) + (BUCKET_SIZE * (static_cast<uint64_t>(1) << logSize));
return sizeof(Table) + (BUCKET_SIZE * (static_cast<uint64_t>(1) << logSize)) +
Table::padding;
}
uint64_t Table::memoryUsage() const { return Table::allocationSize(_logSize); }
@@ -108,7 +112,6 @@ std::pair<void*, std::shared_ptr<Table>> Table::fetchAndLockBucket(
if (ok) {
ok = !_state.isSet(State::Flag::disabled);
if (ok) {
TRI_ASSERT(_buckets.get() != nullptr);
bucket = &(_buckets[(hash & _mask) >> _shift]);
source = shared_from_this();
ok = bucket->lock(maxTries);
@@ -154,7 +157,6 @@ void* Table::primaryBucket(uint32_t index) {
if (!isEnabled()) {
return nullptr;
}
TRI_ASSERT(_buckets.get() != nullptr);
return &(_buckets[index]);
}
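The constructor change above trades the typed bucket array for a raw byte buffer that is over-allocated by Table::padding (64) bytes, then rounds the bucket pointer up to the next 64-byte boundary, presumably so each bucket starts on its own cache line. A minimal sketch of that rounding trick, under that alignment assumption:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

// Sketch only: over-allocate by one cache line and round the pointer up,
// as the Table constructor does with _buffer and _buckets.
// (raw + 63) & ~63 is the smallest 64-byte-aligned address >= raw.
struct AlignedBuckets {
  std::unique_ptr<uint8_t[]> buffer;  // owns the allocation
  void* buckets;                      // 64-byte-aligned view into it

  explicit AlignedBuckets(size_t payloadSize)
      : buffer(new uint8_t[payloadSize + 64]) {
    uintptr_t raw = reinterpret_cast<uintptr_t>(buffer.get());
    buckets = reinterpret_cast<void*>((raw + 63) & ~static_cast<uintptr_t>(0x3f));
    std::memset(buckets, 0, payloadSize);  // payload always fits after rounding
  }
};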

View File

@@ -43,6 +43,7 @@ class Table : public std::enable_shared_from_this<Table> {
static const uint32_t maxLogSize;
static constexpr uint32_t standardLogSizeAdjustment = 6;
static constexpr int64_t triesGuarantee = -1;
static constexpr uint64_t padding = 64;
typedef std::function<void(void*)> BucketClearer;
@@ -187,7 +188,8 @@ class Table : public std::enable_shared_from_this<Table> {
uint64_t _size;
uint32_t _shift;
uint32_t _mask;
std::unique_ptr<GenericBucket[]> _buckets;
std::unique_ptr<uint8_t[]> _buffer;
GenericBucket* _buckets;
std::shared_ptr<Table> _auxiliary;

View File

@@ -26,39 +26,48 @@
#include "Cluster/ClusterMethods.h"
#include "Cluster/ClusterTraverser.h"
#include "Transaction/Helpers.h"
#include "Transaction/Methods.h"
#include "VocBase/TraverserCache.h"
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using ClusterEdgeCursor = arangodb::traverser::ClusterEdgeCursor;
ClusterEdgeCursor::ClusterEdgeCursor(VPackSlice v, uint64_t depth,
ClusterEdgeCursor::ClusterEdgeCursor(StringRef vertexId, uint64_t depth,
arangodb::traverser::ClusterTraverser* traverser)
: _position(0) {
: _position(0), _resolver(traverser->_trx->resolver()), _traverser(traverser) {
transaction::BuilderLeaser leased(traverser->_trx);
fetchEdgesFromEngines(traverser->_dbname, traverser->_engines, v, depth,
transaction::BuilderLeaser b(traverser->_trx);
b->add(VPackValuePair(vertexId.data(), vertexId.length(), VPackValueType::String));
fetchEdgesFromEngines(traverser->_dbname, traverser->_engines, b->slice(), depth,
traverser->_edges, _edgeList, traverser->_datalake,
*(leased.get()), traverser->_filteredPaths,
traverser->_readDocuments);
}
bool ClusterEdgeCursor::next(std::vector<VPackSlice>& result, size_t& cursorId) {
bool ClusterEdgeCursor::next(std::function<void(StringRef const&,
VPackSlice, size_t)> callback) {
if (_position < _edgeList.size()) {
result.emplace_back(_edgeList[_position]);
VPackSlice edge = _edgeList[_position];
std::string eid = transaction::helpers::extractIdString(_resolver, edge, VPackSlice());
StringRef persId = _traverser->traverserCache()->persistString(StringRef(eid));
callback(persId, edge, _position);
++_position;
return true;
}
return false;
}
bool ClusterEdgeCursor::readAll(std::unordered_set<VPackSlice>& result, size_t& cursorId) {
if (_position == 0) {
// We have not yet returned anything. So we simply return everything at once.
std::copy(_edgeList.begin(), _edgeList.end(), std::inserter(result, result.end()));
_position++;
return true;
void ClusterEdgeCursor::readAll(std::function<void(StringRef const&,
VPackSlice, size_t&)> callback) {
for (auto const& edge : _edgeList) {
std::string eid = transaction::helpers::extractIdString(_resolver, edge, VPackSlice());
StringRef persId = _traverser->traverserCache()->persistString(StringRef(eid));
callback(persId, edge, _position);
}
// We have already returned everything last time.
return false;
}
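Both cursor methods now push results through a callback instead of filling caller-owned containers, so callers can filter or copy per edge without an intermediate buffer, and the cursor can hand over a persisted id alongside each edge slice. A reduced sketch of the pattern with hypothetical names (int stands in for the edge slice):

#include <cstddef>
#include <functional>
#include <vector>

// Hypothetical mini-cursor illustrating the callback style this diff adopts.
struct MiniCursor {
  std::vector<int> edges;  // stand-in for _edgeList
  size_t position = 0;

  // next(): deliver one element and advance; false once exhausted.
  bool next(std::function<void(int edge, size_t pos)> const& cb) {
    if (position >= edges.size()) {
      return false;
    }
    cb(edges[position], position);
    ++position;
    return true;
  }

  // readAll(): deliver every element in a single pass.
  void readAll(std::function<void(int edge, size_t pos)> const& cb) {
    for (size_t i = 0; i < edges.size(); ++i) {
      cb(edges[i], i);
    }
  }
};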

View File

@@ -27,25 +27,30 @@
#include "VocBase/TraverserOptions.h"
namespace arangodb {
class CollectionNameResolver;
namespace traverser {
class Traverser;
class ClusterEdgeCursor : public EdgeCursor {
public:
ClusterEdgeCursor(arangodb::velocypack::Slice, uint64_t, ClusterTraverser*);
ClusterEdgeCursor(StringRef vid, uint64_t, ClusterTraverser*);
~ClusterEdgeCursor() {
}
bool next(std::vector<arangodb::velocypack::Slice>&, size_t&) override;
bool next(std::function<void(arangodb::StringRef const&, arangodb::velocypack::Slice, size_t)> callback) override;
bool readAll(std::unordered_set<arangodb::velocypack::Slice>&, size_t&) override;
void readAll(std::function<void(arangodb::StringRef const&, arangodb::velocypack::Slice, size_t&)> callback) override;
private:
std::vector<arangodb::velocypack::Slice> _edgeList;
size_t _position;
CollectionNameResolver const* _resolver;
arangodb::traverser::Traverser* _traverser;
};
}
}

View File

@@ -1486,7 +1486,7 @@ int fetchEdgesFromEngines(
std::unordered_map<ServerID, traverser::TraverserEngineID> const* engines,
VPackSlice const vertexId,
size_t depth,
std::unordered_map<VPackSlice, VPackSlice>& cache,
std::unordered_map<StringRef, VPackSlice>& cache,
std::vector<VPackSlice>& result,
std::vector<std::shared_ptr<VPackBuilder>>& datalake,
VPackBuilder& builder,
@@ -1547,11 +1547,12 @@ int fetchEdgesFromEngines(
VPackSlice edges = resSlice.get("edges");
for (auto const& e : VPackArrayIterator(edges)) {
VPackSlice id = e.get(StaticStrings::IdString);
auto resE = cache.find(id);
StringRef idRef(id);
auto resE = cache.find(idRef);
if (resE == cache.end()) {
// This edge is not yet cached.
allCached = false;
cache.emplace(id, e);
cache.emplace(idRef, e);
result.emplace_back(e);
} else {
result.emplace_back(resE->second);
@@ -1576,8 +1577,8 @@ int fetchEdgesFromEngines(
void fetchVerticesFromEngines(
std::string const& dbname,
std::unordered_map<ServerID, traverser::TraverserEngineID> const* engines,
std::unordered_set<VPackSlice>& vertexIds,
std::unordered_map<VPackSlice, std::shared_ptr<VPackBuffer<uint8_t>>>&
std::unordered_set<StringRef>& vertexIds,
std::unordered_map<StringRef, std::shared_ptr<VPackBuffer<uint8_t>>>&
result,
VPackBuilder& builder) {
auto cc = ClusterComm::instance();
@@ -1594,8 +1595,8 @@ void fetchVerticesFromEngines(
builder.add(VPackValue("keys"));
builder.openArray();
for (auto const& v : vertexIds) {
TRI_ASSERT(v.isString());
builder.add(v);
//TRI_ASSERT(v.isString());
builder.add(VPackValuePair(v.data(), v.length(), VPackValueType::String));
}
builder.close(); // 'keys' Array
builder.close(); // base object
@@ -1638,17 +1639,18 @@ void fetchVerticesFromEngines(
resSlice, "errorMessage", TRI_errno_string(code)));
}
for (auto const& pair : VPackObjectIterator(resSlice)) {
if (vertexIds.erase(pair.key) == 0) {
StringRef key(pair.key);
if (vertexIds.erase(key) == 0) {
// We either found the same vertex twice,
// or found a vertex we did not request.
// Anyways something somewhere went seriously wrong
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_GOT_CONTRADICTING_ANSWERS);
}
TRI_ASSERT(result.find(pair.key) == result.end());
TRI_ASSERT(result.find(key) == result.end());
auto val = VPackBuilder::clone(pair.value);
VPackSlice id = val.slice().get(StaticStrings::IdString);
TRI_ASSERT(id.isString());
result.emplace(id, val.steal());
result.emplace(StringRef(id), val.steal());
}
}

View File

@@ -25,7 +25,7 @@
#define ARANGOD_CLUSTER_CLUSTER_METHODS_H 1
#include "Basics/Common.h"
#include "Basics/StringRef.h"
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
@@ -130,9 +130,8 @@ int getDocumentOnCoordinator(
int fetchEdgesFromEngines(
std::string const&,
std::unordered_map<ServerID, traverser::TraverserEngineID> const*,
arangodb::velocypack::Slice const, size_t,
std::unordered_map<arangodb::velocypack::Slice,
arangodb::velocypack::Slice>&,
arangodb::velocypack::Slice vertexId, size_t,
std::unordered_map<StringRef, arangodb::velocypack::Slice>&,
std::vector<arangodb::velocypack::Slice>&,
std::vector<std::shared_ptr<arangodb::velocypack::Builder>>&,
arangodb::velocypack::Builder&, size_t&, size_t&);
@@ -149,9 +148,8 @@ int fetchEdgesFromEngines(
void fetchVerticesFromEngines(
std::string const&,
std::unordered_map<ServerID, traverser::TraverserEngineID> const*,
std::unordered_set<arangodb::velocypack::Slice>&,
std::unordered_map<arangodb::velocypack::Slice,
std::shared_ptr<arangodb::velocypack::Buffer<uint8_t>>>&,
std::unordered_set<StringRef>&,
std::unordered_map<StringRef, std::shared_ptr<arangodb::velocypack::Buffer<uint8_t>>>&,
arangodb::velocypack::Builder&);
////////////////////////////////////////////////////////////////////////////////

View File

@@ -25,7 +25,9 @@
#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterMethods.h"
#include "Graph/BreadthFirstEnumerator.h"
#include "Transaction/Helpers.h"
#include "VocBase/TraverserCache.h"
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
@@ -43,17 +45,17 @@ ClusterTraverser::ClusterTraverser(
_opts->linkTraverser(this);
}
void ClusterTraverser::setStartVertex(std::string const& id) {
void ClusterTraverser::setStartVertex(std::string const& vid) {
_verticesToFetch.clear();
_startIdBuilder->clear();
_startIdBuilder->add(VPackValue(id));
_startIdBuilder->add(VPackValue(vid));
VPackSlice idSlice = _startIdBuilder->slice();
auto it = _vertices.find(idSlice);
auto it = _vertices.find(StringRef(vid));
if (it == _vertices.end()) {
size_t firstSlash = id.find("/");
size_t firstSlash = vid.find("/");
if (firstSlash == std::string::npos ||
id.find("/", firstSlash + 1) != std::string::npos) {
vid.find("/", firstSlash + 1) != std::string::npos) {
// We can stop here. The start vertex is not a valid _id
++_filteredPaths;
_done = true;
@@ -66,23 +68,24 @@ void ClusterTraverser::setStartVertex(std::string const& id) {
_done = true;
return;
}
StringRef persId = traverserCache()->persistString(StringRef(vid));
_vertexGetter->reset(idSlice);
_vertexGetter->reset(persId);
if (_opts->useBreadthFirst) {
_enumerator.reset(
new arangodb::traverser::BreadthFirstEnumerator(this, idSlice, _opts));
new arangodb::graph::BreadthFirstEnumerator(this, idSlice, _opts));
} else {
_enumerator.reset(
new arangodb::traverser::DepthFirstEnumerator(this, idSlice, _opts));
new arangodb::traverser::DepthFirstEnumerator(this, vid, _opts));
}
_done = false;
}
bool ClusterTraverser::getVertex(VPackSlice edge,
std::vector<VPackSlice>& result) {
std::vector<StringRef>& result) {
bool res = _vertexGetter->getVertex(edge, result);
if (res) {
VPackSlice other = result.back();
StringRef const& other = result.back();
if (_vertices.find(other) == _vertices.end()) {
// Vertex not yet cached. Prepare it.
_verticesToFetch.emplace(other);
@@ -91,13 +94,13 @@ bool ClusterTraverser::getVertex(VPackSlice edge,
return res;
}
bool ClusterTraverser::getSingleVertex(VPackSlice edge, VPackSlice comp,
uint64_t depth, VPackSlice& result) {
bool res = _vertexGetter->getSingleVertex(edge, comp, depth, result);
bool ClusterTraverser::getSingleVertex(arangodb::velocypack::Slice edge, StringRef const sourceVertexId,
uint64_t depth, StringRef& targetVertexId) {
bool res = _vertexGetter->getSingleVertex(edge, sourceVertexId, depth, targetVertexId);
if (res) {
if (_vertices.find(result) == _vertices.end()) {
if (_vertices.find(targetVertexId) == _vertices.end()) {
// Vertex not yet cached. Prepare it.
_verticesToFetch.emplace(result);
_verticesToFetch.emplace(targetVertexId);
}
}
return res;
@@ -111,8 +114,8 @@ void ClusterTraverser::fetchVertices() {
_verticesToFetch.clear();
}
aql::AqlValue ClusterTraverser::fetchVertexData(VPackSlice idString) {
TRI_ASSERT(idString.isString());
aql::AqlValue ClusterTraverser::fetchVertexData(StringRef idString) {
//TRI_ASSERT(idString.isString());
auto cached = _vertices.find(idString);
if (cached == _vertices.end()) {
// Vertex not yet cached. Prepare for load.
@@ -125,23 +128,23 @@ aql::AqlValue ClusterTraverser::fetchVertexData(VPackSlice idString) {
return aql::AqlValue((*cached).second->data());
}
aql::AqlValue ClusterTraverser::fetchEdgeData(VPackSlice edge) {
return aql::AqlValue(edge);
aql::AqlValue ClusterTraverser::fetchEdgeData(StringRef eid) {
return aql::AqlValue(_edges[eid]);//this->_cache->fetchAqlResult(edge);
}
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to add the real data of a vertex into a velocypack builder
//////////////////////////////////////////////////////////////////////////////
void ClusterTraverser::addVertexToVelocyPack(VPackSlice id,
void ClusterTraverser::addVertexToVelocyPack(StringRef vid,
VPackBuilder& result) {
TRI_ASSERT(id.isString());
auto cached = _vertices.find(id);
//TRI_ASSERT(id.isString());
auto cached = _vertices.find(vid);
if (cached == _vertices.end()) {
// Vertex not yet cached. Prepare for load.
_verticesToFetch.emplace(id);
_verticesToFetch.emplace(vid);
fetchVertices();
cached = _vertices.find(id);
cached = _vertices.find(vid);
}
// Now all vertices are cached!!
TRI_ASSERT(cached != _vertices.end());
@@ -152,7 +155,7 @@ void ClusterTraverser::addVertexToVelocyPack(VPackSlice id,
/// @brief Function to add the real data of an edge into a velocypack builder
//////////////////////////////////////////////////////////////////////////////
void ClusterTraverser::addEdgeToVelocyPack(arangodb::velocypack::Slice edge,
void ClusterTraverser::addEdgeToVelocyPack(StringRef eid,
arangodb::velocypack::Builder& result) {
result.add(edge);
result.add(_edges[eid]);
}
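A StringRef is a non-owning (pointer, length) view, so every ref stored as a key in _edges, _vertices or _verticesToFetch must point into memory that outlives those containers; that is the job of traverserCache()->persistString above. A sketch of such an interning store, assuming (not verified here) that this is roughly how TraverserCache behaves:

#include <cstddef>
#include <string>
#include <unordered_set>
#include <utility>

// Sketch of a string-interning store: one owned copy per distinct string.
// The returned view stays valid for the heap's lifetime, since
// std::unordered_set never moves its elements on rehash.
class StringHeap {
  std::unordered_set<std::string> _storage;

 public:
  std::pair<char const*, size_t> persist(std::string const& s) {
    auto it = _storage.insert(s).first;
    return {it->data(), it->size()};
  }
};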

View File

@@ -61,57 +61,56 @@ class ClusterTraverser final : public Traverser {
/// Returns true if the vertex passes filtering conditions
/// Also appends the _id value of the vertex to the given vector
bool getVertex(arangodb::velocypack::Slice,
std::vector<arangodb::velocypack::Slice>&) override;
bool getVertex(arangodb::velocypack::Slice, std::vector<arangodb::StringRef>&) override;
/// @brief Function to load the other side's vertex of an edge
/// Returns true if the vertex passes filtering conditions
bool getSingleVertex(arangodb::velocypack::Slice, arangodb::velocypack::Slice,
uint64_t, arangodb::velocypack::Slice&) override;
bool getSingleVertex(arangodb::velocypack::Slice edge,
StringRef const sourceVertexId,
uint64_t depth,
StringRef& targetVertexId) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to fetch the real data of a vertex into an AQLValue
//////////////////////////////////////////////////////////////////////////////
aql::AqlValue fetchVertexData(arangodb::velocypack::Slice) override;
aql::AqlValue fetchVertexData(StringRef) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to fetch the real data of an edge into an AQLValue
//////////////////////////////////////////////////////////////////////////////
aql::AqlValue fetchEdgeData(arangodb::velocypack::Slice) override;
aql::AqlValue fetchEdgeData(StringRef) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to add the real data of a vertex into a velocypack builder
//////////////////////////////////////////////////////////////////////////////
void addVertexToVelocyPack(arangodb::velocypack::Slice,
void addVertexToVelocyPack(StringRef,
arangodb::velocypack::Builder&) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to add the real data of an edge into a velocypack builder
//////////////////////////////////////////////////////////////////////////////
void addEdgeToVelocyPack(arangodb::velocypack::Slice,
void addEdgeToVelocyPack(StringRef,
arangodb::velocypack::Builder&) override;
private:
void fetchVertices();
std::unordered_map<arangodb::velocypack::Slice, arangodb::velocypack::Slice>
std::unordered_map<StringRef, arangodb::velocypack::Slice>
_edges;
std::unordered_map<arangodb::velocypack::Slice,
std::shared_ptr<arangodb::velocypack::Buffer<uint8_t>>>
std::unordered_map<StringRef, std::shared_ptr<arangodb::velocypack::Buffer<uint8_t>>>
_vertices;
std::string _dbname;
std::unordered_map<ServerID, traverser::TraverserEngineID> const* _engines;
std::unordered_set<arangodb::velocypack::Slice> _verticesToFetch;
std::unordered_set<StringRef> _verticesToFetch;
std::vector<std::shared_ptr<arangodb::velocypack::Builder>> _datalake;

View File

@@ -136,42 +136,38 @@ void BaseTraverserEngine::getEdges(VPackSlice vertex, size_t depth, VPackBuilder
// We just hope someone has locked the shards properly. We have no clue... Thanks locking
TRI_ASSERT(vertex.isString() || vertex.isArray());
size_t cursorId = 0;
size_t read = 0;
size_t filtered = 0;
ManagedDocumentResult mmdr;
std::vector<VPackSlice> result;
//std::vector<VPackSlice> result;
builder.openObject();
builder.add(VPackValue("edges"));
builder.openArray();
if (vertex.isArray()) {
for (VPackSlice v : VPackArrayIterator(vertex)) {
TRI_ASSERT(v.isString());
result.clear();
auto edgeCursor = _opts->nextCursor(&mmdr, v, depth);
while (edgeCursor->next(result, cursorId)) {
if (!_opts->evaluateEdgeExpression(result.back(), v, depth, cursorId)) {
//result.clear();
StringRef vertexId(v);
auto edgeCursor = _opts->nextCursor(&mmdr, vertexId, depth);
edgeCursor->readAll([&] (StringRef const& documentId, VPackSlice edge, size_t cursorId) {
if (!_opts->evaluateEdgeExpression(edge, StringRef(v), depth, cursorId)) {
filtered++;
result.pop_back();
} else {
builder.add(edge);
}
}
for (auto const& it : result) {
builder.add(it);
}
});
// Result now contains all valid edges, possibly including duplicates.
}
} else if (vertex.isString()) {
std::unique_ptr<arangodb::traverser::EdgeCursor> edgeCursor(_opts->nextCursor(&mmdr, vertex, depth));
while (edgeCursor->next(result, cursorId)) {
if (!_opts->evaluateEdgeExpression(result.back(), vertex, depth, cursorId)) {
std::unique_ptr<arangodb::traverser::EdgeCursor> edgeCursor(_opts->nextCursor(&mmdr, StringRef(vertex), depth));
edgeCursor->readAll([&] (StringRef const& documentId, VPackSlice edge, size_t cursorId) {
if (!_opts->evaluateEdgeExpression(edge, StringRef(vertex), depth, cursorId)) {
filtered++;
result.pop_back();
} else {
builder.add(edge);
}
}
for (auto const& it : result) {
builder.add(it);
}
});
// Result now contains all valid edges, possibly including duplicates.
} else {
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);

View File

@@ -0,0 +1,221 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////
#include "BreadthFirstEnumerator.h"
#include "VocBase/Traverser.h"
#include "VocBase/TraverserCache.h"
#include "VocBase/TraverserOptions.h"
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::traverser;
using BreadthFirstEnumerator = arangodb::graph::BreadthFirstEnumerator;
BreadthFirstEnumerator::PathStep::PathStep(StringRef const vertex)
: sourceIdx(0),
vertex(vertex) {
}
BreadthFirstEnumerator::PathStep::PathStep(size_t sourceIdx,
StringRef const edge,
StringRef const vertex) :
sourceIdx(sourceIdx), edge(edge), vertex(vertex) {
}
BreadthFirstEnumerator::BreadthFirstEnumerator(Traverser* traverser,
VPackSlice startVertex,
TraverserOptions* opts)
: PathEnumerator(traverser, startVertex.copyString(), opts),
_schreierIndex(1),
_lastReturned(0),
_currentDepth(0),
_toSearchPos(0) {
_schreier.reserve(32);
StringRef startVId = _opts->cache()->persistString(StringRef(startVertex));
_schreier.emplace_back(startVId);
_toSearch.emplace_back(NextStep(0));
}
bool BreadthFirstEnumerator::next() {
if (_isFirst) {
_isFirst = false;
if (_opts->minDepth == 0) {
return true;
}
}
_lastReturned++;
if (_lastReturned < _schreierIndex) {
// We still have something on our stack.
// Paths have been read but not returned.
return true;
}
if (_opts->maxDepth == 0) {
// Short circuit.
// We cannot find any path of length 0 or less
return false;
}
// Avoid large call stacks.
// The loop is left either when the search is exhausted
// or when a vertex yielded neighbours at the next depth.
while (true) {
if (_toSearchPos >= _toSearch.size()) {
// This depth is done. GoTo next
if (_nextDepth.empty()) {
// That's it. We are done.
return false;
}
// Move to the next depth:
// clear the current list and swap it with the next one,
// so current is filled and next is empty again.
_toSearch.clear();
_toSearchPos = 0;
_toSearch.swap(_nextDepth);
_currentDepth++;
TRI_ASSERT(_toSearchPos < _toSearch.size());
TRI_ASSERT(_nextDepth.empty());
TRI_ASSERT(_currentDepth < _opts->maxDepth);
}
// This access is always safe.
// If not it should have bailed out before.
TRI_ASSERT(_toSearchPos < _toSearch.size());
_tmpEdges.clear();
auto const nextIdx = _toSearch[_toSearchPos++].sourceIdx;
auto const nextVertex = _schreier[nextIdx].vertex;
StringRef vId;
std::unique_ptr<arangodb::traverser::EdgeCursor> cursor(_opts->nextCursor(_traverser->mmdr(), nextVertex, _currentDepth));
if (cursor != nullptr) {
bool shouldReturnPath = _currentDepth + 1 >= _opts->minDepth;
bool didInsert = false;
auto callback = [&] (arangodb::StringRef const& eid, VPackSlice e, size_t cursorIdx) -> void {
if (_opts->uniqueEdges ==
TraverserOptions::UniquenessLevel::GLOBAL) {
if (_returnedEdges.find(eid) == _returnedEdges.end()) {
// Edge not yet visited. Mark and continue.
// TODO FIXME the edge will run out of scope
_returnedEdges.emplace(eid);
} else {
// Edge filtered due to unique_constraint
_traverser->_filteredPaths++;
return;
}
}
if (!_traverser->edgeMatchesConditions(e, nextVertex,
_currentDepth,
cursorIdx)) {
return;
}
if (_traverser->getSingleVertex(e, nextVertex, _currentDepth, vId)) {
_schreier.emplace_back(nextIdx, eid, vId);
if (_currentDepth < _opts->maxDepth - 1) {
_nextDepth.emplace_back(NextStep(_schreierIndex));
}
_schreierIndex++;
didInsert = true;
}
};
cursor->readAll(callback);
if (!shouldReturnPath) {
_lastReturned = _schreierIndex;
didInsert = false;
}
if (didInsert) {
// We exit the loop here.
// _schreierIndex is moved forward
break;
}
}
// Nothing found for this vertex.
// _toSearchPos is increased so
// we are not stuck in an endless loop
}
// _lastReturned points to the last used
// entry. We compute the path to it.
return true;
}
arangodb::aql::AqlValue BreadthFirstEnumerator::lastVertexToAqlValue() {
TRI_ASSERT(_lastReturned < _schreier.size());
PathStep const& current = _schreier[_lastReturned];
return _traverser->fetchVertexData(StringRef(current.vertex));
}
arangodb::aql::AqlValue BreadthFirstEnumerator::lastEdgeToAqlValue() {
TRI_ASSERT(_lastReturned < _schreier.size());
if (_lastReturned == 0) {
// This is the first Vertex. No Edge Pointing to it
return arangodb::aql::AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
PathStep const& current = _schreier[_lastReturned];
return _traverser->fetchEdgeData(StringRef(current.edge));
}
arangodb::aql::AqlValue BreadthFirstEnumerator::pathToAqlValue(
arangodb::velocypack::Builder& result) {
// TODO make deque class variable
std::deque<size_t> fullPath;
size_t cur = _lastReturned;
while (cur != 0) {
// Walk backwards through the path and push everything found on the local stack
fullPath.emplace_front(cur);
cur = _schreier[cur].sourceIdx;
}
result.clear();
result.openObject();
result.add(VPackValue("edges"));
result.openArray();
for (auto const& idx : fullPath) {
_traverser->addEdgeToVelocyPack(StringRef(_schreier[idx].edge), result);
}
result.close(); // edges
result.add(VPackValue("vertices"));
result.openArray();
// Always add the start vertex
_traverser->addVertexToVelocyPack(_schreier[0].vertex, result);
for (auto const& idx : fullPath) {
_traverser->addVertexToVelocyPack(_schreier[idx].vertex, result);
}
result.close(); // vertices
result.close();
return arangodb::aql::AqlValue(result.slice());
}
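The enumerator never materializes whole paths: every PathStep stores just a vertex, the incoming edge, and the index of its predecessor, so pathToAqlValue can rebuild any path by following sourceIdx links back to entry 0 (the start vertex). A self-contained sketch of that reconstruction:

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch of the schreier-vector idea: entry 0 is the start vertex and each
// later entry points at its predecessor, so one backward walk plus a
// reverse yields the full path to any entry.
struct Step {
  size_t sourceIdx;
  char vertex;
};

std::vector<char> pathTo(std::vector<Step> const& schreier, size_t last) {
  std::vector<char> path;
  for (size_t cur = last;; cur = schreier[cur].sourceIdx) {
    path.push_back(schreier[cur].vertex);
    if (cur == 0) break;  // reached the start vertex
  }
  std::reverse(path.begin(), path.end());
  return path;  // {{0,'A'},{0,'B'},{1,'C'}} with last = 2 gives A, B, C
}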

View File

@@ -0,0 +1,166 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_GRAPH_BREADTHFIRSTENUMERATOR_H
#define ARANGODB_GRAPH_BREADTHFIRSTENUMERATOR_H 1
#include "Basics/Common.h"
#include "VocBase/PathEnumerator.h"
namespace arangodb {
namespace traverser {
class Traverser;
struct TraverserOptions;
}
namespace graph {
class BreadthFirstEnumerator final : public arangodb::traverser::PathEnumerator {
private:
//////////////////////////////////////////////////////////////////////////////
/// @brief One entry in the schreier vector
//////////////////////////////////////////////////////////////////////////////
struct PathStep {
size_t sourceIdx;
arangodb::StringRef const edge;
arangodb::StringRef const vertex;
private:
PathStep() {}
public:
explicit PathStep(arangodb::StringRef const vertex);
PathStep(size_t sourceIdx, arangodb::StringRef const edge,
arangodb::StringRef const vertex);
};
//////////////////////////////////////////////////////////////////////////////
/// @brief Struct to hold all information required to get the list of
/// connected edges
//////////////////////////////////////////////////////////////////////////////
struct NextStep {
size_t sourceIdx;
private:
NextStep() = delete;
public:
explicit NextStep(size_t sourceIdx)
: sourceIdx(sourceIdx) {}
};
//////////////////////////////////////////////////////////////////////////////
/// @brief schreier vector to store the visited vertices
//////////////////////////////////////////////////////////////////////////////
std::vector<PathStep> _schreier;
//////////////////////////////////////////////////////////////////////////////
/// @brief Next free index in schreier vector.
//////////////////////////////////////////////////////////////////////////////
size_t _schreierIndex;
//////////////////////////////////////////////////////////////////////////////
/// @brief Position of the last returned value in the schreier vector
//////////////////////////////////////////////////////////////////////////////
size_t _lastReturned;
//////////////////////////////////////////////////////////////////////////////
/// @brief Vector to store where to continue search on next depth
//////////////////////////////////////////////////////////////////////////////
std::vector<NextStep> _nextDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief Vector storing the position at current search depth
//////////////////////////////////////////////////////////////////////////////
std::vector<NextStep> _toSearch;
//////////////////////////////////////////////////////////////////////////////
/// @brief Temporary storage for edges encountered while expanding a vertex
//////////////////////////////////////////////////////////////////////////////
std::unordered_set<arangodb::velocypack::Slice> _tmpEdges;
//////////////////////////////////////////////////////////////////////////////
/// @brief Marker for the search depth. Used to abort searching.
//////////////////////////////////////////////////////////////////////////////
uint64_t _currentDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief position in _toSearch. If this is >= _toSearch.size() we are done
/// with this depth.
//////////////////////////////////////////////////////////////////////////////
size_t _toSearchPos;
public:
BreadthFirstEnumerator(arangodb::traverser::Traverser* traverser,
arangodb::velocypack::Slice startVertex,
arangodb::traverser::TraverserOptions* opts);
~BreadthFirstEnumerator() {}
//////////////////////////////////////////////////////////////////////////////
/// @brief Get the next Path element from the traversal.
//////////////////////////////////////////////////////////////////////////////
bool next() override;
aql::AqlValue lastVertexToAqlValue() override;
aql::AqlValue lastEdgeToAqlValue() override;
aql::AqlValue pathToAqlValue(arangodb::velocypack::Builder& result) override;
private:
inline size_t getDepth(size_t index) const {
size_t depth = 0;
while (index != 0) {
++depth;
index = _schreier[index].sourceIdx;
}
return depth;
}
//////////////////////////////////////////////////////////////////////////////
/// @brief Build the enumerated path for the given index in the schreier
/// vector.
//////////////////////////////////////////////////////////////////////////////
void computeEnumeratedPath(size_t index);
};
}
}
#endif

View File

@@ -0,0 +1,105 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////
#include "NeighborsEnumerator.h"
#include "Basics/VelocyPackHelper.h"
#include "VocBase/Traverser.h"
#include "VocBase/TraverserCache.h"
using namespace arangodb;
using namespace arangodb::traverser;
using namespace arangodb::graph;
NeighborsEnumerator::NeighborsEnumerator(Traverser* traverser,
VPackSlice const& startVertex,
TraverserOptions* opts)
: PathEnumerator(traverser, startVertex.copyString(), opts),
_searchDepth(0) {
StringRef vId = _traverser->traverserCache()->persistString(StringRef(startVertex));
_allFound.insert(vId);
_currentDepth.insert(vId);
_iterator = _currentDepth.begin();
}
bool NeighborsEnumerator::next() {
if (_isFirst) {
_isFirst = false;
if (_opts->minDepth == 0) {
return true;
}
}
if (_iterator == _currentDepth.end() || ++_iterator == _currentDepth.end()) {
do {
// This depth is done. Get next
if (_opts->maxDepth == _searchDepth) {
// We are finished.
return false;
}
_lastDepth.swap(_currentDepth);
_currentDepth.clear();
StringRef v;
for (auto const& nextVertex : _lastDepth) {
auto callback = [&](StringRef const& edgeId, VPackSlice e, size_t& cursorId) {
// Counting should be done in readAll
_traverser->_readDocuments++;
if (_traverser->getSingleVertex(e, nextVertex, _searchDepth, v)) {
StringRef otherId = _traverser->traverserCache()->persistString(v);
if (_allFound.find(otherId) == _allFound.end()) {
_currentDepth.emplace(otherId);
_allFound.emplace(otherId);
}
}
};
std::unique_ptr<arangodb::traverser::EdgeCursor> cursor(
_opts->nextCursor(_traverser->mmdr(), nextVertex, _searchDepth));
cursor->readAll(callback);
}
if (_currentDepth.empty()) {
// Nothing found. Cannot do anything more.
return false;
}
++_searchDepth;
} while (_searchDepth < _opts->minDepth);
_iterator = _currentDepth.begin();
}
TRI_ASSERT(_iterator != _currentDepth.end());
return true;
}
arangodb::aql::AqlValue NeighborsEnumerator::lastVertexToAqlValue() {
TRI_ASSERT(_iterator != _currentDepth.end());
return _traverser->fetchVertexData(*_iterator);
}
arangodb::aql::AqlValue NeighborsEnumerator::lastEdgeToAqlValue() {
// TODO should return Optimizer failed
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
arangodb::aql::AqlValue NeighborsEnumerator::pathToAqlValue(arangodb::velocypack::Builder& result) {
// TODO should return Optimizer failed
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
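The search above advances one whole depth per round: _lastDepth and _currentDepth swap roles, and _allFound guarantees each vertex is visited once no matter how many edges reach it. A condensed sketch with integer vertex ids standing in for StringRefs and expand() standing in for the edge cursor:

#include <cstdint>
#include <functional>
#include <unordered_set>

// Frontier-swap neighbour search: returns all vertices whose depth from
// start lies in [minDepth, maxDepth], each found exactly once.
std::unordered_set<int> neighbors(
    int start, uint64_t minDepth, uint64_t maxDepth,
    std::function<std::unordered_set<int>(int)> const& expand) {
  std::unordered_set<int> allFound{start};
  std::unordered_set<int> current{start};
  std::unordered_set<int> result;
  for (uint64_t depth = 0; depth < maxDepth && !current.empty(); ++depth) {
    std::unordered_set<int> next;
    for (int v : current) {
      for (int w : expand(v)) {
        if (allFound.insert(w).second) {  // first time we see w
          next.insert(w);
        }
      }
    }
    current.swap(next);
    if (depth + 1 >= minDepth) {
      result.insert(current.begin(), current.end());
    }
  }
  return result;
}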

View File

@@ -0,0 +1,76 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_GRAPH_NEIGHBORSENUMERATOR_H
#define ARANGODB_GRAPH_NEIGHBORSENUMERATOR_H 1
#include "Basics/Common.h"
#include "VocBase/PathEnumerator.h"
#include <velocypack/Slice.h>
namespace arangodb {
namespace graph {
// @brief Enumerator optimized for neighbors. Does not allow edge access
class NeighborsEnumerator final : public arangodb::traverser::PathEnumerator {
std::unordered_set<arangodb::StringRef> _allFound;
std::unordered_set<arangodb::StringRef> _currentDepth;
std::unordered_set<arangodb::StringRef> _lastDepth;
std::unordered_set<arangodb::StringRef>::iterator _iterator;
uint64_t _searchDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief Temporary storage for edges encountered while expanding a vertex
//////////////////////////////////////////////////////////////////////////////
std::unordered_set<arangodb::velocypack::Slice> _tmpEdges;
public:
NeighborsEnumerator(arangodb::traverser::Traverser* traverser,
arangodb::velocypack::Slice const& startVertex,
arangodb::traverser::TraverserOptions* opts);
~NeighborsEnumerator() {
}
//////////////////////////////////////////////////////////////////////////////
/// @brief Get the next Path element from the traversal.
//////////////////////////////////////////////////////////////////////////////
bool next() override;
aql::AqlValue lastVertexToAqlValue() override;
aql::AqlValue lastEdgeToAqlValue() override;
aql::AqlValue pathToAqlValue(arangodb::velocypack::Builder& result) override;
};
} // namespace graph
} // namespace arangodb
#endif

View File

@@ -2588,7 +2588,6 @@ int MMFilesCollection::insert(transaction::Methods* trx,
}
}
transaction::BuilderLeaser builder(trx);
VPackSlice newSlice;
int res = TRI_ERROR_NO_ERROR;
@@ -2825,7 +2824,7 @@ int MMFilesCollection::insertSecondaryIndexes(arangodb::transaction::Methods* tr
TRI_voc_rid_t revisionId,
VPackSlice const& doc,
bool isRollback) {
// Coordintor doesn't know index internals
// Coordinator doesn't know index internals
TRI_ASSERT(!ServerState::instance()->isCoordinator());
TRI_IF_FAILURE("InsertSecondaryIndexes") { return TRI_ERROR_DEBUG; }
@@ -3531,7 +3530,7 @@ int MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
/// the caller must make sure the read lock on the collection is held
/// the key must be a string slice, no revision check is performed
int MMFilesCollection::lookupDocument(transaction::Methods* trx,
VPackSlice const key,
VPackSlice key,
ManagedDocumentResult& result) {
if (!key.isString()) {
return TRI_ERROR_ARANGO_DOCUMENT_KEY_BAD;

View File

@@ -490,7 +490,7 @@ class MMFilesCollection final : public PhysicalCollection {
int deleteSecondaryIndexes(transaction::Methods*, TRI_voc_rid_t revisionId,
velocypack::Slice const&, bool isRollback);
int lookupDocument(transaction::Methods*, velocypack::Slice const,
int lookupDocument(transaction::Methods*, velocypack::Slice,
ManagedDocumentResult& result);
int updateDocument(transaction::Methods*, TRI_voc_rid_t oldRevisionId,

View File

@@ -214,130 +214,6 @@ MMFilesEdgeIndex::~MMFilesEdgeIndex() {
delete _edgesTo;
}
void MMFilesEdgeIndex::buildSearchValue(TRI_edge_direction_e dir,
std::string const& id, VPackBuilder& builder) {
builder.openArray();
switch (dir) {
case TRI_EDGE_OUT:
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, VPackValue(id));
builder.close();
builder.close();
builder.add(VPackValue(VPackValueType::Null));
break;
case TRI_EDGE_IN:
builder.add(VPackValue(VPackValueType::Null));
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, VPackValue(id));
builder.close();
builder.close();
break;
case TRI_EDGE_ANY:
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, VPackValue(id));
builder.close();
builder.close();
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, VPackValue(id));
builder.close();
builder.close();
}
builder.close();
}
void MMFilesEdgeIndex::buildSearchValue(TRI_edge_direction_e dir,
VPackSlice const& id, VPackBuilder& builder) {
TRI_ASSERT(id.isString());
builder.openArray();
switch (dir) {
case TRI_EDGE_OUT:
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
builder.close();
builder.add(VPackValue(VPackValueType::Null));
break;
case TRI_EDGE_IN:
builder.add(VPackValue(VPackValueType::Null));
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
builder.close();
break;
case TRI_EDGE_ANY:
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
builder.close();
builder.openArray();
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
builder.close();
}
builder.close();
}
void MMFilesEdgeIndex::buildSearchValueFromArray(TRI_edge_direction_e dir,
VPackSlice const ids,
VPackBuilder& builder) {
TRI_ASSERT(ids.isArray());
builder.openArray();
switch (dir) {
case TRI_EDGE_OUT:
builder.openArray();
for (auto const& id : VPackArrayIterator(ids)) {
if (id.isString()) {
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
}
}
builder.close();
builder.add(VPackValue(VPackValueType::Null));
break;
case TRI_EDGE_IN:
builder.add(VPackValue(VPackValueType::Null));
builder.openArray();
for (auto const& id : VPackArrayIterator(ids)) {
if (id.isString()) {
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
}
}
builder.close();
break;
case TRI_EDGE_ANY:
builder.openArray();
for (auto const& id : VPackArrayIterator(ids)) {
if (id.isString()) {
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
}
}
builder.close();
builder.openArray();
for (auto const& id : VPackArrayIterator(ids)) {
if (id.isString()) {
builder.openObject();
builder.add(StaticStrings::IndexEq, id);
builder.close();
}
}
builder.close();
}
builder.close();
}
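For reference, the removed builders serialized an edge-index lookup as a two-slot array: the first slot holds the {"eq": <id>} conditions on _from, the second those on _to, and null marks the unconstrained direction (TRI_EDGE_ANY fills both slots). A tiny sketch that prints the OUT-direction shape as plain JSON, with a made-up vertex id; the real code emits the same structure through VPackBuilder:

#include <iostream>
#include <string>

// Shape produced for TRI_EDGE_OUT:  [ [ { "eq": "<vertex-id>" } ], null ]
std::string outSearchValue(std::string const& id) {
  return "[[{\"eq\":\"" + id + "\"}],null]";
}

int main() {
  std::cout << outSearchValue("vertices/alice") << "\n";  // hypothetical id
  return 0;
}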
/// @brief return a selectivity estimate for the index
double MMFilesEdgeIndex::selectivityEstimate(arangodb::StringRef const* attribute) const {
if (_edgesFrom == nullptr ||

View File

@@ -79,19 +79,6 @@ class MMFilesEdgeIndex final : public Index {
~MMFilesEdgeIndex();
static void buildSearchValue(TRI_edge_direction_e, std::string const&,
arangodb::velocypack::Builder&);
static void buildSearchValue(TRI_edge_direction_e,
arangodb::velocypack::Slice const&,
arangodb::velocypack::Builder&);
static void buildSearchValueFromArray(TRI_edge_direction_e,
arangodb::velocypack::Slice const,
arangodb::velocypack::Builder&);
public:
/// @brief typedef for hash tables
public:
IndexType type() const override { return Index::TRI_IDX_TYPE_EDGE_INDEX; }

View File

@@ -48,6 +48,7 @@
#include "MMFiles/MMFilesTransactionState.h"
#include "MMFiles/MMFilesV8Functions.h"
#include "MMFiles/MMFilesView.h"
#include "MMFiles/MMFilesWalRecoveryFeature.h"
#include "Random/RandomGenerator.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/DatabasePathFeature.h"
@@ -142,6 +143,10 @@ MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
_isUpgrade(false),
_maxTick(0) {
startsAfter("MMFilesPersistentIndex");
server->addFeature(new MMFilesWalRecoveryFeature(server));
server->addFeature(new MMFilesLogfileManager(server));
server->addFeature(new MMFilesPersistentIndexFeature(server));
}
MMFilesEngine::~MMFilesEngine() {}

View File

@@ -109,15 +109,14 @@ MMFilesLogfileManager::MMFilesLogfileManager(ApplicationServer* server)
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "creating WAL logfile manager";
TRI_ASSERT(!_allowWrites);
setOptional(false);
setOptional(true);
requiresElevatedPrivileges(false);
startsAfter("DatabasePath");
startsAfter("EngineSelector");
startsAfter("FeatureCache");
for (auto const& it : EngineSelectorFeature::availableEngines()) {
startsAfter(it.second);
}
startsAfter("MMFilesEngine");
onlyEnabledWith("MMFilesEngine");
}
// destroy the logfile manager

View File

@@ -65,8 +65,9 @@ MMFilesPersistentIndexFeature::MMFilesPersistentIndexFeature(
_keepLogFileNum(1000), _logFileTimeToRoll(0), _compactionReadaheadSize(0) {
setOptional(true);
requiresElevatedPrivileges(false);
// startsAfter("MMFilesLogfileManager");
startsAfter("DatabasePath");
onlyEnabledWith("MMFilesEngine");
}
MMFilesPersistentIndexFeature::~MMFilesPersistentIndexFeature() {

View File

@@ -37,11 +37,14 @@ using namespace arangodb::options;
MMFilesWalRecoveryFeature::MMFilesWalRecoveryFeature(ApplicationServer* server)
: ApplicationFeature(server, "MMFilesWalRecovery") {
setOptional(false);
setOptional(true);
requiresElevatedPrivileges(false);
startsAfter("Database");
startsAfter("MMFilesLogfileManager");
startsAfter("MMFilesPersistentIndex");
onlyEnabledWith("MMFilesEngine");
onlyEnabledWith("MMFilesLogfileManager");
}
/// @brief run the recovery procedure

View File

@@ -46,15 +46,17 @@ IAggregator* AggregatorHandler::getAggregator(AggregatorID const& name) {
}
}
// aggregator doesn't exist yet, create it
{
std::unique_ptr<IAggregator> agg(_algorithm->aggregator(name));
if (agg) {
WRITE_LOCKER(guard, _lock);
std::unique_ptr<IAggregator> agg(_algorithm->aggregator(name));
if (agg) {
_values[name] = agg.get();
if (_values.find(name) == _values.end()) {
_values.emplace(name, agg.get());
return agg.release();
}
return _values[name];
} else {
return nullptr;
}
return nullptr;
}
void AggregatorHandler::aggregate(AggregatorID const& name,
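The rework above takes the write lock before creating the aggregator and checks the map again underneath it, so two threads racing past the unlocked lookup can no longer both insert. A generic sketch of that check-lock-recheck pattern, with C++17 std::shared_mutex standing in for the READ/WRITE_LOCKER macros:

#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>

// getOrCreate: the second find() under the exclusive lock is what stops
// two racing threads from both inserting. Factory returns unique_ptr<T>.
template <typename T>
class Registry {
  std::shared_mutex _lock;
  std::unordered_map<std::string, std::unique_ptr<T>> _values;

 public:
  template <typename Factory>
  T* getOrCreate(std::string const& name, Factory&& make) {
    {
      std::shared_lock<std::shared_mutex> guard(_lock);  // fast read path
      auto it = _values.find(name);
      if (it != _values.end()) {
        return it->second.get();
      }
    }
    std::unique_lock<std::shared_mutex> guard(_lock);
    auto it = _values.find(name);  // re-check: another thread may have won
    if (it == _values.end()) {
      it = _values.emplace(name, make()).first;
    }
    return it->second.get();
  }
};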

View File

@@ -88,7 +88,7 @@ uint32_t HLLCounter::getCount() {
} else if (estimate > (1.0 / 30.0) * pow_2_32) {
estimate = neg_pow_2_32 * log(1.0 - (estimate / pow_2_32));
}
return estimate;
return (uint32_t) estimate;
}
void HLLCounter::merge(HLLCounter const& other) {
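The added cast truncates the corrected estimate back into the counter's 32-bit range. For context, the branch it sits in is HyperLogLog's standard large-range correction for 32-bit hashes, E' = -2^32 * ln(1 - E / 2^32); isolated as a sketch:

#include <cmath>
#include <cstdint>

// Large-range correction: once the raw estimate exceeds 2^32 / 30, correct
// for 32-bit hash collisions, then truncate to uint32_t as the diff does.
uint32_t correctLargeRange(double estimate) {
  constexpr double pow_2_32 = 4294967296.0;
  if (estimate > (1.0 / 30.0) * pow_2_32) {
    estimate = -pow_2_32 * std::log(1.0 - estimate / pow_2_32);
  }
  return static_cast<uint32_t>(estimate);
}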

View File

@@ -56,13 +56,13 @@ struct HITSComputation
void compute(
MessageIterator<SenderMessage<double>> const& messages) override {
double auth = 0.0f;
double hub = 0.0f;
double auth = 0.0;
double hub = 0.0;
// we don't know our incoming neighbours in step 0, therefore we need step 0
// as 'initialization' before actually starting to converge
if (globalSuperstep() <= 1) {
auth = 1.0f;
hub = 1.0f;
auth = 1.0;
hub = 1.0;
} else {
HITSWorkerContext const* ctx = static_cast<HITSWorkerContext const*>(context());
for (SenderMessage<double> const* message : messages) {

View File

@@ -32,7 +32,7 @@ using namespace arangodb;
using namespace arangodb::pregel;
using namespace arangodb::pregel::algos;
static float EPS = 0.00001;
static float EPS = 0.00001f;
static std::string const kConvergence = "convergence";
struct PRWorkerContext : public WorkerContext {
@@ -41,9 +41,9 @@ struct PRWorkerContext : public WorkerContext {
float commonProb = 0;
void preGlobalSuperstep(uint64_t gss) override {
if (gss == 0) {
commonProb = 1.0 / vertexCount();
commonProb = 1.0f / vertexCount();
} else {
commonProb = 0.15 / vertexCount();
commonProb = 0.15f / vertexCount();
}
}
};
@@ -64,11 +64,11 @@ struct PRComputation : public VertexComputation<float, float, float> {
if (globalSuperstep() == 0) {
*ptr = ctx->commonProb;
} else {
float sum = 0.0;
float sum = 0.0f;
for (const float* msg : messages) {
sum += *msg;
}
*ptr = 0.85 * sum + ctx->commonProb;
*ptr = 0.85f * sum + ctx->commonProb;
}
float diff = fabs(copy - *ptr);
aggregate<float>(kConvergence, diff);
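The superstep above is the damped PageRank update with d = 0.85: rank(v) = (1 - d)/N + d * sum over in-neighbours u of rank(u)/outdeg(u). After the first superstep commonProb holds (1 - d)/N, and each incoming message already carries rank(u)/outdeg(u). One vertex update, isolated as a sketch:

#include <cmath>

// One damped PageRank step: messageSum is the sum of incoming
// rank(u) / outdeg(u) values, commonProb is (1 - d) / N. Returns the
// absolute change, which is aggregated to detect convergence.
float pagerankStep(float& rank, float messageSum, float commonProb) {
  float const previous = rank;
  rank = 0.85f * messageSum + commonProb;
  return std::fabs(previous - rank);
}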

View File

@@ -115,7 +115,7 @@ struct SenderMessageFormat : public MessageFormat<SenderMessage<T>> {
SenderMessageFormat() {}
void unwrapValue(VPackSlice s, SenderMessage<T>& senderVal) const override {
VPackArrayIterator array(s);
senderVal.senderId.shard = (*array).getUInt();
senderVal.senderId.shard = (PregelShard) ((*array).getUInt());
senderVal.senderId.key = (*(++array)).copyString();
senderVal.value = (*(++array)).getNumber<T>();
}

View File

@@ -522,9 +522,9 @@ void Worker<V, E, M>::_finishedProcessing() {
// async adaptive message buffering
_messageBatchSize = _algorithm->messageBatchSize(_config, _messageStats);
} else {
uint32_t tn = _config.parallelism();
uint32_t s = _messageStats.sendCount / tn / 2UL;
_messageBatchSize = s > 1000 ? s : 1000;
uint64_t tn = _config.parallelism();
uint64_t s = _messageStats.sendCount / tn / 2UL;
_messageBatchSize = s > 1000 ? (uint32_t)s : 1000;
}
_messageStats.resetTracking();
LOG_TOPIC(DEBUG, Logger::PREGEL) << "Batch size: " << _messageBatchSize;
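Without algorithm-specific advice, the fallback above now sizes batches in 64-bit arithmetic before narrowing: roughly half of the previous round's sends per thread, floored at 1000 messages. Isolated as a sketch:

#include <cstdint>

// Fallback batch-size heuristic: half of last round's per-thread sends,
// computed in 64 bits to avoid overflow, never below 1000.
uint32_t adaptiveBatchSize(uint64_t sendCount, uint64_t threads) {
  uint64_t const s = sendCount / threads / 2;
  return s > 1000 ? static_cast<uint32_t>(s) : 1000;
}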

View File

@@ -82,8 +82,8 @@ void FileDescriptorsFeature::start() {
if (rlim.rlim_cur < RECOMMENDED) {
LOG_TOPIC(WARN, arangodb::Logger::SYSCALL)
<< "file-descriptors limit is too low, currently "
<< StringifyLimitValue(rlim.rlim_cur) << ", raise to at least "
<< RECOMMENDED;
<< StringifyLimitValue(rlim.rlim_cur) << ", please raise to at least "
<< RECOMMENDED << " (e.g. ulimit -n " << RECOMMENDED << ")";
}
#endif
}
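The reworded warning now tells the operator what to do (raise the limit, e.g. via ulimit -n). The check itself is the usual getrlimit pattern; a minimal POSIX sketch with an assumed threshold, since the real RECOMMENDED constant is defined elsewhere in the feature:

#include <cstdio>
#include <sys/resource.h>

int main() {
  rlim_t const RECOMMENDED = 8192;  // assumption: placeholder value
  struct rlimit rlim;
  if (getrlimit(RLIMIT_NOFILE, &rlim) == 0 && rlim.rlim_cur < RECOMMENDED) {
    std::fprintf(stderr,
                 "file-descriptors limit is too low, currently %llu, "
                 "please raise to at least %llu (e.g. ulimit -n %llu)\n",
                 (unsigned long long)rlim.rlim_cur,
                 (unsigned long long)RECOMMENDED,
                 (unsigned long long)RECOMMENDED);
  }
  return 0;
}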

View File

@ -80,14 +80,6 @@
#include "Statistics/StatisticsFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
// TODO - move the following MMFiles includes to the storage engine
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesPersistentIndexFeature.h"
#include "MMFiles/MMFilesWalRecoveryFeature.h"
#include "MMFiles/MMFilesEngine.h"
#include "RocksDBEngine/RocksDBEngine.h"
#include "V8Server/FoxxQueuesFeature.h"
#include "V8Server/V8DealerFeature.h"
@ -99,6 +91,10 @@
#include "Enterprise/RestServer/arangodEE.h"
#endif
// storage engine
#include "MMFiles/MMFilesEngine.h"
#include "RocksDBEngine/RocksDBEngine.h"
using namespace arangodb;
static int runServer(int argc, char** argv) {
@ -195,9 +191,6 @@ static int runServer(int argc, char** argv) {
// storage engines
server.addFeature(new MMFilesEngine(&server));
server.addFeature(new MMFilesWalRecoveryFeature(&server));
server.addFeature(new MMFilesLogfileManager(&server));
server.addFeature(new MMFilesPersistentIndexFeature(&server));
server.addFeature(new RocksDBEngine(&server));
try {

View File

@ -23,17 +23,27 @@
#include "RocksDBCollection.h"
#include "Basics/Result.h"
#include "Basics/StaticStrings.h"
#include "Aql/PlanCache.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterMethods.h"
#include "Indexes/Index.h"
#include "Indexes/IndexIterator.h"
#include "RestServer/DatabaseFeature.h"
#include "RocksDBEngine/RocksDBPrimaryIndex.h"
#include "RocksDBEngine/RocksDBEngine.h"
#include "RocksDBEngine/RocksDBEntry.h"
#include "RocksDBEngine/RocksDBPrimaryMockIndex.h"
#include "RocksDBEngine/RocksDBToken.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Helpers.h"
#include "Utils/CollectionNameResolver.h"
#include "Utils/OperationOptions.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ticks.h"
#include <rocksdb/utilities/transaction.h>
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
@ -320,12 +330,76 @@ bool RocksDBCollection::readDocumentConditional(
}
int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const newSlice,
arangodb::velocypack::Slice const slice,
arangodb::ManagedDocumentResult& result,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick, bool lock) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return 0;
TRI_voc_tick_t& resultMarkerTick, bool /*lock*/) {
// store the tick that was used for writing the document
// note that we don't need it for this engine
resultMarkerTick = 0;
VPackSlice fromSlice;
VPackSlice toSlice;
bool const isEdgeCollection =
(_logicalCollection->type() == TRI_COL_TYPE_EDGE);
if (isEdgeCollection) {
// _from:
fromSlice = slice.get(StaticStrings::FromString);
if (!fromSlice.isString()) {
return TRI_ERROR_ARANGO_INVALID_EDGE_ATTRIBUTE;
}
VPackValueLength len;
char const* docId = fromSlice.getString(len);
size_t split;
if (!TRI_ValidateDocumentIdKeyGenerator(docId, static_cast<size_t>(len),
&split)) {
return TRI_ERROR_ARANGO_INVALID_EDGE_ATTRIBUTE;
}
// _to:
toSlice = slice.get(StaticStrings::ToString);
if (!toSlice.isString()) {
return TRI_ERROR_ARANGO_INVALID_EDGE_ATTRIBUTE;
}
docId = toSlice.getString(len);
if (!TRI_ValidateDocumentIdKeyGenerator(docId, static_cast<size_t>(len),
&split)) {
return TRI_ERROR_ARANGO_INVALID_EDGE_ATTRIBUTE;
}
}
transaction::BuilderLeaser builder(trx);
VPackSlice newSlice;
int res = TRI_ERROR_NO_ERROR;
if (options.recoveryData == nullptr) {
res = newObjectForInsert(trx, slice, fromSlice, toSlice, isEdgeCollection,
*builder.get(), options.isRestore);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
newSlice = builder->slice();
} else {
TRI_ASSERT(slice.isObject());
// we can get away with the fast hash function here, as key values are
// restricted to strings
newSlice = slice;
}
TRI_voc_rid_t revisionId = transaction::helpers::extractRevFromDocument(newSlice);
res = insertDocument(trx, revisionId, newSlice, options.waitForSync);
if (res == TRI_ERROR_NO_ERROR) {
// TODO: handle returning of result value!
// uint8_t const* vpack = lookupRevisionVPack(revisionId);
// if (vpack != nullptr) {
// result.addExisting(vpack, revisionId);
// }
}
return res;
}
int RocksDBCollection::update(arangodb::transaction::Methods* trx,
@ -344,23 +418,120 @@ int RocksDBCollection::update(arangodb::transaction::Methods* trx,
int RocksDBCollection::replace(
transaction::Methods* trx, arangodb::velocypack::Slice const newSlice,
ManagedDocumentResult& result, OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick, bool lock, TRI_voc_rid_t& prevRev,
TRI_voc_tick_t& resultMarkerTick, bool /*lock*/, TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous, TRI_voc_rid_t const revisionId,
arangodb::velocypack::Slice const fromSlice,
arangodb::velocypack::Slice const toSlice) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return 0;
resultMarkerTick = 0;
bool const isEdgeCollection = (_logicalCollection->type() == TRI_COL_TYPE_EDGE);
// get the previous revision
VPackSlice key = newSlice.get(StaticStrings::KeyString);
if (key.isNone()) {
return TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD;
}
// get the previous revision
int res = lookupDocument(trx, key, previous);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
uint8_t const* vpack = previous.vpack();
VPackSlice oldDoc(vpack);
TRI_voc_rid_t oldRevisionId = transaction::helpers::extractRevFromDocument(oldDoc);
prevRev = oldRevisionId;
// Check old revision:
if (!options.ignoreRevs) {
TRI_voc_rid_t expectedRev = 0;
if (newSlice.isObject()) {
expectedRev = TRI_ExtractRevisionId(newSlice);
}
int res = checkRevision(trx, expectedRev, prevRev);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
// merge old and new values
transaction::BuilderLeaser builder(trx);
newObjectForReplace(trx, oldDoc, newSlice, fromSlice, toSlice,
isEdgeCollection, TRI_RidToString(revisionId),
*builder.get());
if (trx->state()->isDBServer()) {
// Need to check that no sharding keys have changed:
if (arangodb::shardKeysChanged(_logicalCollection->dbName(),
trx->resolver()->getCollectionNameCluster(
_logicalCollection->planId()),
oldDoc, builder->slice(), false)) {
return TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES;
}
}
res = updateDocument(trx, oldRevisionId, oldDoc, revisionId, VPackSlice(builder->slice()), options.waitForSync);
/* TODO: handle returning of the result value
uint8_t const* vpack = lookupRevisionVPack(revisionId);
if (vpack != nullptr) {
result.addExisting(vpack, revisionId);
}
*/
return res;
}
int RocksDBCollection::remove(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const slice,
arangodb::ManagedDocumentResult& previous,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick, bool lock,
TRI_voc_tick_t& resultMarkerTick, bool /*lock*/,
TRI_voc_rid_t const& revisionId,
TRI_voc_rid_t& prevRev) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return 0;
// store the tick that was used for writing the document
// note that we don't need it for this engine
resultMarkerTick = 0;
prevRev = 0;
transaction::BuilderLeaser builder(trx);
newObjectForRemove(trx, slice, TRI_RidToString(revisionId), *builder.get());
VPackSlice key;
if (slice.isString()) {
key = slice;
} else {
key = slice.get(StaticStrings::KeyString);
}
TRI_ASSERT(!key.isNone());
// get the previous revision
int res = lookupDocument(trx, key, previous);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
uint8_t const* vpack = previous.vpack();
VPackSlice oldDoc(vpack);
TRI_voc_rid_t oldRevisionId = arangodb::transaction::helpers::extractRevFromDocument(oldDoc);
prevRev = oldRevisionId;
// Check old revision:
if (!options.ignoreRevs && slice.isObject()) {
TRI_voc_rid_t expectedRevisionId = TRI_ExtractRevisionId(slice);
int res = checkRevision(trx, expectedRevisionId, oldRevisionId);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
res = removeDocument(trx, oldRevisionId, oldDoc, options.waitForSync);
return res;
}
void RocksDBCollection::deferDropCollection(
@ -423,7 +594,7 @@ void RocksDBCollection::addIndexCoordinator(
int RocksDBCollection::saveIndex(transaction::Methods* trx, std::shared_ptr<arangodb::Index> idx) {
TRI_ASSERT(!ServerState::instance()->isCoordinator());
// we cannot persist PrimaryIndex
// we cannot persist PrimaryMockIndex
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
std::vector<std::shared_ptr<arangodb::Index>> indexListLocal;
indexListLocal.emplace_back(idx);
@ -450,7 +621,7 @@ int RocksDBCollection::saveIndex(transaction::Methods* trx, std::shared_ptr<aran
// WARNING: Make sure that this LogicalCollection Instance
// is somehow protected. If it goes out of all scopes
// or it's indexes are freed the pointer returned will get invalidated.
arangodb::RocksDBPrimaryIndex* RocksDBCollection::primaryIndex() const {
arangodb::RocksDBPrimaryMockIndex* RocksDBCollection::primaryIndex() const {
// The primary index always has iid 0
auto primary = _logicalCollection->lookupIndex(0);
TRI_ASSERT(primary != nullptr);
@ -467,5 +638,149 @@ arangodb::RocksDBPrimaryIndex* RocksDBCollection::primaryIndex() const {
#endif
TRI_ASSERT(primary->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
// the primary index must be the index at position #0
return static_cast<arangodb::RocksDBPrimaryIndex*>(primary.get());
return static_cast<arangodb::RocksDBPrimaryMockIndex*>(primary.get());
}
int RocksDBCollection::insertDocument(arangodb::transaction::Methods* trx,
TRI_voc_rid_t revisionId,
VPackSlice const& doc,
bool& waitForSync) {
// Coordinator doesn't know index internals
TRI_ASSERT(!ServerState::instance()->isCoordinator());
RocksDBEntry entry(RocksDBEntry::Document(_objectId, revisionId, doc));
rocksdb::WriteBatch writeBatch;
writeBatch.Put(entry.key(), entry.value());
auto indexes = _indexes;
size_t const n = indexes.size();
int result = TRI_ERROR_NO_ERROR;
for (size_t i = 0; i < n; ++i) {
auto idx = indexes[i];
int res = idx->insert(trx, revisionId, doc, false);
// in case of out-of-memory, return immediately
if (res == TRI_ERROR_OUT_OF_MEMORY) {
return res;
}
if (res != TRI_ERROR_NO_ERROR) {
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED ||
result == TRI_ERROR_NO_ERROR) {
// "prefer" unique constraint violated
result = res;
}
}
}
// only commit the batch when all index operations succeeded
if (result == TRI_ERROR_NO_ERROR) {
rocksdb::WriteOptions writeOptions;
if (_logicalCollection->waitForSync()) {
waitForSync = true;
}
if (waitForSync) {
trx->state()->waitForSync(true);
// handle waitForSync for single operations here
if (trx->state()->isSingleOperation()) {
writeOptions.sync = true;
}
}
StorageEngine* engine = EngineSelectorFeature::ENGINE;
rocksdb::TransactionDB* db = static_cast<RocksDBEngine*>(engine)->db();
db->Write(writeOptions, &writeBatch);
}
return result;
}
int RocksDBCollection::removeDocument(arangodb::transaction::Methods* trx,
TRI_voc_rid_t revisionId,
VPackSlice const& doc,
bool& waitForSync) {
// Coordinator doesn't know index internals
TRI_ASSERT(!ServerState::instance()->isCoordinator());
RocksDBEntry entry(RocksDBEntry::Document(_objectId, revisionId, basics::VelocyPackHelper::EmptyObjectValue()));
rocksdb::WriteBatch writeBatch;
writeBatch.Delete(entry.key());
auto indexes = _indexes;
size_t const n = indexes.size();
int result = TRI_ERROR_NO_ERROR;
for (size_t i = 0; i < n; ++i) {
auto idx = indexes[i];
int res = idx->remove(trx, revisionId, doc, false);
// in case of out-of-memory, return immediately
if (res == TRI_ERROR_OUT_OF_MEMORY) {
return res;
}
}
// only commit the batch when all index operations succeeded
if (result == TRI_ERROR_NO_ERROR) {
rocksdb::WriteOptions writeOptions;
if (_logicalCollection->waitForSync()) {
waitForSync = true;
}
if (waitForSync) {
trx->state()->waitForSync(true);
// handle waitForSync for single operations here
if (trx->state()->isSingleOperation()) {
writeOptions.sync = true;
}
}
StorageEngine* engine = EngineSelectorFeature::ENGINE;
rocksdb::TransactionDB* db = static_cast<RocksDBEngine*>(engine)->db();
db->Write(writeOptions, &writeBatch);
}
return result;
}
/// @brief looks up a document by key, low level worker
/// the key must be a string slice, no revision check is performed
int RocksDBCollection::lookupDocument(transaction::Methods* trx,
VPackSlice key,
ManagedDocumentResult& result) {
if (!key.isString()) {
return TRI_ERROR_ARANGO_DOCUMENT_KEY_BAD;
}
RocksDBToken token = primaryIndex()->lookupKey(trx, key, result);
TRI_voc_rid_t revisionId = token.revisionId();
if (revisionId > 0) {
// TODO: add result handling!
/* uint8_t const* vpack = lookupRevisionVPack(revisionId);
if (vpack != nullptr) {
result.addExisting(vpack, revisionId);
}
*/
return TRI_ERROR_NO_ERROR;
}
return TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND;
}
int RocksDBCollection::updateDocument(
transaction::Methods* trx, TRI_voc_rid_t oldRevisionId,
VPackSlice const& oldDoc, TRI_voc_rid_t newRevisionId,
VPackSlice const& newDoc, bool& waitForSync) {
// TODO
return TRI_ERROR_NO_ERROR;
}
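Both insertDocument and removeDocument follow the same RocksDB recipe: stage every key/value change in a WriteBatch, then commit the batch with a single Write call, setting WriteOptions::sync when the operation has to be durable (the waitForSync case for single operations). A minimal sketch against the plain RocksDB API, with a made-up path and keys:

#include <cassert>
#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/example-db", &db);
  assert(s.ok());

  rocksdb::WriteBatch batch;
  batch.Put("doc/1", "{\"_key\":\"1\"}");  // stage an insert
  batch.Delete("doc/0");                   // stage a removal

  rocksdb::WriteOptions writeOptions;
  writeOptions.sync = true;  // analogous to waitForSync for a single operation
  s = db->Write(writeOptions, &batch);  // both changes commit atomically
  assert(s.ok());

  delete db;
  return 0;
}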

View File

@ -31,12 +31,11 @@
#include "VocBase/ManagedDocumentResult.h"
#include "VocBase/PhysicalCollection.h"
namespace arangodb {
class LogicalCollection;
class ManagedDocumentResult;
class Result;
class RocksDBPrimaryIndex;
class RocksDBPrimaryMockIndex;
class RocksDBCollection final : public PhysicalCollection {
friend class RocksDBEngine;
@ -180,7 +179,25 @@ class RocksDBCollection final : public PhysicalCollection {
void addIndex(std::shared_ptr<arangodb::Index> idx);
void addIndexCoordinator(std::shared_ptr<arangodb::Index> idx);
int saveIndex(transaction::Methods* trx, std::shared_ptr<arangodb::Index> idx);
arangodb::RocksDBPrimaryIndex* primaryIndex() const;
arangodb::RocksDBPrimaryMockIndex* primaryIndex() const;
int insertDocument(arangodb::transaction::Methods* trx,
TRI_voc_rid_t revisionId,
arangodb::velocypack::Slice const& doc,
bool& waitForSync);
int removeDocument(arangodb::transaction::Methods* trx,
TRI_voc_rid_t revisionId,
arangodb::velocypack::Slice const& doc,
bool& waitForSync);
int lookupDocument(transaction::Methods* trx,
arangodb::velocypack::Slice key,
ManagedDocumentResult& result);
int updateDocument(transaction::Methods* trx, TRI_voc_rid_t oldRevisionId,
arangodb::velocypack::Slice const& oldDoc, TRI_voc_rid_t newRevisionId,
arangodb::velocypack::Slice const& newDoc, bool& waitForSync);
private:
uint64_t _objectId; // rocksdb-specific object id for collection

View File

@ -23,7 +23,7 @@
#include "RocksDBCommon.h"
using namespace arangodb::rocksdb;
using namespace arangodb;
arangodb::Result convertStatus(::rocksdb::Status const& status, StatusHint hint) {
switch (status.code()) {

View File

@ -21,8 +21,8 @@
/// @author Daniel H. Larkin
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGO_ROCKSDB_ROCKSDB_TYPES_H
#define ARANGO_ROCKSDB_ROCKSDB_TYPES_H 1
#ifndef ARANGO_ROCKSDB_ROCKSDB_COMMON_H
#define ARANGO_ROCKSDB_ROCKSDB_COMMON_H 1
#include "Basics/Common.h"
#include "Basics/Result.h"
@ -30,14 +30,14 @@
#include <rocksdb/status.h>
namespace arangodb {
namespace rocksdb {
//namespace rocksdb {
enum StatusHint { none, document, collection, view, index, database };
arangodb::Result convertStatus(::rocksdb::Status const&,
arangodb::Result convertRocksDBStatus(::rocksdb::Status const&,
StatusHint hint = StatusHint::none);
} // namespace rocksdb
//} // namespace rocksdb
} // namespace arangodb
#endif

View File

@ -155,6 +155,12 @@ void RocksDBEngine::addParametersForNewCollection(VPackBuilder& builder, VPackSl
}
}
void RocksDBEngine::addParametersForNewIndex(VPackBuilder& builder, VPackSlice info) {
if (!info.hasKey("objectId")) {
builder.add("objectId", VPackValue(std::to_string(TRI_NewTickServer())));
}
}
// create storage-engine specific collection
PhysicalCollection* RocksDBEngine::createPhysicalCollection(LogicalCollection* collection,
VPackSlice const& info) {
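The new addParametersForNewIndex hook injects a RocksDB-specific objectId into the index-creation parameters whenever the caller did not supply one. A standalone VelocyPack sketch, with the tick value passed in as a stand-in for TRI_NewTickServer():

#include <cstdint>
#include <string>
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

// Augment the index-creation parameters with an engine-specific
// objectId unless the user-supplied definition already has one.
void addObjectId(VPackBuilder& builder, VPackSlice info, uint64_t tick) {
  if (!info.hasKey("objectId")) {
    builder.add("objectId", VPackValue(std::to_string(tick)));
  }
}

int main() {
  VPackBuilder info;
  info.openObject();
  info.add("type", VPackValue("hash"));
  info.close();

  VPackBuilder out;
  out.openObject();
  addObjectId(out, info.slice(), 12345);
  out.close();  // out now holds {"objectId":"12345"}
  return 0;
}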

View File

@ -226,6 +226,7 @@ class RocksDBEngine final : public StorageEngine {
void addRestHandlers(rest::RestHandlerFactory*) override;
void addParametersForNewCollection(arangodb::velocypack::Builder& builder, arangodb::velocypack::Slice info) override;
void addParametersForNewIndex(arangodb::velocypack::Builder& builder, arangodb::velocypack::Slice info) override;
rocksdb::TransactionDB* db() const { return _db; }

View File

@ -28,7 +28,7 @@
#include "Indexes/Index.h"
#include "RocksDBEngine/RocksDBEngine.h"
#include "RocksDBEngine/RocksDBEdgeIndex.h"
#include "RocksDBEngine/RocksDBPrimaryIndex.h"
#include "RocksDBEngine/RocksDBPrimaryMockIndex.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "VocBase/voc-types.h"
@ -336,7 +336,7 @@ std::shared_ptr<Index> RocksDBIndexFactory::prepareIndexFromSlice(
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"cannot create primary index");
}
newIdx.reset(new arangodb::RocksDBPrimaryIndex(col));
newIdx.reset(new arangodb::RocksDBPrimaryMockIndex(col, info));
break;
}
case arangodb::Index::TRI_IDX_TYPE_EDGE_INDEX: {
@ -367,10 +367,14 @@ std::shared_ptr<Index> RocksDBIndexFactory::prepareIndexFromSlice(
void RocksDBIndexFactory::fillSystemIndexes(
arangodb::LogicalCollection* col,
std::vector<std::shared_ptr<arangodb::Index>>& systemIndexes) const {
// create primary index
systemIndexes.emplace_back(
std::make_shared<arangodb::RocksDBPrimaryIndex>(col));
// create primary index
VPackBuilder builder;
builder.openObject();
builder.close();
systemIndexes.emplace_back(
std::make_shared<arangodb::RocksDBPrimaryMockIndex>(col, builder.slice()));
// create edges index
if (col->type() == TRI_COL_TYPE_EDGE) {
RocksDBEngine *engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);

View File

@ -0,0 +1,227 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "RocksDBPrimaryMockIndex.h"
#include "Aql/AstNode.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
#include "Indexes/SimpleAttributeEqualityMatcher.h"
#include "RocksDBEngine/RocksDBEntry.h"
#include "Transaction/Helpers.h"
#include "Transaction/Methods.h"
#include "Transaction/Context.h"
#include "VocBase/LogicalCollection.h"
#include <velocypack/Builder.h>
#include <velocypack/Collection.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
/// @brief hard-coded vector of the index attributes
/// note that the attribute names must be hard-coded here to avoid an init-order
/// fiasco with StaticStrings::FromString etc.
static std::vector<std::vector<arangodb::basics::AttributeName>> const IndexAttributes
{{arangodb::basics::AttributeName("_id", false)},
{arangodb::basics::AttributeName("_key", false)}};
RocksDBPrimaryMockIndexIterator::RocksDBPrimaryMockIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryMockIndex const* index,
std::unique_ptr<VPackBuilder>& keys)
: IndexIterator(collection, trx, mmdr, index),
_index(index),
_keys(keys.get()),
_iterator(_keys->slice()) {
keys.release(); // now we have ownership for _keys
TRI_ASSERT(_keys->slice().isArray());
}
RocksDBPrimaryMockIndexIterator::~RocksDBPrimaryMockIndexIterator() {
if (_keys != nullptr) {
// return the VPackBuilder to the transaction context
_trx->transactionContextPtr()->returnBuilder(_keys.release());
}
}
bool RocksDBPrimaryMockIndexIterator::next(TokenCallback const& cb, size_t limit) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return false;
}
void RocksDBPrimaryMockIndexIterator::reset() { _iterator.reset(); }
RocksDBAllIndexIterator::RocksDBAllIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryMockIndex const* index,
bool reverse)
: IndexIterator(collection, trx, mmdr, index), _reverse(reverse), _total(0) {}
bool RocksDBAllIndexIterator::next(TokenCallback const& cb, size_t limit) {
// TODO
return false;
}
void RocksDBAllIndexIterator::reset() {
// TODO
}
RocksDBAnyIndexIterator::RocksDBAnyIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryMockIndex const* index)
: IndexIterator(collection, trx, mmdr, index) {}
bool RocksDBAnyIndexIterator::next(TokenCallback const& cb, size_t limit) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return true;
}
void RocksDBAnyIndexIterator::reset() {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
}
RocksDBPrimaryMockIndex::RocksDBPrimaryMockIndex(arangodb::LogicalCollection* collection, VPackSlice const& info)
: Index(basics::VelocyPackHelper::stringUInt64(info, "objectId"), collection,
std::vector<std::vector<arangodb::basics::AttributeName>>(
{{arangodb::basics::AttributeName(StaticStrings::KeyString, false)}}),
true, false),
_objectId(basics::VelocyPackHelper::stringUInt64(info, "objectId")){
}
RocksDBPrimaryMockIndex::~RocksDBPrimaryMockIndex() {}
/// @brief return the number of documents from the index
size_t RocksDBPrimaryMockIndex::size() const {
// TODO
return 0;
}
/// @brief return the memory usage of the index
size_t RocksDBPrimaryMockIndex::memory() const {
return 0; // TODO
}
/// @brief return a VelocyPack representation of the index
void RocksDBPrimaryMockIndex::toVelocyPack(VPackBuilder& builder, bool withFigures) const {
Index::toVelocyPack(builder, withFigures);
// hard-coded
builder.add("unique", VPackValue(true));
builder.add("sparse", VPackValue(false));
}
/// @brief return a VelocyPack representation of the index figures
void RocksDBPrimaryMockIndex::toVelocyPackFigures(VPackBuilder& builder) const {
Index::toVelocyPackFigures(builder);
// TODO: implement
}
RocksDBToken RocksDBPrimaryMockIndex::lookupKey(transaction::Methods* trx, VPackSlice slice, ManagedDocumentResult& result) {
std::string key = slice.copyString();
std::lock_guard<std::mutex> lock(_keyRevMutex);
LOG_TOPIC(ERR, Logger::FIXME) << "LOOKUP. THE KEY IS: " << key;
auto it = _keyRevMap.find(key);
if (it == _keyRevMap.end()) {
return RocksDBToken();
}
return RocksDBToken((*it).second);
}
int RocksDBPrimaryMockIndex::insert(transaction::Methods*, TRI_voc_rid_t revisionId, VPackSlice const& slice, bool) {
std::string key = slice.get("_key").copyString();
std::lock_guard<std::mutex> lock(_keyRevMutex);
LOG_TOPIC(ERR, Logger::FIXME) << "INSERT. THE KEY IS: " << key << "; THE REVISION IS: " << revisionId;
auto result = _keyRevMap.emplace(key, revisionId);
if (result.second) {
return TRI_ERROR_NO_ERROR;
}
return TRI_ERROR_INTERNAL;
}
int RocksDBPrimaryMockIndex::remove(transaction::Methods*, TRI_voc_rid_t revisionId, VPackSlice const& slice, bool) {
std::string key = slice.get("_key").copyString();
std::lock_guard<std::mutex> lock(_keyRevMutex);
LOG_TOPIC(ERR, Logger::FIXME) << "REMOVE. THE KEY IS: " << key;
auto result = _keyRevMap.erase(key);  // result is the number of erased elements
if (result) {
return TRI_ERROR_NO_ERROR;
}
return TRI_ERROR_INTERNAL;
}
/// @brief unload the index data from memory
int RocksDBPrimaryMockIndex::unload() {
// nothing to do
return TRI_ERROR_NO_ERROR;
}
/// @brief checks whether the index supports the condition
bool RocksDBPrimaryMockIndex::supportsFilterCondition(
arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference, size_t itemsInIndex,
size_t& estimatedItems, double& estimatedCost) const {
SimpleAttributeEqualityMatcher matcher(IndexAttributes);
return matcher.matchOne(this, node, reference, itemsInIndex, estimatedItems,
estimatedCost);
}
/// @brief creates an IndexIterator for the given Condition
IndexIterator* RocksDBPrimaryMockIndex::iteratorForCondition(
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference, bool reverse) const {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return nullptr;
}
/// @brief specializes the condition for use with the index
arangodb::aql::AstNode* RocksDBPrimaryMockIndex::specializeCondition(
arangodb::aql::AstNode* node,
arangodb::aql::Variable const* reference) const {
SimpleAttributeEqualityMatcher matcher(IndexAttributes);
return matcher.specializeOne(this, node, reference);
}
/// @brief request an iterator over all elements in the index in
/// a sequential order.
IndexIterator* RocksDBPrimaryMockIndex::allIterator(transaction::Methods* trx,
ManagedDocumentResult* mmdr,
bool reverse) const {
return new RocksDBAllIndexIterator(_collection, trx, mmdr, this, reverse);
}
/// @brief request an iterator over all elements in the index in
/// a random order. It is guaranteed that each element is found
/// exactly once unless the collection is modified.
IndexIterator* RocksDBPrimaryMockIndex::anyIterator(transaction::Methods* trx,
ManagedDocumentResult* mmdr) const {
return new RocksDBAnyIndexIterator(_collection, trx, mmdr, this);
}
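Stripped of the Index machinery, the mock primary index above is a mutex-guarded unordered_map from _key to revision id, held in memory until the real RocksDB-backed primary index exists. The core, reduced to a self-contained class (std::string keys, 0 standing in for the empty RocksDBToken):

#include <cstdint>
#include <mutex>
#include <string>
#include <unordered_map>

typedef uint64_t TRI_voc_rid_t;

class KeyRevMap {
 public:
  TRI_voc_rid_t lookup(std::string const& key) {
    std::lock_guard<std::mutex> lock(_mutex);
    auto it = _map.find(key);
    return it == _map.end() ? 0 : it->second;  // 0 == not found
  }

  bool insert(std::string const& key, TRI_voc_rid_t rid) {
    std::lock_guard<std::mutex> lock(_mutex);
    return _map.emplace(key, rid).second;  // false on duplicate key
  }

  bool remove(std::string const& key) {
    std::lock_guard<std::mutex> lock(_mutex);
    return _map.erase(key) > 0;  // erase returns the element count
  }

 private:
  std::unordered_map<std::string, TRI_voc_rid_t> _map;
  std::mutex _mutex;
};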

View File

@ -0,0 +1,176 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_ROCKSDB_ENGINE_ROCKSDB_PRIMARY_INDEX_H
#define ARANGOD_ROCKSDB_ENGINE_ROCKSDB_PRIMARY_INDEX_H 1
#include "Basics/Common.h"
#include "Indexes/Index.h"
#include "Indexes/IndexIterator.h"
#include "RocksDBEngine/RocksDBToken.h"
#include "VocBase/vocbase.h"
#include "VocBase/voc-types.h"
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <mutex>
namespace arangodb {
class ManagedDocumentResult;
class RocksDBPrimaryMockIndex;
namespace transaction {
class Methods;
}
class RocksDBPrimaryMockIndexIterator final : public IndexIterator {
public:
RocksDBPrimaryMockIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryMockIndex const* index,
std::unique_ptr<VPackBuilder>& keys);
~RocksDBPrimaryMockIndexIterator();
char const* typeName() const override { return "primary-index-iterator"; }
bool next(TokenCallback const& cb, size_t limit) override;
void reset() override;
private:
RocksDBPrimaryMockIndex const* _index;
std::unique_ptr<VPackBuilder> _keys;
arangodb::velocypack::ArrayIterator _iterator;
};
class RocksDBAllIndexIterator final : public IndexIterator {
public:
RocksDBAllIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryMockIndex const* index,
bool reverse);
~RocksDBAllIndexIterator() {}
char const* typeName() const override { return "all-index-iterator"; }
bool next(TokenCallback const& cb, size_t limit) override;
void reset() override;
private:
bool const _reverse;
uint64_t _total;
};
class RocksDBAnyIndexIterator final : public IndexIterator {
public:
RocksDBAnyIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryMockIndex const* index);
~RocksDBAnyIndexIterator() {}
char const* typeName() const override { return "any-index-iterator"; }
bool next(TokenCallback const& cb, size_t limit) override;
void reset() override;
};
class RocksDBPrimaryMockIndex final : public Index {
friend class RocksDBPrimaryMockIndexIterator;
public:
RocksDBPrimaryMockIndex() = delete;
explicit RocksDBPrimaryMockIndex(arangodb::LogicalCollection*, VPackSlice const& info);
~RocksDBPrimaryMockIndex();
public:
IndexType type() const override {
return Index::TRI_IDX_TYPE_PRIMARY_INDEX;
}
char const* typeName() const override { return "primary"; }
bool allowExpansion() const override { return false; }
bool canBeDropped() const override { return false; }
bool isSorted() const override { return false; }
bool hasSelectivityEstimate() const override { return true; }
double selectivityEstimate(arangodb::StringRef const* = nullptr) const override { return 1.0; }
size_t size() const;
size_t memory() const override;
void toVelocyPack(VPackBuilder&, bool) const override;
void toVelocyPackFigures(VPackBuilder&) const override;
RocksDBToken lookupKey(transaction::Methods* trx, arangodb::velocypack::Slice key, ManagedDocumentResult& result);
int insert(transaction::Methods*, TRI_voc_rid_t, arangodb::velocypack::Slice const&, bool isRollback) override;
int remove(transaction::Methods*, TRI_voc_rid_t, arangodb::velocypack::Slice const&, bool isRollback) override;
int unload() override;
bool supportsFilterCondition(arangodb::aql::AstNode const*,
arangodb::aql::Variable const*, size_t, size_t&,
double&) const override;
IndexIterator* iteratorForCondition(transaction::Methods*,
ManagedDocumentResult*,
arangodb::aql::AstNode const*,
arangodb::aql::Variable const*,
bool) const override;
arangodb::aql::AstNode* specializeCondition(
arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;
/// @brief request an iterator over all elements in the index in
/// a sequential order.
IndexIterator* allIterator(transaction::Methods*, ManagedDocumentResult*, bool reverse) const;
/// @brief request an iterator over all elements in the index in
/// a random order. It is guaranteed that each element is found
/// exactly once unless the collection is modified.
IndexIterator* anyIterator(transaction::Methods*, ManagedDocumentResult*) const;
uint64_t objectId() const { return _objectId; }
private:
uint64_t _objectId;
std::unordered_map<std::string, TRI_voc_rid_t> _keyRevMap;
std::mutex _keyRevMutex;
};
}
#endif

View File

@ -0,0 +1,48 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_ROCKSDB_ENGINE_ROCKSDB_TOKEN_H
#define ARANGOD_ROCKSDB_ENGINE_ROCKSDB_TOKEN_H 1
#include "StorageEngine/DocumentIdentifierToken.h"
namespace arangodb {
struct RocksDBToken : public DocumentIdentifierToken {
public:
RocksDBToken() : DocumentIdentifierToken() {}
explicit RocksDBToken(TRI_voc_rid_t revisionId)
: DocumentIdentifierToken(revisionId) {}
RocksDBToken(RocksDBToken const& other)
: DocumentIdentifierToken(other._data) {}
inline TRI_voc_rid_t revisionId() const {
return static_cast<TRI_voc_rid_t>(_data);
}
};
static_assert(sizeof(RocksDBToken) == sizeof(uint64_t), "invalid RocksDBToken size");
}
#endif
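The token adds no state of its own: the revision id lives in the base class's single uint64_t field, which is exactly what the static_assert pins down. A runnable round-trip with the two classes reduced to their data (simplified stand-ins, not the real headers):

#include <cassert>
#include <cstdint>

typedef uint64_t TRI_voc_rid_t;

struct DocumentIdentifierToken {
  uint64_t _data = 0;
  DocumentIdentifierToken() {}
  explicit DocumentIdentifierToken(uint64_t d) : _data(d) {}
};

struct RocksDBToken : public DocumentIdentifierToken {
  RocksDBToken() {}
  explicit RocksDBToken(TRI_voc_rid_t revisionId)
      : DocumentIdentifierToken(revisionId) {}
  TRI_voc_rid_t revisionId() const { return static_cast<TRI_voc_rid_t>(_data); }
};

static_assert(sizeof(RocksDBToken) == sizeof(uint64_t),
              "the token must stay the size of one integer");

int main() {
  RocksDBToken token(12345);            // wrap a revision id
  assert(token.revisionId() == 12345);  // read it back unchanged
  return 0;
}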

View File

@ -38,15 +38,10 @@ RocksDBTransactionCollection::RocksDBTransactionCollection(TransactionState* trx
TRI_voc_cid_t cid,
AccessMode::Type accessType)
: TransactionCollection(trx, cid),
_waitForSync(false),
_accessType(accessType),
_numOperations(0) {
LOG_TOPIC(ERR, Logger::FIXME) << "ctor rocksdb transaction collection: " << cid;
}
_numOperations(0) {}
RocksDBTransactionCollection::~RocksDBTransactionCollection() {
LOG_TOPIC(ERR, Logger::FIXME) << "dtor rocksdb transaction collection: " << _cid;
}
RocksDBTransactionCollection::~RocksDBTransactionCollection() {}
/// @brief request a main-level lock for a collection
int RocksDBTransactionCollection::lock() { return TRI_ERROR_NO_ERROR; }
@ -131,7 +126,6 @@ int RocksDBTransactionCollection::use(int nestingLevel) {
TRI_vocbase_col_status_e status;
LOG_TRX(_transaction, nestingLevel) << "using collection " << _cid;
_collection = _transaction->vocbase()->useCollection(_cid, status);
LOG_TOPIC(ERR, Logger::FIXME) << "using collection " << _cid << ": " << _collection;
if (_collection == nullptr) {
return TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND;
@ -142,9 +136,6 @@ int RocksDBTransactionCollection::use(int nestingLevel) {
!LogicalCollection::IsSystemName(_collection->name())) {
return TRI_ERROR_ARANGO_READ_ONLY;
}
// store the waitForSync property
_waitForSync = _collection->waitForSync();
}
return TRI_ERROR_NO_ERROR;
@ -155,7 +146,6 @@ void RocksDBTransactionCollection::unuse(int /*nestingLevel*/) {}
void RocksDBTransactionCollection::release() {
// the top level transaction releases all collections
if (_collection != nullptr) {
LOG_TOPIC(ERR, Logger::FIXME) << "releasing collection " << _cid << ": " << _collection;
// unuse collection, remove usage-lock
LOG_TRX(_transaction, 0) << "unusing collection " << _cid;

View File

@ -70,8 +70,6 @@ class RocksDBTransactionCollection final : public TransactionCollection {
void release() override;
private:
bool _waitForSync; // whether or not the collection has waitForSync
AccessMode::Type _accessType; // access type (read|write)
uint64_t _numOperations;
};

View File

@ -51,14 +51,10 @@ struct RocksDBTransactionData final : public TransactionData {
RocksDBTransactionState::RocksDBTransactionState(TRI_vocbase_t* vocbase)
: TransactionState(vocbase),
_beginWritten(false),
_hasOperations(false) {
LOG_TOPIC(ERR, Logger::FIXME) << "ctor rocksdb transaction state: " << this;
}
_hasOperations(false) {}
/// @brief free a transaction container
RocksDBTransactionState::~RocksDBTransactionState() {
LOG_TOPIC(ERR, Logger::FIXME) << "dtor rocksdb transaction state: " << this;
}
RocksDBTransactionState::~RocksDBTransactionState() {}
/// @brief start a transaction
int RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
@ -85,8 +81,6 @@ int RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
int res = useCollections(_nestingLevel);
LOG_TOPIC(ERR, Logger::FIXME) << "USE COLLECTIONS RETURNED: " << res << ", NESTING: " << _nestingLevel;
if (res == TRI_ERROR_NO_ERROR) {
// all valid
if (_nestingLevel == 0) {

View File

@ -92,6 +92,10 @@ class StorageEngine : public application_features::ApplicationFeature {
// when a new collection is created, this method is called to augment the collection
// creation data with engine-specific information
virtual void addParametersForNewCollection(VPackBuilder& builder, VPackSlice info) {}
// when a new index is created, this method is called to augment the index
// creation data with engine-specific information
virtual void addParametersForNewIndex(VPackBuilder& builder, VPackSlice info) {}
// create storage-engine specific collection
virtual PhysicalCollection* createPhysicalCollection(LogicalCollection*, VPackSlice const&) = 0;

View File

@ -93,7 +93,6 @@ TransactionCollection* TransactionState::collection(TRI_voc_cid_t cid, AccessMod
int TransactionState::addCollection(TRI_voc_cid_t cid,
AccessMode::Type accessType,
int nestingLevel, bool force) {
LOG_TOPIC(ERR, Logger::FIXME) << "add collection: " << cid << ", " << this;
LOG_TRX(this, nestingLevel) << "adding collection " << cid;
// LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "cid: " << cid
@ -139,9 +138,7 @@ int TransactionState::addCollection(TRI_voc_cid_t cid,
TRI_ASSERT(trxCollection == nullptr);
StorageEngine* engine = EngineSelectorFeature::ENGINE;
LOG_TOPIC(ERR, Logger::FIXME) << "creating trx collection: " << cid << ", " << this;
trxCollection = engine->createTransactionCollection(this, cid, accessType, nestingLevel);
LOG_TOPIC(ERR, Logger::FIXME) << "created trx collection: " << cid << ", " << this << "; " << trxCollection;
TRI_ASSERT(trxCollection != nullptr);
@ -166,8 +163,6 @@ int TransactionState::ensureCollections(int nestingLevel) {
int TransactionState::useCollections(int nestingLevel) {
int res = TRI_ERROR_NO_ERROR;
LOG_TOPIC(ERR, Logger::FIXME) << "use collections " << this;
// process collections in forward order
for (auto& trxCollection : _collections) {
res = trxCollection->use(nestingLevel);

View File

@ -2578,7 +2578,6 @@ arangodb::LogicalCollection* transaction::Methods::documentCollection(
TRI_ASSERT(trxCollection != nullptr);
TRI_ASSERT(_state->status() == transaction::Status::RUNNING);
LOG_TOPIC(ERR, Logger::FIXME) << "accessing collection " << trxCollection->id() << ": " << trxCollection;
TRI_ASSERT(trxCollection->collection() != nullptr);
return trxCollection->collection();

View File

@ -24,13 +24,22 @@
#include "PathEnumerator.h"
#include "Basics/VelocyPackHelper.h"
#include "VocBase/Traverser.h"
#include "VocBase/TraverserCache.h"
using PathEnumerator = arangodb::traverser::PathEnumerator;
using DepthFirstEnumerator = arangodb::traverser::DepthFirstEnumerator;
using BreadthFirstEnumerator = arangodb::traverser::BreadthFirstEnumerator;
using NeighborsEnumerator = arangodb::traverser::NeighborsEnumerator;
using Traverser = arangodb::traverser::Traverser;
using TraverserOptions = arangodb::traverser::TraverserOptions;
PathEnumerator::PathEnumerator(Traverser* traverser, std::string const& startVertex,
TraverserOptions* opts)
: _traverser(traverser), _isFirst(true), _opts(opts) {
StringRef svId = _opts->cache()->persistString(StringRef(startVertex));
// Guarantee that this vertex _id does not run away
_enumeratedPath.vertices.push_back(svId);
TRI_ASSERT(_enumeratedPath.vertices.size() == 1);
}
bool DepthFirstEnumerator::next() {
if (_isFirst) {
_isFirst = false;
@ -43,13 +52,11 @@ bool DepthFirstEnumerator::next() {
return false;
}
size_t cursorId = 0;
while (true) {
if (_enumeratedPath.edges.size() < _opts->maxDepth) {
// We are not done with this path, so
// we reserve the cursor for next depth
auto cursor = _opts->nextCursor(_traverser->mmdr(), _enumeratedPath.vertices.back(),
auto cursor = _opts->nextCursor(_traverser->mmdr(), StringRef(_enumeratedPath.vertices.back()),
_enumeratedPath.edges.size());
if (cursor != nullptr) {
_edgeCursors.emplace(cursor);
@ -65,34 +72,39 @@ bool DepthFirstEnumerator::next() {
while (!_edgeCursors.empty()) {
TRI_ASSERT(_edgeCursors.size() == _enumeratedPath.edges.size() + 1);
auto& cursor = _edgeCursors.top();
if (cursor->next(_enumeratedPath.edges, cursorId)) {
bool foundPath = false;
bool exitInnerLoop = false;
auto callback = [&] (StringRef const& eid, VPackSlice const& edge, size_t cursorId) {
++_traverser->_readDocuments;
_enumeratedPath.edges.push_back(eid);
_opts->cache()->insertDocument(StringRef(eid), edge); // TODO handle in cursor directly?
if (_opts->uniqueEdges == TraverserOptions::UniquenessLevel::GLOBAL) {
if (_returnedEdges.find(_enumeratedPath.edges.back()) ==
_returnedEdges.end()) {
if (_returnedEdges.find(eid) == _returnedEdges.end()) {
// Edge not yet visited. Mark and continue.
_returnedEdges.emplace(_enumeratedPath.edges.back());
_returnedEdges.emplace(eid);
} else {
_traverser->_filteredPaths++;
TRI_ASSERT(!_enumeratedPath.edges.empty());
_enumeratedPath.edges.pop_back();
continue;
return;
}
}
if (!_traverser->edgeMatchesConditions(_enumeratedPath.edges.back(),
_enumeratedPath.vertices.back(),
if (!_traverser->edgeMatchesConditions(edge,
StringRef(_enumeratedPath.vertices.back()),
_enumeratedPath.edges.size() - 1,
cursorId)) {
// This edge does not pass the filtering
TRI_ASSERT(!_enumeratedPath.edges.empty());
_enumeratedPath.edges.pop_back();
continue;
// This edge does not pass the filtering
TRI_ASSERT(!_enumeratedPath.edges.empty());
_enumeratedPath.edges.pop_back();
return;
}
if (_opts->uniqueEdges == TraverserOptions::UniquenessLevel::PATH) {
auto& e = _enumeratedPath.edges.back();
StringRef const& e = _enumeratedPath.edges.back();
bool foundOnce = false;
for (auto const& it : _enumeratedPath.edges) {
for (StringRef const& it : _enumeratedPath.edges) {
if (foundOnce) {
foundOnce = false; // if we leave with foundOnce == false we found the edge earlier
break;
@ -106,13 +118,12 @@ bool DepthFirstEnumerator::next() {
// This edge is already on the path
TRI_ASSERT(!_enumeratedPath.edges.empty());
_enumeratedPath.edges.pop_back();
continue;
return;
}
}
// We have to check if edge and vertex is valid
if (_traverser->getVertex(_enumeratedPath.edges.back(),
_enumeratedPath.vertices)) {
if (_traverser->getVertex(edge, _enumeratedPath.vertices)) {
// case both are valid.
if (_opts->uniqueVertices == TraverserOptions::UniquenessLevel::PATH) {
auto& e = _enumeratedPath.vertices.back();
@ -120,7 +131,7 @@ bool DepthFirstEnumerator::next() {
for (auto const& it : _enumeratedPath.vertices) {
if (foundOnce) {
foundOnce = false; // if we leave with foundOnce == false we
// found the edge earlier
// found the edge earlier
break;
}
if (it == e) {
@ -133,20 +144,28 @@ bool DepthFirstEnumerator::next() {
TRI_ASSERT(!_enumeratedPath.edges.empty());
_enumeratedPath.vertices.pop_back();
_enumeratedPath.edges.pop_back();
continue;
return;
}
}
if (_enumeratedPath.edges.size() < _opts->minDepth) {
// Do not return, but leave this loop. Continue with the outer.
break;
exitInnerLoop = true;
return;
}
return true;
foundPath = true;
return;
}
// Vertex Invalid. Revoke edge
TRI_ASSERT(!_enumeratedPath.edges.empty());
_enumeratedPath.edges.pop_back();
continue;
};
if (cursor->next(callback)) {
if (foundPath) {
return true;
} else if (exitInnerLoop) {
break;
}
} else {
// cursor is empty.
_edgeCursors.pop();
@ -155,25 +174,25 @@ bool DepthFirstEnumerator::next() {
_enumeratedPath.vertices.pop_back();
}
}
}
} // while (!_edgeCursors.empty())
if (_edgeCursors.empty()) {
// If we get here all cursors are exhausted.
_enumeratedPath.edges.clear();
_enumeratedPath.vertices.clear();
return false;
}
}
} // while (true)
}
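The structural change in this function is the move from a pull-style cursor (next() appended edges to a vector) to a push-style one (next() invokes a callback per edge). Since continue and break cannot cross the lambda boundary, the old control flow is rebuilt with the foundPath and exitInnerLoop flags. A toy model of the pattern, with a hypothetical Cursor rather than the real EdgeCursor API:

#include <cstdio>
#include <functional>
#include <vector>

struct Cursor {
  std::vector<int> items{1, 2, 3};
  size_t pos = 0;
  // Push-style: hand the next element to the callback, report exhaustion.
  bool next(std::function<void(int)> const& cb) {
    if (pos >= items.size()) return false;
    cb(items[pos++]);
    return true;
  }
};

int main() {
  Cursor cursor;
  bool found = false;
  // `return` inside the callback only leaves the lambda, so the
  // surrounding loop reads flags instead of using continue/break.
  auto callback = [&](int item) {
    if (item == 2) found = true;
  };
  while (cursor.next(callback)) {
    if (found) break;
  }
  std::printf("found: %d\n", (int)found);
  return 0;
}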
arangodb::aql::AqlValue DepthFirstEnumerator::lastVertexToAqlValue() {
return _traverser->fetchVertexData(_enumeratedPath.vertices.back());
return _traverser->fetchVertexData(StringRef(_enumeratedPath.vertices.back()));
}
arangodb::aql::AqlValue DepthFirstEnumerator::lastEdgeToAqlValue() {
if (_enumeratedPath.edges.empty()) {
return arangodb::aql::AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
return _traverser->fetchEdgeData(_enumeratedPath.edges.back());
return _traverser->fetchEdgeData(StringRef(_enumeratedPath.edges.back()));
}
arangodb::aql::AqlValue DepthFirstEnumerator::pathToAqlValue(arangodb::velocypack::Builder& result) {
@ -182,280 +201,15 @@ arangodb::aql::AqlValue DepthFirstEnumerator::pathToAqlValue(arangodb::velocypac
result.add(VPackValue("edges"));
result.openArray();
for (auto const& it : _enumeratedPath.edges) {
_traverser->addEdgeToVelocyPack(it, result);
_traverser->addEdgeToVelocyPack(StringRef(it), result);
}
result.close();
result.add(VPackValue("vertices"));
result.openArray();
for (auto const& it : _enumeratedPath.vertices) {
_traverser->addVertexToVelocyPack(it, result);
_traverser->addVertexToVelocyPack(StringRef(it), result);
}
result.close();
result.close();
return arangodb::aql::AqlValue(result.slice());
}
BreadthFirstEnumerator::BreadthFirstEnumerator(Traverser* traverser,
VPackSlice startVertex,
TraverserOptions const* opts)
: PathEnumerator(traverser, startVertex, opts),
_schreierIndex(1),
_lastReturned(0),
_currentDepth(0),
_toSearchPos(0) {
_schreier.reserve(32);
_schreier.emplace_back(startVertex);
_toSearch.emplace_back(NextStep(0));
}
bool BreadthFirstEnumerator::next() {
if (_isFirst) {
_isFirst = false;
if (_opts->minDepth == 0) {
computeEnumeratedPath(_lastReturned++);
return true;
}
_lastReturned++;
}
if (_lastReturned < _schreierIndex) {
// We still have something on our stack.
// Paths have been read but not returned.
computeEnumeratedPath(_lastReturned++);
return true;
}
if (_opts->maxDepth == 0) {
// Short circuit.
// We cannot find any path of length 0 or less
return false;
}
// Avoid large call stacks.
// Loop will be left if we are either finished
// with searching.
// Or we found vertices in the next depth for
// a vertex.
while (true) {
if (_toSearchPos >= _toSearch.size()) {
// This depth is done. GoTo next
if (_nextDepth.empty()) {
// That's it. We are done.
_enumeratedPath.edges.clear();
_enumeratedPath.vertices.clear();
return false;
}
// Save copies:
// We clear current
// we swap current and next.
// So now current is filled
// and next is empty.
_toSearch.clear();
_toSearchPos = 0;
_toSearch.swap(_nextDepth);
_currentDepth++;
TRI_ASSERT(_toSearchPos < _toSearch.size());
TRI_ASSERT(_nextDepth.empty());
TRI_ASSERT(_currentDepth < _opts->maxDepth);
}
// This access is always safe.
// If not it should have bailed out before.
TRI_ASSERT(_toSearchPos < _toSearch.size());
_tmpEdges.clear();
auto const nextIdx = _toSearch[_toSearchPos++].sourceIdx;
auto const nextVertex = _schreier[nextIdx].vertex;
std::unique_ptr<arangodb::traverser::EdgeCursor> cursor(_opts->nextCursor(_traverser->mmdr(), nextVertex, _currentDepth));
if (cursor != nullptr) {
size_t cursorIdx;
bool shouldReturnPath = _currentDepth + 1 >= _opts->minDepth;
bool didInsert = false;
while (cursor->readAll(_tmpEdges, cursorIdx)) {
if (!_tmpEdges.empty()) {
_traverser->_readDocuments += _tmpEdges.size();
VPackSlice v;
for (auto const& e : _tmpEdges) {
if (_opts->uniqueEdges ==
TraverserOptions::UniquenessLevel::GLOBAL) {
if (_returnedEdges.find(e) == _returnedEdges.end()) {
// Edge not yet visited. Mark and continue.
_returnedEdges.emplace(e);
} else {
_traverser->_filteredPaths++;
continue;
}
}
if (!_traverser->edgeMatchesConditions(e, nextVertex,
_currentDepth,
cursorIdx)) {
continue;
}
if (_traverser->getSingleVertex(e, nextVertex, _currentDepth, v)) {
_schreier.emplace_back(nextIdx, e, v);
if (_currentDepth < _opts->maxDepth - 1) {
_nextDepth.emplace_back(NextStep(_schreierIndex));
}
_schreierIndex++;
didInsert = true;
}
}
_tmpEdges.clear();
}
}
if (!shouldReturnPath) {
_lastReturned = _schreierIndex;
didInsert = false;
}
if (didInsert) {
// We exit the loop here.
// _schreierIndex is moved forward
break;
}
}
// Nothing found for this vertex.
// _toSearchPos is increased so
// we are not stuck in an endless loop
}
// _lastReturned points to the last used
// entry. We compute the path to it
// and increase the schreierIndex to point
// to the next free position.
computeEnumeratedPath(_lastReturned++);
return true;
}
// TODO Optimize this. Remove enumeratedPath
// All can be read from schreier vector directly
arangodb::aql::AqlValue BreadthFirstEnumerator::lastVertexToAqlValue() {
return _traverser->fetchVertexData(
_enumeratedPath.vertices.back());
}
arangodb::aql::AqlValue BreadthFirstEnumerator::lastEdgeToAqlValue() {
if (_enumeratedPath.edges.empty()) {
return arangodb::aql::AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
return _traverser->fetchEdgeData(_enumeratedPath.edges.back());
}
arangodb::aql::AqlValue BreadthFirstEnumerator::pathToAqlValue(
arangodb::velocypack::Builder& result) {
result.clear();
result.openObject();
result.add(VPackValue("edges"));
result.openArray();
for (auto const& it : _enumeratedPath.edges) {
_traverser->addEdgeToVelocyPack(it, result);
}
result.close();
result.add(VPackValue("vertices"));
result.openArray();
for (auto const& it : _enumeratedPath.vertices) {
_traverser->addVertexToVelocyPack(it, result);
}
result.close();
result.close();
return arangodb::aql::AqlValue(result.slice());
}
void BreadthFirstEnumerator::computeEnumeratedPath(size_t index) {
TRI_ASSERT(index < _schreier.size());
size_t depth = getDepth(index);
_enumeratedPath.edges.clear();
_enumeratedPath.vertices.clear();
_enumeratedPath.edges.resize(depth);
_enumeratedPath.vertices.resize(depth + 1);
// Computed path. Insert it into the path enumerator.
while (index != 0) {
TRI_ASSERT(depth > 0);
PathStep const& current = _schreier[index];
_enumeratedPath.vertices[depth] = current.vertex;
_enumeratedPath.edges[depth - 1] = current.edge;
index = current.sourceIdx;
--depth;
}
_enumeratedPath.vertices[0] = _schreier[0].vertex;
}
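computeEnumeratedPath recovers a concrete path from the schreier vector by following parent links: every entry stores the index it was discovered from, so walking sourceIdx back to entry 0 yields the path in reverse, and the arrays are filled from the back. A self-contained sketch with integer vertices:

#include <cstdio>
#include <vector>

struct PathStep {
  size_t sourceIdx;  // index of the predecessor entry
  int vertex;
};

// Reconstruct root -> ... -> schreier[index] by chasing parent links.
std::vector<int> pathTo(std::vector<PathStep> const& schreier, size_t index) {
  size_t depth = 0;
  for (size_t i = index; i != 0; i = schreier[i].sourceIdx) {
    ++depth;  // first pass: measure the depth of the entry
  }
  std::vector<int> path(depth + 1);
  for (size_t i = index; i != 0; i = schreier[i].sourceIdx) {
    path[depth--] = schreier[i].vertex;  // second pass: fill from the back
  }
  path[0] = schreier[0].vertex;  // entry 0 is always the start vertex
  return path;
}

int main() {
  // entry 0 is the root; entries 1 and 2 hang off it; entry 3 off entry 2
  std::vector<PathStep> schreier{{0, 10}, {0, 11}, {0, 12}, {2, 13}};
  for (int v : pathTo(schreier, 3)) std::printf("%d ", v);  // 10 12 13
  std::printf("\n");
  return 0;
}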
NeighborsEnumerator::NeighborsEnumerator(Traverser* traverser,
VPackSlice startVertex,
TraverserOptions const* opts)
: PathEnumerator(traverser, startVertex, opts),
_searchDepth(0) {
_allFound.insert(arangodb::basics::VPackHashedSlice(startVertex));
_currentDepth.insert(arangodb::basics::VPackHashedSlice(startVertex));
_iterator = _currentDepth.begin();
}
bool NeighborsEnumerator::next() {
if (_isFirst) {
_isFirst = false;
if (_opts->minDepth == 0) {
return true;
}
}
if (_iterator == _currentDepth.end() || ++_iterator == _currentDepth.end()) {
do {
// This depth is done. Get next
if (_opts->maxDepth == _searchDepth) {
// We are finished.
return false;
}
_lastDepth.swap(_currentDepth);
_currentDepth.clear();
for (auto const& nextVertex : _lastDepth) {
size_t cursorIdx = 0;
std::unique_ptr<arangodb::traverser::EdgeCursor> cursor(
_opts->nextCursor(_traverser->mmdr(), nextVertex.slice, _searchDepth));
while (cursor->readAll(_tmpEdges, cursorIdx)) {
if (!_tmpEdges.empty()) {
_traverser->_readDocuments += _tmpEdges.size();
VPackSlice v;
for (auto const& e : _tmpEdges) {
if (_traverser->getSingleVertex(e, nextVertex.slice, _searchDepth, v)) {
arangodb::basics::VPackHashedSlice hashed(v);
if (_allFound.find(hashed) == _allFound.end()) {
_currentDepth.emplace(hashed);
_allFound.emplace(hashed);
}
}
}
_tmpEdges.clear();
}
}
}
if (_currentDepth.empty()) {
// Nothing found. Cannot do anything more.
return false;
}
++_searchDepth;
} while (_searchDepth < _opts->minDepth);
_iterator = _currentDepth.begin();
}
TRI_ASSERT(_iterator != _currentDepth.end());
return true;
}
arangodb::aql::AqlValue NeighborsEnumerator::lastVertexToAqlValue() {
TRI_ASSERT(_iterator != _currentDepth.end());
return _traverser->fetchVertexData((*_iterator).slice);
}
arangodb::aql::AqlValue NeighborsEnumerator::lastEdgeToAqlValue() {
// TODO should return Optimizer failed
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
arangodb::aql::AqlValue NeighborsEnumerator::pathToAqlValue(arangodb::velocypack::Builder& result) {
// TODO should return Optimizer failed
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}

View File

@ -25,6 +25,7 @@
#define ARANGODB_VOCBASE_PATHENUMERATOR_H 1
#include "Basics/Common.h"
#include "VocBase/Traverser.h"
#include "VocBase/TraverserOptions.h"
#include <velocypack/Slice.h>
#include <stack>
@ -43,8 +44,8 @@ class Traverser;
struct TraverserOptions;
struct EnumeratedPath {
std::vector<arangodb::velocypack::Slice> edges;
std::vector<arangodb::velocypack::Slice> vertices;
std::vector<arangodb::StringRef> edges;
std::vector<arangodb::StringRef> vertices;
EnumeratedPath() {}
};
@ -69,10 +70,10 @@ class PathEnumerator {
bool _isFirst;
//////////////////////////////////////////////////////////////////////////////
/// @brief Maximal path length which should be enumerated.
/// @brief Options used in the traversal
//////////////////////////////////////////////////////////////////////////////
TraverserOptions const* _opts;
TraverserOptions* _opts;
//////////////////////////////////////////////////////////////////////////////
/// @brief List of the last path is used to
@ -81,16 +82,11 @@ class PathEnumerator {
EnumeratedPath _enumeratedPath;
/// @brief List which edges have been visited already.
std::unordered_set<arangodb::velocypack::Slice> _returnedEdges;
std::unordered_set<arangodb::StringRef> _returnedEdges;
public:
PathEnumerator(Traverser* traverser, arangodb::velocypack::Slice startVertex,
TraverserOptions const* opts)
: _traverser(traverser), _isFirst(true), _opts(opts) {
TRI_ASSERT(startVertex.isString());
_enumeratedPath.vertices.push_back(startVertex);
TRI_ASSERT(_enumeratedPath.vertices.size() == 1);
}
PathEnumerator(Traverser* traverser, std::string const& startVertex,
TraverserOptions* opts);
virtual ~PathEnumerator() {}
@ -117,8 +113,8 @@ class DepthFirstEnumerator final : public PathEnumerator {
public:
DepthFirstEnumerator(Traverser* traverser,
arangodb::velocypack::Slice startVertex,
TraverserOptions const* opts)
std::string const& startVertex,
TraverserOptions* opts)
: PathEnumerator(traverser, startVertex, opts) {}
~DepthFirstEnumerator() {
@ -142,184 +138,6 @@ class DepthFirstEnumerator final : public PathEnumerator {
aql::AqlValue pathToAqlValue(arangodb::velocypack::Builder& result) override;
};
class BreadthFirstEnumerator final : public PathEnumerator {
private:
//////////////////////////////////////////////////////////////////////////////
/// @brief One entry in the schreier vector
//////////////////////////////////////////////////////////////////////////////
struct PathStep {
size_t sourceIdx;
arangodb::velocypack::Slice edge;
arangodb::velocypack::Slice vertex;
private:
PathStep() {}
public:
explicit PathStep(arangodb::velocypack::Slice vertex) : sourceIdx(0), vertex(vertex) {}
PathStep(size_t sourceIdx, arangodb::velocypack::Slice edge,
arangodb::velocypack::Slice vertex)
: sourceIdx(sourceIdx), edge(edge), vertex(vertex) {}
};
//////////////////////////////////////////////////////////////////////////////
/// @brief Struct to hold all information required to get the list of
/// connected edges
//////////////////////////////////////////////////////////////////////////////
struct NextStep {
size_t sourceIdx;
private:
NextStep() = delete;
public:
explicit NextStep(size_t sourceIdx)
: sourceIdx(sourceIdx) {}
};
//////////////////////////////////////////////////////////////////////////////
/// @brief schreier vector to store the visited vertices
//////////////////////////////////////////////////////////////////////////////
std::vector<PathStep> _schreier;
//////////////////////////////////////////////////////////////////////////////
/// @brief Next free index in schreier vector.
//////////////////////////////////////////////////////////////////////////////
size_t _schreierIndex;
//////////////////////////////////////////////////////////////////////////////
/// @brief Position of the last returned value in the schreier vector
//////////////////////////////////////////////////////////////////////////////
size_t _lastReturned;
//////////////////////////////////////////////////////////////////////////////
/// @brief Vector to store where to continue search on next depth
//////////////////////////////////////////////////////////////////////////////
std::vector<NextStep> _nextDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief Vector storing the position at current search depth
//////////////////////////////////////////////////////////////////////////////
std::vector<NextStep> _toSearch;
//////////////////////////////////////////////////////////////////////////////
/// @brief Vector storing the position at current search depth
//////////////////////////////////////////////////////////////////////////////
std::unordered_set<arangodb::velocypack::Slice> _tmpEdges;
//////////////////////////////////////////////////////////////////////////////
/// @brief Marker for the search depth. Used to abort searching.
//////////////////////////////////////////////////////////////////////////////
uint64_t _currentDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief position in _toSearch. If this is >= _toSearch.size() we are done
/// with this depth.
//////////////////////////////////////////////////////////////////////////////
size_t _toSearchPos;
public:
BreadthFirstEnumerator(Traverser* traverser,
arangodb::velocypack::Slice startVertex,
TraverserOptions const* opts);
~BreadthFirstEnumerator() {}
//////////////////////////////////////////////////////////////////////////////
/// @brief Get the next Path element from the traversal.
//////////////////////////////////////////////////////////////////////////////
bool next() override;
aql::AqlValue lastVertexToAqlValue() override;
aql::AqlValue lastEdgeToAqlValue() override;
aql::AqlValue pathToAqlValue(arangodb::velocypack::Builder& result) override;
private:
inline size_t getDepth(size_t index) const {
size_t depth = 0;
while (index != 0) {
++depth;
index = _schreier[index].sourceIdx;
}
return depth;
}
//////////////////////////////////////////////////////////////////////////////
/// @brief Build the enumerated path for the given index in the schreier
/// vector.
//////////////////////////////////////////////////////////////////////////////
void computeEnumeratedPath(size_t index);
};
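The schreier vector above stores, for every discovered vertex, the index of its predecessor entry, so a path is recovered by walking sourceIdx links back to index 0 (this is what getDepth counts and computeEnumeratedPath materializes). A minimal standalone sketch of that reconstruction, assuming a simplified Step with std::string payloads instead of velocypack Slices:

#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

// Simplified stand-in for PathStep: index 0 holds the start vertex.
struct Step {
  std::size_t sourceIdx;  // predecessor entry in the schreier vector
  std::string vertex;     // payload, simplified from a velocypack Slice
};

// Rebuild the path ending at schreier[index], in root-to-leaf order.
std::vector<std::string> reconstructPath(std::vector<Step> const& schreier,
                                         std::size_t index) {
  std::vector<std::string> path;
  while (index != 0) {  // follow predecessor links back to the root
    path.push_back(schreier[index].vertex);
    index = schreier[index].sourceIdx;
  }
  path.push_back(schreier[0].vertex);  // the start vertex itself
  std::reverse(path.begin(), path.end());
  return path;
}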
// @brief Enumerator optimized for neighbors. Does not allow edge access
class NeighborsEnumerator final : public PathEnumerator {
std::unordered_set<arangodb::basics::VPackHashedSlice,
arangodb::basics::VelocyPackHelper::VPackHashedStringHash,
arangodb::basics::VelocyPackHelper::VPackHashedStringEqual>
_allFound;
std::unordered_set<arangodb::basics::VPackHashedSlice,
arangodb::basics::VelocyPackHelper::VPackHashedStringHash,
arangodb::basics::VelocyPackHelper::VPackHashedStringEqual>
_currentDepth;
std::unordered_set<arangodb::basics::VPackHashedSlice,
arangodb::basics::VelocyPackHelper::VPackHashedStringHash,
arangodb::basics::VelocyPackHelper::VPackHashedStringEqual>
_lastDepth;
std::unordered_set<arangodb::basics::VPackHashedSlice, arangodb::basics::VelocyPackHelper::VPackHashedStringHash, arangodb::basics::VelocyPackHelper::VPackHashedStringEqual>::iterator _iterator;
uint64_t _searchDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief Set to temporarily store edges found at the current search depth
//////////////////////////////////////////////////////////////////////////////
std::unordered_set<arangodb::velocypack::Slice> _tmpEdges;
public:
NeighborsEnumerator(Traverser* traverser,
arangodb::velocypack::Slice startVertex,
TraverserOptions const* opts);
~NeighborsEnumerator() {
}
//////////////////////////////////////////////////////////////////////////////
/// @brief Get the next Path element from the traversal.
//////////////////////////////////////////////////////////////////////////////
bool next() override;
aql::AqlValue lastVertexToAqlValue() override;
aql::AqlValue lastEdgeToAqlValue() override;
aql::AqlValue pathToAqlValue(arangodb::velocypack::Builder& result) override;
};
} // namespace traverser
} // namespace arangodb

View File

@ -23,12 +23,18 @@
#include "SingleServerTraverser.h"
#include "Basics/StringRef.h"
#include "Aql/AqlValue.h"
#include "Graph/BreadthFirstEnumerator.h"
#include "Graph/NeighborsEnumerator.h"
#include "Transaction/Methods.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"
#include "VocBase/TraverserCache.h"
using namespace arangodb;
using namespace arangodb::traverser;
using namespace arangodb::graph;
////////////////////////////////////////////////////////////////////////////////
/// @brief Get a document by its ID. Also lazily locks the collection.
@ -37,28 +43,11 @@ using namespace arangodb::traverser;
/// In all other cases this function throws.
////////////////////////////////////////////////////////////////////////////////
static int FetchDocumentById(transaction::Methods* trx,
StringRef const& id,
ManagedDocumentResult& result) {
size_t pos = id.find('/');
if (pos == std::string::npos) {
TRI_ASSERT(false);
return TRI_ERROR_INTERNAL;
}
int res = trx->documentFastPathLocal(id.substr(0, pos).toString(),
id.substr(pos + 1).toString(), result);
if (res != TRI_ERROR_NO_ERROR && res != TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND) {
THROW_ARANGO_EXCEPTION(res);
}
return res;
}
SingleServerEdgeCursor::SingleServerEdgeCursor(ManagedDocumentResult* mmdr,
transaction::Methods* trx,
TraverserOptions* opts,
size_t nrCursors, std::vector<size_t> const* mapping)
: _trx(trx),
: _opts(opts),
_trx(opts->_trx),
_mmdr(mmdr),
_cursors(),
_currentCursor(0),
@ -70,22 +59,24 @@ SingleServerEdgeCursor::SingleServerEdgeCursor(ManagedDocumentResult* mmdr,
_cache.reserve(1000);
};
bool SingleServerEdgeCursor::next(std::vector<VPackSlice>& result,
size_t& cursorId) {
bool SingleServerEdgeCursor::next(std::function<void(StringRef const&, VPackSlice, size_t)> callback) {
if (_currentCursor == _cursors.size()) {
return false;
}
if (_cachePos < _cache.size()) {
LogicalCollection* collection = _cursors[_currentCursor][_currentSubCursor]->collection();
if (collection->readDocument(_trx, _cache[_cachePos++], *_mmdr)) {
result.emplace_back(_mmdr->vpack());
}
if (_internalCursorMapping != nullptr) {
TRI_ASSERT(_currentCursor < _internalCursorMapping->size());
cursorId = _internalCursorMapping->at(_currentCursor);
} else {
cursorId = _currentCursor;
VPackSlice edgeDocument(_mmdr->vpack());
std::string eid = _trx->extractIdString(edgeDocument);
StringRef persId = _opts->cache()->persistString(StringRef(eid));
if (_internalCursorMapping != nullptr) {
TRI_ASSERT(_currentCursor < _internalCursorMapping->size());
callback(persId, edgeDocument, _internalCursorMapping->at(_currentCursor));
} else {
callback(persId, edgeDocument, _currentCursor);
}
}
return true;
}
// We need to refill the cache.
@ -132,105 +123,73 @@ bool SingleServerEdgeCursor::next(std::vector<VPackSlice>& result,
TRI_ASSERT(_cachePos < _cache.size());
LogicalCollection* collection = cursor->collection();
if (collection->readDocument(_trx, _cache[_cachePos++], *_mmdr)) {
result.emplace_back(_mmdr->vpack());
}
if (_internalCursorMapping != nullptr) {
TRI_ASSERT(_currentCursor < _internalCursorMapping->size());
cursorId = _internalCursorMapping->at(_currentCursor);
} else {
cursorId = _currentCursor;
VPackSlice edgeDocument(_mmdr->vpack());
std::string eid = _trx->extractIdString(edgeDocument);
StringRef persId = _opts->cache()->persistString(StringRef(eid));
if (_internalCursorMapping != nullptr) {
TRI_ASSERT(_currentCursor < _internalCursorMapping->size());
callback(persId, edgeDocument, _internalCursorMapping->at(_currentCursor));
} else {
callback(persId, edgeDocument, _currentCursor);
}
}
return true;
}
bool SingleServerEdgeCursor::readAll(std::unordered_set<VPackSlice>& result,
size_t& cursorId) {
if (_currentCursor >= _cursors.size()) {
return false;
}
if (_internalCursorMapping != nullptr) {
TRI_ASSERT(_currentCursor < _internalCursorMapping->size());
cursorId = _internalCursorMapping->at(_currentCursor);
} else {
cursorId = _currentCursor;
}
auto& cursorSet = _cursors[_currentCursor];
for (auto& cursor : cursorSet) {
LogicalCollection* collection = cursor->collection();
auto cb = [&] (DocumentIdentifierToken const& token) {
if (collection->readDocument(_trx, token, *_mmdr)) {
result.emplace(_mmdr->vpack());
void SingleServerEdgeCursor::readAll(std::function<void(StringRef const&, arangodb::velocypack::Slice, size_t&)> callback) {
size_t cursorId = 0;
for (_currentCursor = 0; _currentCursor < _cursors.size(); ++_currentCursor) {
if (_internalCursorMapping != nullptr) {
TRI_ASSERT(_currentCursor < _internalCursorMapping->size());
cursorId = _internalCursorMapping->at(_currentCursor);
} else {
cursorId = _currentCursor;
}
auto& cursorSet = _cursors[_currentCursor];
for (auto& cursor : cursorSet) {
LogicalCollection* collection = cursor->collection();
auto cb = [&] (DocumentIdentifierToken const& token) {
if (collection->readDocument(_trx, token, *_mmdr)) {
VPackSlice doc(_mmdr->vpack());
std::string tmpId = _trx->extractIdString(doc);
StringRef edgeId = _opts->cache()->persistString(StringRef(tmpId));
callback(edgeId, doc, cursorId);
}
};
while (cursor->getMore(cb, 1000)) {
}
};
while (cursor->getMore(cb, 1000)) {
}
}
_currentCursor++;
return true;
}
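With this change the cursor pushes each edge through a callback instead of filling an output vector. The following self-contained sketch shows how a caller drains such a callback-based cursor; FakeCursor and the std::string parameters are stand-ins for the real SingleServerEdgeCursor, StringRef and VPackSlice types:

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

using EdgeCallback = std::function<void(
    std::string const& edgeId, std::string const& edgeDoc, std::size_t cursorId)>;

// Hypothetical cursor that delivers one edge per next() call.
struct FakeCursor {
  std::size_t remaining = 3;
  bool next(EdgeCallback const& cb) {
    if (remaining == 0) {
      return false;  // exhausted, like _currentCursor == _cursors.size()
    }
    --remaining;
    cb("edges/" + std::to_string(remaining), "{...}", 0);
    return true;
  }
};

int main() {
  FakeCursor cursor;
  while (cursor.next([](std::string const& id, std::string const&, std::size_t cid) {
    std::cout << "got edge " << id << " from cursor " << cid << "\n";
  })) {
    // each successful next() handed exactly one edge to the callback
  }
}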
SingleServerTraverser::SingleServerTraverser(TraverserOptions* opts,
transaction::Methods* trx,
ManagedDocumentResult* mmdr)
: Traverser(opts, trx, mmdr) {}
: Traverser(opts, trx, mmdr) {}
SingleServerTraverser::~SingleServerTraverser() {}
aql::AqlValue SingleServerTraverser::fetchVertexData(VPackSlice id) {
TRI_ASSERT(id.isString());
auto it = _vertices.find(id);
if (it == _vertices.end()) {
StringRef ref(id);
int res = FetchDocumentById(_trx, ref, *_mmdr);
++_readDocuments;
if (res != TRI_ERROR_NO_ERROR) {
return aql::AqlValue(basics::VelocyPackHelper::NullValue());
}
uint8_t const* p = _mmdr->vpack();
_vertices.emplace(id, p);
return aql::AqlValue(p, aql::AqlValueFromManagedDocument());
}
return aql::AqlValue((*it).second, aql::AqlValueFromManagedDocument());
aql::AqlValue SingleServerTraverser::fetchVertexData(StringRef vid) {
return _opts->cache()->fetchAqlResult(vid);
}
aql::AqlValue SingleServerTraverser::fetchEdgeData(VPackSlice edge) {
return aql::AqlValue(edge);
aql::AqlValue SingleServerTraverser::fetchEdgeData(StringRef edge) {
return _opts->cache()->fetchAqlResult(edge);
}
void SingleServerTraverser::addVertexToVelocyPack(VPackSlice id,
void SingleServerTraverser::addVertexToVelocyPack(StringRef vid,
VPackBuilder& result) {
TRI_ASSERT(id.isString());
auto it = _vertices.find(id);
if (it == _vertices.end()) {
StringRef ref(id);
int res = FetchDocumentById(_trx, ref, *_mmdr);
++_readDocuments;
if (res != TRI_ERROR_NO_ERROR) {
result.add(basics::VelocyPackHelper::NullValue());
} else {
uint8_t const* p = _mmdr->vpack();
_vertices.emplace(id, p);
result.addExternal(p);
}
} else {
result.addExternal((*it).second);
}
_opts->cache()->insertIntoResult(vid, result);
}
void SingleServerTraverser::addEdgeToVelocyPack(VPackSlice edge,
void SingleServerTraverser::addEdgeToVelocyPack(StringRef edge,
VPackBuilder& result) {
result.addExternal(edge.begin());
_opts->cache()->insertIntoResult(edge, result);
}
void SingleServerTraverser::setStartVertex(std::string const& v) {
void SingleServerTraverser::setStartVertex(std::string const& vid) {
_startIdBuilder->clear();
_startIdBuilder->add(VPackValue(v));
_startIdBuilder->add(VPackValue(vid));
VPackSlice idSlice = _startIdBuilder->slice();
if (!vertexMatchesConditions(idSlice, 0)) {
@ -239,7 +198,8 @@ void SingleServerTraverser::setStartVertex(std::string const& v) {
return;
}
_vertexGetter->reset(idSlice);
StringRef persId = _opts->cache()->persistString(StringRef(vid));
_vertexGetter->reset(persId);
if (_opts->useBreadthFirst) {
if (_canUseOptimizedNeighbors) {
@ -248,17 +208,21 @@ void SingleServerTraverser::setStartVertex(std::string const& v) {
_enumerator.reset(new BreadthFirstEnumerator(this, idSlice, _opts));
}
} else {
_enumerator.reset(new DepthFirstEnumerator(this, idSlice, _opts));
_enumerator.reset(new DepthFirstEnumerator(this, vid, _opts));
}
_done = false;
}
size_t SingleServerTraverser::getAndResetReadDocuments() {
return _opts->cache()->getAndResetInsertedDocuments();
}
bool SingleServerTraverser::getVertex(VPackSlice edge,
std::vector<VPackSlice>& result) {
std::vector<StringRef>& result) {
return _vertexGetter->getVertex(edge, result);
}
bool SingleServerTraverser::getSingleVertex(VPackSlice edge, VPackSlice vertex,
uint64_t depth, VPackSlice& result) {
return _vertexGetter->getSingleVertex(edge, vertex, depth, result);
bool SingleServerTraverser::getSingleVertex(VPackSlice edge, StringRef const sourceVertexId,
uint64_t depth, StringRef& targetVertexId) {
return _vertexGetter->getSingleVertex(edge, sourceVertexId, depth, targetVertexId);
}

View File

@ -38,9 +38,10 @@ class ManagedDocumentResult;
namespace traverser {
class PathEnumerator;
class SingleServerEdgeCursor : public EdgeCursor {
private:
TraverserOptions* _opts;
transaction::Methods* _trx;
ManagedDocumentResult* _mmdr;
std::vector<std::vector<OperationCursor*>> _cursors;
@ -51,7 +52,7 @@ class SingleServerEdgeCursor : public EdgeCursor {
std::vector<size_t> const* _internalCursorMapping;
public:
SingleServerEdgeCursor(ManagedDocumentResult* mmdr, transaction::Methods* trx, size_t, std::vector<size_t> const* mapping = nullptr);
SingleServerEdgeCursor(ManagedDocumentResult* mmdr, TraverserOptions* options, size_t, std::vector<size_t> const* mapping = nullptr);
~SingleServerEdgeCursor() {
for (auto& it : _cursors) {
@ -61,9 +62,9 @@ class SingleServerEdgeCursor : public EdgeCursor {
}
}
bool next(std::vector<arangodb::velocypack::Slice>&, size_t&) override;
bool next(std::function<void(arangodb::StringRef const&, VPackSlice, size_t)> callback) override;
bool readAll(std::unordered_set<arangodb::velocypack::Slice>&, size_t&) override;
void readAll(std::function<void(arangodb::StringRef const&, arangodb::velocypack::Slice, size_t&)>) override;
std::vector<std::vector<OperationCursor*>>& getCursors() {
return _cursors;
@ -82,62 +83,65 @@ class SingleServerTraverser final : public Traverser {
//////////////////////////////////////////////////////////////////////////////
void setStartVertex(std::string const& v) override;
size_t getAndResetReadDocuments() override;
protected:
/// @brief Function to load the other side's vertex of an edge
/// Returns true if the vertex passes filtering conditions
/// Adds the _id of the vertex into the given vector
bool getVertex(arangodb::velocypack::Slice,
std::vector<arangodb::velocypack::Slice>&) override;
bool getVertex(arangodb::velocypack::Slice edge,
std::vector<arangodb::StringRef>&) override;
/// @brief Function to load the other side's vertex of an edge
/// Returns true if the vertex passes filtering conditions
bool getSingleVertex(arangodb::velocypack::Slice, arangodb::velocypack::Slice,
uint64_t depth, arangodb::velocypack::Slice&) override;
bool getSingleVertex(arangodb::velocypack::Slice edge,
arangodb::StringRef const sourceVertexId,
uint64_t depth,
arangodb::StringRef& targetVertexId) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to fetch the real data of a vertex into an AQLValue
//////////////////////////////////////////////////////////////////////////////
aql::AqlValue fetchVertexData(arangodb::velocypack::Slice) override;
aql::AqlValue fetchVertexData(StringRef) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to fetch the real data of an edge into an AQLValue
//////////////////////////////////////////////////////////////////////////////
aql::AqlValue fetchEdgeData(arangodb::velocypack::Slice) override;
aql::AqlValue fetchEdgeData(StringRef) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to add the real data of a vertex into a velocypack builder
//////////////////////////////////////////////////////////////////////////////
void addVertexToVelocyPack(arangodb::velocypack::Slice,
void addVertexToVelocyPack(StringRef,
arangodb::velocypack::Builder&) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to add the real data of an edge into a velocypack builder
//////////////////////////////////////////////////////////////////////////////
void addEdgeToVelocyPack(arangodb::velocypack::Slice,
void addEdgeToVelocyPack(StringRef,
arangodb::velocypack::Builder&) override;
private:
//////////////////////////////////////////////////////////////////////////////
/// @brief Cache for vertex documents, points from _id to start of
/// document VPack value (in datafiles)
//////////////////////////////////////////////////////////////////////////////
std::unordered_map<arangodb::velocypack::Slice, uint8_t const*> _vertices;
//std::unordered_map<arangodb::velocypack::Slice, uint8_t const*> _vertices;
//////////////////////////////////////////////////////////////////////////////
/// @brief Cache for edge documents, points from _id to start of edge
/// VPack value (in datafiles)
//////////////////////////////////////////////////////////////////////////////
std::unordered_map<std::string, uint8_t const*> _edges;
//std::unordered_map<std::string, uint8_t const*> _edges;
};
} // namespace traverser

View File

@ -28,13 +28,14 @@
#include "Transaction/Context.h"
#include "VocBase/KeyGenerator.h"
#include "VocBase/TraverserOptions.h"
#include "VocBase/TraverserCache.h"
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::traverser;
using Traverser = arangodb::traverser::Traverser;
/// @brief Class Shortest Path
/// @brief Clears the path
@ -75,96 +76,89 @@ void arangodb::traverser::ShortestPath::vertexToVelocyPack(transaction::Methods*
}
}
bool Traverser::VertexGetter::getVertex(
VPackSlice edge, std::vector<VPackSlice>& result) {
VPackSlice cmp = result.back();
bool Traverser::VertexGetter::getVertex(VPackSlice edge, std::vector<StringRef>& result) {
VPackSlice res = transaction::helpers::extractFromFromDocument(edge);
if (cmp == res) {
if (result.back() == StringRef(res)) {
res = transaction::helpers::extractToFromDocument(edge);
}
if (!_traverser->vertexMatchesConditions(res, result.size())) {
return false;
}
result.emplace_back(res);
result.emplace_back(_traverser->traverserCache()->persistString(StringRef(res)));
return true;
}
bool Traverser::VertexGetter::getSingleVertex(VPackSlice edge,
VPackSlice cmp,
uint64_t depth,
VPackSlice& result) {
bool Traverser::VertexGetter::getSingleVertex(arangodb::velocypack::Slice edge, StringRef cmp,
uint64_t depth, StringRef& result) {
VPackSlice resSlice;
VPackSlice from = transaction::helpers::extractFromFromDocument(edge);
if (from != cmp) {
result = from;
if (from.compareString(cmp.data(), cmp.length()) != 0) {
resSlice = from;
} else {
result = transaction::helpers::extractToFromDocument(edge);
resSlice = transaction::helpers::extractToFromDocument(edge);
}
return _traverser->vertexMatchesConditions(result, depth);
result = _traverser->traverserCache()->persistString(StringRef(resSlice));
return _traverser->vertexMatchesConditions(resSlice, depth);
}
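getSingleVertex selects whichever endpoint of the edge is not the vertex we arrived from, then persists and filters it. The endpoint selection in isolation, sketched with a hypothetical Edge struct standing in for the _from/_to extraction helpers:

#include <string>

// Hypothetical edge with plain-string endpoints instead of VPack attributes.
struct Edge {
  std::string from;  // corresponds to the _from attribute
  std::string to;    // corresponds to the _to attribute
};

// Return the endpoint opposite to the vertex we came from.
std::string const& oppositeEndpoint(Edge const& edge, std::string const& current) {
  return (edge.from == current) ? edge.to : edge.from;
}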
void Traverser::VertexGetter::reset(arangodb::velocypack::Slice) {
void Traverser::VertexGetter::reset(StringRef const&) {
}
bool Traverser::UniqueVertexGetter::getVertex(
VPackSlice edge, std::vector<VPackSlice>& result) {
bool Traverser::UniqueVertexGetter::getVertex(VPackSlice edge, std::vector<StringRef>& result) {
VPackSlice toAdd = transaction::helpers::extractFromFromDocument(edge);
VPackSlice cmp = result.back();
if (toAdd == cmp) {
StringRef const& cmp = result.back();
TRI_ASSERT(toAdd.isString());
if (cmp == StringRef(toAdd)) {
toAdd = transaction::helpers::extractToFromDocument(edge);
}
arangodb::basics::VPackHashedSlice hashed(toAdd);
StringRef toAddStr = _traverser->traverserCache()->persistString(StringRef(toAdd));
// First check if we have already visited this vertex. If not, mark it now.
if (_returnedVertices.find(hashed) != _returnedVertices.end()) {
if (_returnedVertices.find(toAddStr) != _returnedVertices.end()) {
// This vertex is not unique.
++_traverser->_filteredPaths;
return false;
} else {
_returnedVertices.emplace(hashed);
_returnedVertices.emplace(toAddStr);
}
if (!_traverser->vertexMatchesConditions(toAdd, result.size())) {
return false;
}
result.emplace_back(toAdd);
result.emplace_back(toAddStr);
return true;
}
bool Traverser::UniqueVertexGetter::getSingleVertex(
VPackSlice edge, VPackSlice cmp, uint64_t depth, VPackSlice& result) {
result = transaction::helpers::extractFromFromDocument(edge);
if (cmp == result) {
result = transaction::helpers::extractToFromDocument(edge);
bool Traverser::UniqueVertexGetter::getSingleVertex(arangodb::velocypack::Slice edge, StringRef cmp,
uint64_t depth, StringRef& result) {
VPackSlice resSlice = transaction::helpers::extractFromFromDocument(edge);
if (resSlice.compareString(cmp.data(), cmp.length()) == 0) {
resSlice = transaction::helpers::extractToFromDocument(edge);
}
TRI_ASSERT(resSlice.isString());
arangodb::basics::VPackHashedSlice hashed(result);
result = _traverser->traverserCache()->persistString(StringRef(resSlice));
// First check if we have already visited this vertex. If not, mark it now.
if (_returnedVertices.find(hashed) != _returnedVertices.end()) {
if (_returnedVertices.find(result) != _returnedVertices.end()) {
// This vertex is not unique.
++_traverser->_filteredPaths;
return false;
} else {
_returnedVertices.emplace(hashed);
_returnedVertices.emplace(result);
}
return _traverser->vertexMatchesConditions(result, depth);
return _traverser->vertexMatchesConditions(resSlice, depth);
}
void Traverser::UniqueVertexGetter::reset(VPackSlice startVertex) {
void Traverser::UniqueVertexGetter::reset(arangodb::StringRef const& startVertex) {
_returnedVertices.clear();
arangodb::basics::VPackHashedSlice hashed(startVertex);
// The startVertex always counts as visited!
_returnedVertices.emplace(hashed);
_returnedVertices.emplace(startVertex);
}
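The UniqueVertexGetter thus enforces global vertex uniqueness with a visited set that reset() seeds with the start vertex, so the start can never be emitted again. A compact model of that bookkeeping, using std::string instead of persisted StringRefs:

#include <string>
#include <unordered_set>

struct UniqueVertexFilter {
  std::unordered_set<std::string> returned;

  void reset(std::string const& start) {
    returned.clear();
    returned.insert(start);  // the start vertex always counts as visited
  }

  // true if the vertex is new and may be emitted, false if it is filtered.
  bool admit(std::string const& vertex) {
    return returned.insert(vertex).second;
  }
};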
Traverser::Traverser(arangodb::traverser::TraverserOptions* opts, transaction::Methods* trx,
Traverser::Traverser(arangodb::traverser::TraverserOptions* opts,
transaction::Methods* trx,
arangodb::ManagedDocumentResult* mmdr)
: _trx(trx),
_mmdr(mmdr),
@ -182,8 +176,10 @@ Traverser::Traverser(arangodb::traverser::TraverserOptions* opts, transaction::M
}
}
Traverser::~Traverser() {}
bool arangodb::traverser::Traverser::edgeMatchesConditions(VPackSlice e,
VPackSlice vid,
StringRef vid,
uint64_t depth,
size_t cursorId) {
if (!_opts->evaluateEdgeExpression(e, vid, depth, cursorId)) {
@ -196,7 +192,7 @@ bool arangodb::traverser::Traverser::edgeMatchesConditions(VPackSlice e,
bool arangodb::traverser::Traverser::vertexMatchesConditions(VPackSlice v, uint64_t depth) {
TRI_ASSERT(v.isString());
if (_opts->vertexHasFilter(depth)) {
aql::AqlValue vertex = fetchVertexData(v);
aql::AqlValue vertex = fetchVertexData(StringRef(v));
if (!_opts->evaluateVertexExpression(vertex.slice(), depth)) {
++_filteredPaths;
return false;
@ -214,6 +210,10 @@ bool arangodb::traverser::Traverser::next() {
return res;
}
TraverserCache* arangodb::traverser::Traverser::traverserCache() {
return _opts->cache();
}
arangodb::aql::AqlValue arangodb::traverser::Traverser::lastVertexToAqlValue() {
return _enumerator->lastVertexToAqlValue();
}

View File

@ -27,6 +27,7 @@
#include "Basics/Common.h"
#include "Basics/hashes.h"
#include "Basics/ShortestPathFinder.h"
#include "Basics/StringRef.h"
#include "Basics/VelocyPackHelper.h"
#include "Aql/AqlValue.h"
#include "Aql/AstNode.h"
@ -52,9 +53,17 @@ struct AstNode;
class Expression;
class Query;
}
namespace graph {
class BreadthFirstEnumerator;
class NeighborsEnumerator;
}
namespace traverser {
class PathEnumerator;
struct TraverserOptions;
class TraverserCache;
class ShortestPath {
friend class arangodb::basics::DynamicDistanceFinder<
@ -163,9 +172,9 @@ class TraversalPath {
class Traverser {
friend class BreadthFirstEnumerator;
friend class arangodb::graph::BreadthFirstEnumerator;
friend class DepthFirstEnumerator;
friend class NeighborsEnumerator;
friend class arangodb::graph::NeighborsEnumerator;
#ifdef USE_ENTERPRISE
friend class SmartDepthFirstPathEnumerator;
friend class SmartBreadthFirstPathEnumerator;
@ -185,13 +194,12 @@ class Traverser {
virtual ~VertexGetter() = default;
virtual bool getVertex(arangodb::velocypack::Slice,
std::vector<arangodb::velocypack::Slice>&);
std::vector<arangodb::StringRef>&);
virtual bool getSingleVertex(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, uint64_t,
arangodb::velocypack::Slice&);
virtual bool getSingleVertex(arangodb::velocypack::Slice, StringRef,
uint64_t, StringRef&);
virtual void reset(arangodb::velocypack::Slice);
virtual void reset(arangodb::StringRef const&);
protected:
Traverser* _traverser;
@ -209,16 +217,15 @@ class Traverser {
~UniqueVertexGetter() = default;
bool getVertex(arangodb::velocypack::Slice,
std::vector<arangodb::velocypack::Slice>&) override;
std::vector<arangodb::StringRef>&) override;
bool getSingleVertex(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, uint64_t,
arangodb::velocypack::Slice&) override;
bool getSingleVertex(arangodb::velocypack::Slice, StringRef,
uint64_t, StringRef&) override;
void reset(arangodb::velocypack::Slice) override;
void reset(arangodb::StringRef const&) override;
private:
std::unordered_set<arangodb::basics::VPackHashedSlice> _returnedVertices;
std::unordered_set<arangodb::StringRef> _returnedVertices;
};
@ -233,8 +240,8 @@ class Traverser {
/// @brief Destructor
//////////////////////////////////////////////////////////////////////////////
virtual ~Traverser() {}
virtual ~Traverser();
//////////////////////////////////////////////////////////////////////////////
/// @brief Reset the traverser to use another start vertex
//////////////////////////////////////////////////////////////////////////////
@ -260,20 +267,23 @@ class Traverser {
/// @brief Get the next possible path in the graph.
bool next();
TraverserCache* traverserCache();
protected:
/// @brief Function to load the other side's vertex of an edge
/// Returns true if the vertex passes filtering conditions
/// Also appends the _id value of the vertex into the given vector
protected:
virtual bool getVertex(arangodb::velocypack::Slice,
std::vector<arangodb::velocypack::Slice>&) = 0;
std::vector<arangodb::StringRef>&) = 0;
/// @brief Function to load the other side's vertex of an edge
/// Returns true if the vertex passes filtering conditions
virtual bool getSingleVertex(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, uint64_t,
arangodb::velocypack::Slice&) = 0;
virtual bool getSingleVertex(arangodb::velocypack::Slice edge,
arangodb::StringRef const sourceVertexId,
uint64_t depth,
arangodb::StringRef& targetVertexId) = 0;
public:
//////////////////////////////////////////////////////////////////////////////
@ -314,13 +324,13 @@ class Traverser {
/// @brief Get the number of documents loaded
//////////////////////////////////////////////////////////////////////////////
size_t getAndResetReadDocuments() {
virtual size_t getAndResetReadDocuments() {
size_t tmp = _readDocuments;
_readDocuments = 0;
return tmp;
}
TraverserOptions const* options() { return _opts; }
TraverserOptions* options() { return _opts; }
ManagedDocumentResult* mmdr() const { return _mmdr; }
@ -332,13 +342,13 @@ class Traverser {
bool hasMore() { return !_done; }
bool edgeMatchesConditions(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, uint64_t, size_t);
bool edgeMatchesConditions(arangodb::velocypack::Slice edge, StringRef vid,
uint64_t depth, size_t cursorId);
bool vertexMatchesConditions(arangodb::velocypack::Slice, uint64_t);
void allowOptimizedNeighbors();
protected:
/// @brief Outer top level transaction
@ -369,21 +379,21 @@ class Traverser {
/// @brief options for traversal
TraverserOptions* _opts;
bool _canUseOptimizedNeighbors;
/// @brief Function to fetch the real data of a vertex into an AQLValue
virtual aql::AqlValue fetchVertexData(arangodb::velocypack::Slice) = 0;
virtual aql::AqlValue fetchVertexData(StringRef vid) = 0;
/// @brief Function to fetch the real data of an edge into an AQLValue
virtual aql::AqlValue fetchEdgeData(arangodb::velocypack::Slice) = 0;
virtual aql::AqlValue fetchEdgeData(StringRef eid) = 0;
/// @brief Function to add the real data of a vertex into a velocypack builder
virtual void addVertexToVelocyPack(arangodb::velocypack::Slice,
virtual void addVertexToVelocyPack(StringRef vid,
arangodb::velocypack::Builder&) = 0;
/// @brief Function to add the real data of an edge into a velocypack builder
virtual void addEdgeToVelocyPack(arangodb::velocypack::Slice,
virtual void addEdgeToVelocyPack(StringRef eid,
arangodb::velocypack::Builder&) = 0;
};

View File

@ -0,0 +1,189 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017-2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////
#include "TraverserCache.h"
#include "Basics/StringHeap.h"
#include "Basics/StringRef.h"
#include "Basics/VelocyPackHelper.h"
#include "Cache/Common.h"
#include "Cache/Cache.h"
#include "Cache/CacheManagerFeature.h"
#include "Cache/Finding.h"
#include "Logger/Logger.h"
#include "Transaction/Methods.h"
#include "VocBase/ManagedDocumentResult.h"
#include "Aql/AqlValue.h"
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::traverser;
TraverserCache::TraverserCache(transaction::Methods* trx)
: _cache(nullptr), _mmdr(new ManagedDocumentResult{}),
_trx(trx), _insertedDocuments(0),
_stringHeap(new StringHeap{4096}) /* arbitrary block size; may be adjusted for performance */ {
auto cacheManager = CacheManagerFeature::MANAGER;
TRI_ASSERT(cacheManager != nullptr);
_cache = cacheManager->createCache(cache::CacheType::Plain);
}
TraverserCache::~TraverserCache() {
auto cacheManager = CacheManagerFeature::MANAGER;
cacheManager->destroyCache(_cache);
}
// @brief Only for internal use. The Cache::Finding prevents the cache
// from removing this specific object. It should not be retained for
// a longer period of time.
// DO NOT give it to a caller.
cache::Finding TraverserCache::lookup(StringRef idString) {
VPackValueLength keySize = idString.length();
void const* key = idString.data();
//uint32_t keySize = static_cast<uint32_t>(idString.byteSize());
return _cache->find(key, keySize);
}
VPackSlice TraverserCache::lookupInCollection(StringRef id) {
size_t pos = id.find('/');
if (pos == std::string::npos) {
// Invalid input. If we get here we somehow managed to store invalid
// _from/_to values, or the traverser let an illegal start vertex through.
TRI_ASSERT(false);
return basics::VelocyPackHelper::NullValue();
}
int res = _trx->documentFastPathLocal(id.substr(0, pos).toString(),
id.substr(pos + 1).toString(), *_mmdr);
if (res != TRI_ERROR_NO_ERROR && res != TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND) {
// ok we are in a rather bad state. Better throw and abort.
THROW_ARANGO_EXCEPTION(res);
}
VPackSlice result;
if (res == TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND) {
// This is expected, we may have dangling edges. Interpret as NULL
result = basics::VelocyPackHelper::NullValue();
} else {
result = VPackSlice(_mmdr->vpack());
}
void const* key = id.begin();
VPackValueLength keySize = id.length();
void const* resVal = result.begin();
uint64_t resValSize = static_cast<uint64_t>(result.byteSize());
std::unique_ptr<cache::CachedValue> value(
cache::CachedValue::construct(key, keySize, resVal, resValSize));
if (value) {
bool success = _cache->insert(value.get());
if (!success) {
LOG_TOPIC(DEBUG, Logger::GRAPHS) << "Insert failed";
}
// The cache is now responsible for the value.
// If the insert failed we simply do not store it and will read it again next time.
value.release();
}
++_insertedDocuments;
return result;
}
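lookupInCollection relies on the convention that a document _id has the form "collection/key" and splits it at the first slash before asking the transaction for the document. The split on its own, sketched with std::string instead of StringRef:

#include <cstddef>
#include <string>
#include <utility>

// Split an _id of the form "collection/key" at the first '/'.
// Returns a pair of empty strings for malformed input.
std::pair<std::string, std::string> splitDocumentId(std::string const& id) {
  std::size_t pos = id.find('/');
  if (pos == std::string::npos) {
    return {std::string(), std::string()};
  }
  return {id.substr(0, pos), id.substr(pos + 1)};
}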
void TraverserCache::insertIntoResult(StringRef idString,
VPackBuilder& builder) {
auto finding = lookup(idString);
if (finding.found()) {
auto val = finding.value();
VPackSlice slice(val->value());
// the Finding makes sure that the slice content stays valid.
builder.add(slice);
} else {
// Not in cache. Fetch and insert.
builder.add(lookupInCollection(idString));
}
}
aql::AqlValue TraverserCache::fetchAqlResult(StringRef idString) {
auto finding = lookup(idString);
if (finding.found()) {
auto val = finding.value();
// the Finding makes sure that the slice content stays valid.
return aql::AqlValue(val->value());
}
// Not in cache. Fetch and insert.
return aql::AqlValue(lookupInCollection(idString));
}
void TraverserCache::insertDocument(StringRef idString, arangodb::velocypack::Slice const& document) {
auto finding = lookup(idString);
if (!finding.found()) {
VPackValueLength keySize = idString.length();
void const* key = idString.data();
void const* resVal = document.begin();
uint64_t resValSize = static_cast<uint64_t>(document.byteSize());
std::unique_ptr<cache::CachedValue> value(
cache::CachedValue::construct(key, keySize, resVal, resValSize));
if (value) {
bool success = _cache->insert(value.get());
if (!success) {
LOG_TOPIC(ERR, Logger::GRAPHS) << "Insert document into cache failed";
}
// The cache is now responsible for the value.
// If the insert failed we simply do not store it and will read it again next time.
value.release();
}
++_insertedDocuments;
}
}
bool TraverserCache::validateFilter(
StringRef idString,
std::function<bool(VPackSlice const&)> filterFunc) {
auto finding = lookup(idString);
if (finding.found()) {
auto val = finding.value();
VPackSlice slice(val->value());
// the Finding makes sure that the slice content stays valid.
return filterFunc(slice);
}
// Not in cache. Fetch and insert.
VPackSlice slice = lookupInCollection(idString);
return filterFunc(slice);
}
StringRef TraverserCache::persistString(
StringRef const idString) {
auto it = _persistedStrings.find(idString);
if (it != _persistedStrings.end()) {
return *it;
}
StringRef res = _stringHeap->registerString(idString.begin(), idString.length());
_persistedStrings.emplace(res);
return res;
}
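persistString is a small string-interning scheme: the first occurrence of an id is copied into the StringHeap, and every later occurrence gets back the identical reference, so ids can be compared and stored cheaply for the whole traversal. The same idea in miniature, assuming standard containers instead of StringHeap/StringRef:

#include <string>
#include <unordered_set>

class StringInterner {
  std::unordered_set<std::string> _store;

 public:
  // The returned reference stays valid for the lifetime of the interner;
  // unordered_set never moves its elements on rehash.
  std::string const& persist(std::string const& s) {
    return *_store.insert(s).first;
  }
};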

View File

@ -0,0 +1,161 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017-2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_VOC_BASE_TRAVERSER_CACHE_H
#define ARANGOD_VOC_BASE_TRAVERSER_CACHE_H 1
#include "Basics/Common.h"
#include "Basics/StringRef.h"
namespace arangodb {
class ManagedDocumentResult;
class StringHeap;
namespace cache {
class Cache;
class Finding;
}
namespace transaction {
class Methods;
}
namespace velocypack {
class Builder;
class Slice;
}
namespace aql {
struct AqlValue;
}
namespace traverser {
class TraverserCache {
public:
explicit TraverserCache(transaction::Methods* trx);
~TraverserCache();
//////////////////////////////////////////////////////////////////////////////
/// @brief Inserts the real document stored within the token
/// into the given builder.
/// The document will be taken from the hash-cache.
/// If it is not cached it will be looked up in the StorageEngine
//////////////////////////////////////////////////////////////////////////////
void insertIntoResult(StringRef idString,
arangodb::velocypack::Builder& builder);
//////////////////////////////////////////////////////////////////////////////
/// @brief Return AQL value containing the result
/// The document will be taken from the hash-cache.
/// If it is not cached it will be looked up in the StorageEngine
//////////////////////////////////////////////////////////////////////////////
aql::AqlValue fetchAqlResult(StringRef idString);
//////////////////////////////////////////////////////////////////////////////
/// @brief Insert value into store
//////////////////////////////////////////////////////////////////////////////
void insertDocument(StringRef idString,
arangodb::velocypack::Slice const& document);
//////////////////////////////////////////////////////////////////////////////
/// @brief Passes the document referenced by the token to the filter
/// function and returns its result.
/// The document will be taken from the hash-cache.
/// If it is not cached it will be looked up in the StorageEngine
//////////////////////////////////////////////////////////////////////////////
bool validateFilter(StringRef idString,
std::function<bool(arangodb::velocypack::Slice const&)> filterFunc);
size_t getAndResetInsertedDocuments() {
size_t tmp = _insertedDocuments;
_insertedDocuments = 0;
return tmp;
}
//////////////////////////////////////////////////////////////////////////////
/// @brief Persist the given id string. The return value is guaranteed to
/// stay valid as long as this cache is valid
//////////////////////////////////////////////////////////////////////////////
StringRef persistString(StringRef const idString);
private:
//////////////////////////////////////////////////////////////////////////////
/// @brief Lookup a document by token in the cache.
/// As long as the Finding is retained it is guaranteed that the result
/// stays valid. A Finding should not be retained for very long; if the
/// value is needed for longer, copy it.
//////////////////////////////////////////////////////////////////////////////
cache::Finding lookup(StringRef idString);
//////////////////////////////////////////////////////////////////////////////
/// @brief Lookup a document from the database and insert it into the cache.
/// The Slice returned here is only valid until the NEXT call of this
/// function.
//////////////////////////////////////////////////////////////////////////////
arangodb::velocypack::Slice lookupInCollection(
StringRef idString);
//////////////////////////////////////////////////////////////////////////////
/// @brief The hash-cache that saves documents found in the Database
//////////////////////////////////////////////////////////////////////////////
std::shared_ptr<arangodb::cache::Cache> _cache;
//////////////////////////////////////////////////////////////////////////////
/// @brief Reusable ManagedDocumentResult that temporarily takes
/// responsibility for one document.
//////////////////////////////////////////////////////////////////////////////
std::unique_ptr<ManagedDocumentResult> _mmdr;
//////////////////////////////////////////////////////////////////////////////
/// @brief Transaction to access data. This class is NOT responsible for it.
//////////////////////////////////////////////////////////////////////////////
arangodb::transaction::Methods* _trx;
//////////////////////////////////////////////////////////////////////////////
/// @brief Documents inserted in this cache
//////////////////////////////////////////////////////////////////////////////
size_t _insertedDocuments;
//////////////////////////////////////////////////////////////////////////////
/// @brief StringHeap that owns the _id strings, so that they stay valid
/// during the entire traversal.
//////////////////////////////////////////////////////////////////////////////
std::unique_ptr<arangodb::StringHeap> _stringHeap;
//////////////////////////////////////////////////////////////////////////////
/// @brief Set of all strings persisted in the StringHeap, so that we can
/// save memory by not storing duplicates.
//////////////////////////////////////////////////////////////////////////////
std::unordered_set<arangodb::StringRef> _persistedStrings;
};
}
}
#endif
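Every read path of the cache (insertIntoResult, fetchAqlResult, validateFilter) follows the same read-through pattern: consult the in-memory cache first and, on a miss, load from the storage engine and populate the cache. A generic sketch of that pattern, with a plain map as the hypothetical cache and a caller-supplied loader:

#include <string>
#include <unordered_map>

// Read-through lookup: return the cached value or load and remember it.
template <typename Load>
std::string const& readThrough(std::unordered_map<std::string, std::string>& cache,
                               std::string const& key, Load loadFromStorage) {
  auto it = cache.find(key);
  if (it != cache.end()) {
    return it->second;  // cache hit
  }
  // Cache miss: fetch from storage and insert for the next lookup.
  return cache.emplace(key, loadFromStorage(key)).first->second;
}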

View File

@ -30,11 +30,14 @@
#include "Cluster/ClusterEdgeCursor.h"
#include "Indexes/Index.h"
#include "VocBase/SingleServerTraverser.h"
#include "VocBase/TraverserCache.h"
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb::transaction;
using VPackHelper = arangodb::basics::VelocyPackHelper;
using TraverserOptions = arangodb::traverser::TraverserOptions;
arangodb::traverser::TraverserOptions::LookupInfo::LookupInfo()
: expression(nullptr),
@ -151,6 +154,24 @@ double arangodb::traverser::TraverserOptions::LookupInfo::estimateCost(size_t& n
return 1000.0;
}
arangodb::traverser::TraverserCache* arangodb::traverser::TraverserOptions::cache() const {
return _cache.get();
}
arangodb::traverser::TraverserOptions::TraverserOptions(transaction::Methods* trx)
: _trx(trx),
_baseVertexExpression(nullptr),
_tmpVar(nullptr),
_ctx(new aql::FixedVarExpressionContext()),
_traverser(nullptr),
_isCoordinator(trx->state()->isCoordinator()),
_cache(new TraverserCache(trx)),
minDepth(1),
maxDepth(1),
useBreadthFirst(false),
uniqueVertices(UniquenessLevel::NONE),
uniqueEdges(UniquenessLevel::PATH) {}
arangodb::traverser::TraverserOptions::TraverserOptions(
transaction::Methods* trx, VPackSlice const& slice)
: _trx(trx),
@ -159,6 +180,7 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
_ctx(new aql::FixedVarExpressionContext()),
_traverser(nullptr),
_isCoordinator(arangodb::ServerState::instance()->isCoordinator()),
_cache(new TraverserCache(trx)),
minDepth(1),
maxDepth(1),
useBreadthFirst(false),
@ -212,6 +234,7 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
_ctx(new aql::FixedVarExpressionContext()),
_traverser(nullptr),
_isCoordinator(arangodb::ServerState::instance()->isCoordinator()),
_cache(new TraverserCache(_trx)),
minDepth(1),
maxDepth(1),
useBreadthFirst(false),
@ -370,6 +393,7 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
_ctx(new aql::FixedVarExpressionContext()),
_traverser(nullptr),
_isCoordinator(arangodb::ServerState::instance()->isCoordinator()),
_cache(new TraverserCache(_trx)),
minDepth(other.minDepth),
maxDepth(other.maxDepth),
useBreadthFirst(other.useBreadthFirst),
@ -550,7 +574,7 @@ bool arangodb::traverser::TraverserOptions::vertexHasFilter(
}
bool arangodb::traverser::TraverserOptions::evaluateEdgeExpression(
arangodb::velocypack::Slice edge, arangodb::velocypack::Slice vertex,
arangodb::velocypack::Slice edge, StringRef vertexId,
uint64_t depth, size_t cursorId) const {
if (_isCoordinator) {
// The Coordinator never checks conditions. The DBServer is responsible!
@ -572,9 +596,6 @@ bool arangodb::traverser::TraverserOptions::evaluateEdgeExpression(
if (expression != nullptr) {
TRI_ASSERT(!expression->isV8());
expression->setVariable(_tmpVar, edge);
VPackValueLength vidLength;
char const* vid = vertex.getString(vidLength);
// inject _from/_to value
auto node = expression->nodeForModification();
@ -588,7 +609,7 @@ bool arangodb::traverser::TraverserOptions::evaluateEdgeExpression(
TRI_ASSERT(idNode->type == aql::NODE_TYPE_VALUE);
TRI_ASSERT(idNode->isValueType(aql::VALUE_TYPE_STRING));
idNode->stealComputedValue();
idNode->setStringValue(vid, vidLength);
idNode->setStringValue(vertexId.data(), vertexId.length());
bool mustDestroy = false;
aql::AqlValue res = expression->execute(_trx, _ctx, mustDestroy);
@ -633,10 +654,10 @@ bool arangodb::traverser::TraverserOptions::evaluateVertexExpression(
arangodb::traverser::EdgeCursor*
arangodb::traverser::TraverserOptions::nextCursor(ManagedDocumentResult* mmdr,
VPackSlice vertex,
uint64_t depth) const {
StringRef vid,
uint64_t depth) {
if (_isCoordinator) {
return nextCursorCoordinator(vertex, depth);
return nextCursorCoordinator(vid, depth);
}
TRI_ASSERT(mmdr != nullptr);
auto specific = _depthLookupInfo.find(depth);
@ -646,17 +667,15 @@ arangodb::traverser::TraverserOptions::nextCursor(ManagedDocumentResult* mmdr,
} else {
list = _baseLookupInfos;
}
return nextCursorLocal(mmdr, vertex, depth, list);
return nextCursorLocal(mmdr, vid, depth, list);
}
arangodb::traverser::EdgeCursor*
arangodb::traverser::TraverserOptions::nextCursorLocal(ManagedDocumentResult* mmdr,
VPackSlice vertex, uint64_t depth, std::vector<LookupInfo>& list) const {
StringRef vid, uint64_t depth, std::vector<LookupInfo>& list) {
TRI_ASSERT(mmdr != nullptr);
auto allCursor = std::make_unique<SingleServerEdgeCursor>(mmdr, _trx, list.size());
auto allCursor = std::make_unique<SingleServerEdgeCursor>(mmdr, this, list.size());
auto& opCursors = allCursor->getCursors();
VPackValueLength vidLength;
char const* vid = vertex.getString(vidLength);
for (auto& info : list) {
auto& node = info.indexCondition;
TRI_ASSERT(node->numMembers() > 0);
@ -669,7 +688,7 @@ arangodb::traverser::TraverserOptions::nextCursorLocal(ManagedDocumentResult* mm
auto idNode = dirCmp->getMemberUnchecked(1);
TRI_ASSERT(idNode->type == aql::NODE_TYPE_VALUE);
TRI_ASSERT(idNode->isValueType(aql::VALUE_TYPE_STRING));
idNode->setStringValue(vid, vidLength);
idNode->setStringValue(vid.data(), vid.length());
}
std::vector<OperationCursor*> csrs;
csrs.reserve(info.idxHandles.size());
@ -684,9 +703,9 @@ arangodb::traverser::TraverserOptions::nextCursorLocal(ManagedDocumentResult* mm
arangodb::traverser::EdgeCursor*
arangodb::traverser::TraverserOptions::nextCursorCoordinator(
VPackSlice vertex, uint64_t depth) const {
StringRef vid, uint64_t depth) {
TRI_ASSERT(_traverser != nullptr);
auto cursor = std::make_unique<ClusterEdgeCursor>(vertex, depth, _traverser);
auto cursor = std::make_unique<ClusterEdgeCursor>(vid, depth, _traverser);
return cursor.release();
}
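Both evaluateEdgeExpression and nextCursorLocal reuse a condition tree that was built once and, per vertex, merely overwrite its string value node with the current vertex id, avoiding a rebuild of the whole expression. A stripped-down model of that in-place patching; ValueNode is a hypothetical stand-in for the aql::AstNode value node:

#include <cstddef>
#include <string>

// Hypothetical value node, analogous to an AstNode of VALUE_TYPE_STRING.
struct ValueNode {
  std::string value;
  void setStringValue(char const* data, std::size_t length) {
    value.assign(data, length);
  }
};

// Bind the current vertex id into the prepared condition before each lookup.
void bindVertexId(ValueNode& idNode, std::string const& vertexId) {
  idNode.setStringValue(vertexId.data(), vertexId.size());
}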

View File

@ -25,6 +25,7 @@
#define ARANGOD_VOC_BASE_TRAVERSER_OPTIONS_H 1
#include "Basics/Common.h"
#include "Basics/StringRef.h"
#include "Aql/FixedVarExpressionContext.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Methods.h"
@ -47,6 +48,7 @@ class TraversalNode;
namespace traverser {
class ClusterTraverser;
class TraverserCache;
/// @brief Abstract class used in the traversals
/// to abstract away access to indexes / DBServers.
@ -57,9 +59,10 @@ class EdgeCursor {
EdgeCursor() {}
virtual ~EdgeCursor() {}
virtual bool next(std::vector<arangodb::velocypack::Slice>&, size_t&) = 0;
virtual bool readAll(std::unordered_set<arangodb::velocypack::Slice>&,
size_t&) = 0;
virtual bool next(std::function<void(arangodb::StringRef const&, VPackSlice, size_t)> callback) = 0;
virtual void readAll(std::function<void(arangodb::StringRef const&, arangodb::velocypack::Slice, size_t&)>) = 0;
};
@ -110,6 +113,9 @@ struct TraverserOptions {
arangodb::traverser::ClusterTraverser* _traverser;
bool const _isCoordinator;
/// @brief the traverser cache
std::unique_ptr<TraverserCache> _cache;
public:
uint64_t minDepth;
@ -121,18 +127,7 @@ struct TraverserOptions {
UniquenessLevel uniqueEdges;
explicit TraverserOptions(transaction::Methods* trx)
: _trx(trx),
_baseVertexExpression(nullptr),
_tmpVar(nullptr),
_ctx(new aql::FixedVarExpressionContext()),
_traverser(nullptr),
_isCoordinator(trx->state()->isCoordinator()),
minDepth(1),
maxDepth(1),
useBreadthFirst(false),
uniqueVertices(UniquenessLevel::NONE),
uniqueEdges(UniquenessLevel::PATH) {}
explicit TraverserOptions(transaction::Methods* trx);
TraverserOptions(transaction::Methods*, arangodb::velocypack::Slice const&);
@ -158,12 +153,12 @@ struct TraverserOptions {
bool vertexHasFilter(uint64_t) const;
bool evaluateEdgeExpression(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, uint64_t,
StringRef vertexId, uint64_t,
size_t) const;
bool evaluateVertexExpression(arangodb::velocypack::Slice, uint64_t) const;
EdgeCursor* nextCursor(ManagedDocumentResult*, arangodb::velocypack::Slice, uint64_t) const;
EdgeCursor* nextCursor(ManagedDocumentResult*, StringRef vid, uint64_t);
void clearVariableValues();
@ -175,16 +170,18 @@ struct TraverserOptions {
double estimateCost(size_t& nrItems) const;
TraverserCache* cache() const;
private:
double costForLookupInfoList(std::vector<LookupInfo> const& list,
size_t& createItems) const;
EdgeCursor* nextCursorLocal(ManagedDocumentResult*,
arangodb::velocypack::Slice, uint64_t,
std::vector<LookupInfo>&) const;
StringRef vid, uint64_t,
std::vector<LookupInfo>&);
EdgeCursor* nextCursorCoordinator(arangodb::velocypack::Slice, uint64_t) const;
EdgeCursor* nextCursorCoordinator(StringRef vid, uint64_t);
};
}

View File

@ -256,8 +256,10 @@
}
model = JSON.stringify(model);
console.log(model);
console.log(this.type);
if (this.type._from && this.type._to) {
if (this.type === 'edge' || this.type._from) {
var callbackE = function (error, data) {
if (error) {
arangoHelper.arangoError('Error', data.responseJSON.errorMessage);
@ -267,7 +269,7 @@
}
}.bind(this);
this.collection.saveEdge(this.colid, this.docid, this.type._from, this.type._to, model, callbackE);
this.collection.saveEdge(this.colid, this.docid, $('#document-from').html(), $('#document-to').html(), model, callbackE);
} else {
var callback = function (error, data) {
if (error) {

View File

@ -2018,11 +2018,11 @@
var callback = function (error, data, id) {
if (!error) {
var attributes = '';
attributes += '<span class="title">ID </span> <span class="nodeId">' + data._id + '</span>';
if (Object.keys(data).length > 3) {
attributes += '<span class="title">ID </span> <span class="nodeId">' + data.documents[0]._id + '</span>';
if (Object.keys(data.documents[0]).length > 3) {
attributes += '<span class="title">ATTRIBUTES </span>';
}
_.each(data, function (value, key) {
_.each(data.documents[0], function (value, key) {
if (key !== '_key' && key !== '_id' && key !== '_rev' && key !== '_from' && key !== '_to') {
attributes += '<span class="nodeAttribute">' + key + '</span>';
}

View File

@ -101,6 +101,8 @@ const optionsDocumentation = [
' - `loopSleepWhen`: sleep every nth iteration',
' - `loopSleepSec`: sleep seconds between iterations',
'',
' - `storageEngine`: set to `rocksdb` or `mmfiles` - defaults to `mmfiles`',
'',
' - `server`: server_url (e.g. tcp://127.0.0.1:8529) for external server',
' - `cluster`: if set to true the tests are run with the coordinator',
' of a small local cluster',
@ -190,6 +192,7 @@ const optionsDefaults = {
'skipShebang': false,
'skipSsl': false,
'skipTimeCritical': false,
'storageEngine': 'mmfiles',
'test': undefined,
'testBuckets': undefined,
'username': 'root',
@ -261,6 +264,25 @@ let LOGS_DIR;
let UNITTESTS_DIR;
let GDB_OUTPUT = "";
function doOnePathInner(path) {
return _.filter(fs.list(makePathUnix(path)),
function (p) {
return p.substr(-3) === '.js';
})
.map(function (x) {
return fs.join(makePathUnix(path), x);
}).sort();
}
function scanTestPath(path) {
var community = doOnePathInner(path);
if (global.ARANGODB_CLIENT_VERSION(true)['enterprise-version']) {
return community.concat(doOnePathInner('enterprise/' + path));
} else {
return community;
}
}
function makeResults (testname, instanceInfo) {
const startTime = time();
@ -322,13 +344,17 @@ function makeArgsArangod (options, appDir, role) {
config = "arangod-" + role + ".conf";
}
return {
let args = {
'configuration': fs.join(CONFIG_DIR, config),
'define': 'TOP_DIR=' + TOP_DIR,
'wal.flush-timeout': options.walFlushTimeout,
'javascript.app-path': appDir,
'http.trusted-origin': options.httpTrustedOrigin || 'all'
};
if (options.storageEngine !== 'mmfiles') {
args['server.storage-engine'] = 'rocksdb';
}
return args;
}
// //////////////////////////////////////////////////////////////////////////////
@ -545,7 +571,7 @@ function analyzeCrash (binary, arangod, options, checkStr) {
var cp = corePattern.asciiSlice(0, corePattern.length);
if (matchApport.exec(cp) != null) {
print(RED + "apport handles corefiles on your system. Uninstall it if you want us to get corefiles for analysis.");
print(RED + "apport handles corefiles on your system. Uninstall it if you want us to get corefiles for analysis." + RESET);
return;
}
@ -556,7 +582,7 @@ function analyzeCrash (binary, arangod, options, checkStr) {
options.coreDirectory = cp.replace("%e", "*").replace("%t", "*").replace("%p", arangod.pid);
}
else {
print(RED + "Don't know howto locate corefiles in your system. '" + cpf + "' contains: '" + cp + "'");
print(RED + "Don't know howto locate corefiles in your system. '" + cpf + "' contains: '" + cp + "'" + RESET);
return;
}
}
@ -573,7 +599,7 @@ function analyzeCrash (binary, arangod, options, checkStr) {
storeArangodPath + ' for later analysis.\n' +
'Server shut down with :\n' +
yaml.safeDump(arangod) +
'marking build as crashy.');
'marking build as crashy.' + RESET);
let corePath = (options.coreDirectory === '')
? 'core'
@ -826,6 +852,7 @@ function performTests (options, testList, testname, runFn) {
let results = {};
let continueTesting = true;
let count = 0;
for (let i = 0; i < testList.length; i++) {
let te = testList[i];
@ -834,6 +861,7 @@ function performTests (options, testList, testname, runFn) {
if (filterTestcaseByOptions(te, options, filtered)) {
let first = true;
let loopCount = 0;
count += 1;
while (first || options.loopEternal) {
if (!continueTesting) {
@ -895,6 +923,15 @@ function performTests (options, testList, testname, runFn) {
}
}
if (count === 0) {
results["ALLTESTS"] = {
status: false,
skipped: true
};
results.status = false;
print(RED + "No testcase matched the filter." + RESET);
}
print('Shutting down...');
shutdownInstance(instanceInfo, options);
print('done.');
@ -1029,7 +1066,7 @@ function executeArangod (cmd, args, options) {
// / @brief executes a command and wait for result
// //////////////////////////////////////////////////////////////////////////////
function executeAndWait (cmd, args, options, valgrindTest, rootDir) {
function executeAndWait (cmd, args, options, valgrindTest, rootDir, disableCoreCheck = false) {
if (valgrindTest && options.valgrind) {
let valgrindOpts = {};
@ -1072,7 +1109,8 @@ function executeAndWait (cmd, args, options, valgrindTest, rootDir) {
let errorMessage = ' - ';
if (res.hasOwnProperty('signal') &&
if (!disableCoreCheck &&
res.hasOwnProperty('signal') &&
((res.signal === 11) ||
(res.signal === 6) ||
// Windows sometimes has random numbers in signal...
@ -1793,11 +1831,13 @@ function rubyTests (options, ssl) {
}
};
let count = 0;
for (let i = 0; i < files.length; i++) {
const te = files[i];
if (te.substr(0, 4) === 'api-' && te.substr(-3) === '.rb') {
if (filterTestcaseByOptions(te, options, filtered)) {
count += 1;
if (!continueTesting) {
print('Skipping ' + te + ' server is gone.');
@ -1870,6 +1910,15 @@ function rubyTests (options, ssl) {
print('Shutting down...');
if (count === 0) {
result["ALLTESTS"] = {
status: false,
skipped: true
};
result.status = false;
print(RED + "No testcase matched the filter." + RESET);
}
fs.remove(tmpname);
shutdownInstance(instanceInfo, options);
print('done.');
@ -1886,56 +1935,37 @@ let testsCases = {
};
function findTests () {
function doOnePathInner(path) {
return _.filter(fs.list(makePathUnix(path)),
function (p) {
return p.substr(-3) === '.js';
})
.map(function (x) {
return fs.join(makePathUnix(path), x);
}).sort();
}
function doOnePath(path) {
var community = doOnePathInner(path);
if (global.ARANGODB_CLIENT_VERSION(true)['enterprise-version']) {
return community.concat(doOnePathInner('enterprise/' + path));
} else {
return community;
}
}
if (testsCases.setup) {
return;
}
testsCases.common = doOnePath('js/common/tests/shell');
testsCases.common = scanTestPath('js/common/tests/shell');
testsCases.server_only = doOnePath('js/server/tests/shell');
testsCases.server_only = scanTestPath('js/server/tests/shell');
testsCases.client_only = doOnePath('js/client/tests/shell');
testsCases.client_only = scanTestPath('js/client/tests/shell');
testsCases.server_aql = doOnePath('js/server/tests/aql');
testsCases.server_aql = scanTestPath('js/server/tests/aql');
testsCases.server_aql = _.filter(testsCases.server_aql,
function(p) { return p.indexOf('ranges-combined') === -1; });
testsCases.server_aql_extended = doOnePath('js/server/tests/aql');
testsCases.server_aql_extended = scanTestPath('js/server/tests/aql');
testsCases.server_aql_extended = _.filter(testsCases.server_aql_extended,
function(p) { return p.indexOf('ranges-combined') !== -1; });
testsCases.server_aql_performance = doOnePath('js/server/perftests');
testsCases.server_aql_performance = scanTestPath('js/server/perftests');
testsCases.server_http = doOnePath('js/common/tests/http');
testsCases.server_http = scanTestPath('js/common/tests/http');
testsCases.replication = doOnePath('js/common/tests/replication');
testsCases.replication = scanTestPath('js/common/tests/replication');
testsCases.agency = doOnePath('js/client/tests/agency');
testsCases.agency = scanTestPath('js/client/tests/agency');
testsCases.resilience = doOnePath('js/server/tests/resilience');
testsCases.resilience = scanTestPath('js/server/tests/resilience');
testsCases.client_resilience = doOnePath('js/client/tests/resilience');
testsCases.cluster_sync = doOnePath('js/server/tests/cluster-sync');
testsCases.client_resilience = scanTestPath('js/client/tests/resilience');
testsCases.cluster_sync = scanTestPath('js/server/tests/cluster-sync');
testsCases.server = testsCases.common.concat(testsCases.server_only);
testsCases.client = testsCases.common.concat(testsCases.client_only);
@ -1950,7 +1980,7 @@ function findTests () {
function filterTestcaseByOptions (testname, options, whichFilter) {
if (options.hasOwnProperty('test') && (typeof (options.test) !== 'undefined')) {
whichFilter.filter = 'testcase';
return testname === options.test;
return testname.search(options.test) >= 0;
}
if (options.replication) {
@ -2016,6 +2046,14 @@ function filterTestcaseByOptions (testname, options, whichFilter) {
return false;
}
if ((testname.indexOf('-mmfiles') !== -1) && options.storageEngine === "rocksdb") {
whichFilter.filter = 'skip when running as rocksdb';
return false;
}
if ((testname.indexOf('-rocksdb') !== -1) && options.storageEngine === "mmfiles") {
whichFilter.filter = 'skip when running as mmfiles';
return false;
}
return true;
}
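
With these two additions the storage engine can be encoded directly in a test's file name. A quick illustration of the convention, using the filterTestcaseByOptions function above (file names hypothetical, and assuming no other filter applies):

const options = { storageEngine: 'rocksdb' };
const filtered = {};

filterTestcaseByOptions('shell-foo-mmfiles.js', options, filtered);
// => false, filtered.filter === 'skip when running as rocksdb'

filterTestcaseByOptions('shell-foo-rocksdb.js', options, filtered);
// => true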
@ -3466,8 +3504,7 @@ function runArangodRecovery (instanceInfo, options, script, setup) {
}
argv = argv.concat([
'--javascript.script',
fs.join('.', 'js', 'server', 'tests', 'recovery', script + '.js')
'--javascript.script', script
]);
let binary = ARANGOD_BIN;
@ -3476,94 +3513,9 @@ function runArangodRecovery (instanceInfo, options, script, setup) {
argv.unshift(ARANGOD_BIN);
}
instanceInfo.pid = executeAndWait(binary, argv, options, "recovery", instanceInfo.rootDir);
instanceInfo.pid = executeAndWait(binary, argv, options, "recovery", instanceInfo.rootDir, setup);
}
const recoveryTests = [
'insert-update-replace',
'die-during-collector',
'disk-full-logfile',
'disk-full-logfile-data',
'disk-full-datafile',
'collection-drop-recreate',
'collection-duplicate-name',
'create-with-temp',
'create-with-temp-old',
'create-collection-fail',
'create-collection-tmpfile',
'create-database-existing',
'create-database-fail',
'empty-datafiles',
'flush-drop-database-and-fail',
'drop-database-flush-and-fail',
'drop-database-only-tmp',
'create-databases',
'recreate-databases',
'drop-databases',
'create-and-drop-databases',
'drop-database-and-fail',
'flush-drop-database-and-fail',
'collection-rename-recreate',
'collection-rename-recreate-flush',
'collection-unload',
'resume-recovery-multi-flush',
'resume-recovery-simple',
'resume-recovery-all',
'resume-recovery-other',
'resume-recovery',
'foxx-directories',
'collection-duplicate',
'collection-rename',
'collection-properties',
'empty-logfiles',
'many-logs',
'multiple-logs',
'collection-recreate',
'drop-index',
'drop-index-shutdown',
'drop-indexes',
'create-indexes',
'create-collections',
'recreate-collection',
'drop-single-collection',
'drop-collections',
'collections-reuse',
'collections-different-attributes',
'indexes-after-flush',
'indexes-hash',
'indexes-rocksdb',
'indexes-rocksdb-nosync',
'indexes-rocksdb-restore',
'indexes-sparse-hash',
'indexes-skiplist',
'indexes-sparse-skiplist',
'indexes-geo',
'edges',
'indexes',
'many-inserts',
'many-updates',
'wait-for-sync',
'attributes',
'no-journal',
'write-throttling',
'collector-oom',
'transaction-no-abort',
'transaction-no-commit',
'transaction-just-committed',
'multi-database-durability',
'disk-full-no-collection-journal',
'no-shutdown-info-with-flush',
'no-shutdown-info-no-flush',
'no-shutdown-info-multiple-logs',
'insert-update-remove',
'insert-update-remove-distance',
'big-transaction-durability',
'transaction-durability',
'transaction-durability-multiple',
'corrupt-wal-marker-multiple',
'corrupt-wal-marker-single'
];
testFuncs.recovery = function (options) {
let results = {};
@ -3577,11 +3529,16 @@ testFuncs.recovery = function (options) {
let status = true;
let recoveryTests = scanTestPath('js/server/tests/recovery');
let count = 0;
for (let i = 0; i < recoveryTests.length; ++i) {
let test = recoveryTests[i];
let filtered = {};
if (options.test === undefined || options.test === test) {
if (filterTestcaseByOptions(test, options, filtered)) {
let instanceInfo = {};
count += 1;
runArangodRecovery(instanceInfo, options, test, true);
@ -3597,13 +3554,20 @@ testFuncs.recovery = function (options) {
status = false;
}
} else {
results[test] = {
status: true,
skipped: true
};
if (options.extremeVerbosity) {
print('Skipped ' + test + ' because of ' + filtered.filter);
}
}
}
if (count === 0) {
results["ALLTESTS"] = {
status: false,
skipped: true
};
status = false;
print(RED + "No testcase matched the filter." + RESET);
}
results.status = status;
return {
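
Taken together, the recovery driver no longer maintains a hard-coded list: it scans js/server/tests/recovery, filters with the same filterTestcaseByOptions used by the other suites, and hands each script path straight to arangod via --javascript.script. A condensed sketch of the resulting flow (the second, non-setup run is assumed from the setup flag; error handling omitted):

// Condensed sketch; not the verbatim implementation.
let recoveryTests = scanTestPath('js/server/tests/recovery');
let count = 0;

for (const test of recoveryTests) {
  const filtered = {};
  if (filterTestcaseByOptions(test, options, filtered)) {
    count += 1;
    const instanceInfo = {};
    runArangodRecovery(instanceInfo, options, test, true);  // setup run
    runArangodRecovery(instanceInfo, options, test, false); // recovery run
  }
}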

View File

@ -0,0 +1,100 @@
/* global describe, it */
'use strict';
const expect = require('chai').expect;
const sinon = require('sinon');
const statuses = require('statuses');
const path = require('path');
const fs = require('fs');
const internal = require('internal');
const crypto = require('@arangodb/crypto');
const SyntheticResponse = require('@arangodb/foxx/router/response');
describe('SyntheticResponse', function () {
describe('cookie', function () {
it('adds a cookie', function () {
require("console").log('adds a cookie');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana');
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'}
]);
});
it('optionally adds a TTL', function () {
require("console").log('optionally adds a TTL');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', {ttl: 22});
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana', lifeTime: 22}
]);
});
it('optionally adds some metadata', function () {
require("console").log('optionally adds some metadata');
const rawRes = {};
require("console").log("1");
const res = new SyntheticResponse(rawRes, {});
require("console").log("2");
res.cookie('hello', 'banana', {
path: '/path',
domain: 'cats.example',
secure: true,
httpOnly: true
});
require("console").log("3");
expect(rawRes.cookies).to.eql([
{
name: 'hello',
value: 'banana',
path: '/path',
domain: 'cats.example',
secure: true,
httpOnly: true
}
]);
require("console").log("4");
});
it('supports signed cookies when a secret is provided', function () {
require("console").log('supports signed cookies when a secret is provided');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', {secret: 'potato'});
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'},
{name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
]);
});
it('supports signed cookies with different algorithms', function () {
require("console").log('supports signed cookies with different algorithms');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', {
secret: 'potato',
algorithm: 'sha512'
});
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'},
{name: 'hello.sig', value: crypto.hmac('potato', 'banana', 'sha512')}
]);
});
it('treats options string as a secret', function () {
require("console").log('treats options string as a secret');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', 'potato');
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'},
{name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
]);
});
it('treats options number as a TTL value', function () {
require("console").log('treats options number as a TTL value');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', 22);
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana', lifeTime: 22}
]);
});
});
});
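
The tests above pin down the res.cookie contract: a plain string or number third argument is shorthand for a secret or a TTL, and providing a secret emits a second <name>.sig cookie carrying an HMAC of the value. A minimal sketch of that signing step, assuming @arangodb/crypto as required by the tests (not the actual SyntheticResponse implementation):

const crypto = require('@arangodb/crypto');

// Sketch of the signing behaviour the tests assert.
function signCookie (name, value, secret, algorithm) {
  const signature = algorithm
    ? crypto.hmac(secret, value, algorithm)
    : crypto.hmac(secret, value);
  return [
    {name: name, value: value},
    {name: name + '.sig', value: signature}
  ];
}

signCookie('hello', 'banana', 'potato');
// => [{name: 'hello', value: 'banana'},
//     {name: 'hello.sig', value: crypto.hmac('potato', 'banana')}]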

View File

@ -694,91 +694,4 @@ describe('SyntheticResponse', function () {
expect(res.headers).to.have.a.property('vary', '*');
});
});
describe('cookie', function () {
it('adds a cookie', function () {
require("console").log('adds a cookie');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana');
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'}
]);
});
it('optionally adds a TTL', function () {
require("console").log('optionally adds a TTL');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', {ttl: 22});
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana', lifeTime: 22}
]);
});
it('optionally adds some metadata', function () {
require("console").log('optionally adds some metadata');
const rawRes = {};
require("console").log("1");
const res = new SyntheticResponse(rawRes, {});
require("console").log("2");
res.cookie('hello', 'banana', {
path: '/path',
domain: 'cats.example',
secure: true,
httpOnly: true
});
require("console").log("3");
expect(rawRes.cookies).to.eql([
{
name: 'hello',
value: 'banana',
path: '/path',
domain: 'cats.example',
secure: true,
httpOnly: true
}
]);
require("console").log("4");
});
it('supports signed cookies when a secret is provided', function () {
require("console").log('supports signed cookies when a secret is provided');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', {secret: 'potato'});
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'},
{name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
]);
});
it('supports signed cookies with different algorithms', function () {
require("console").log('supports signed cookies with different algorithms');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', {
secret: 'potato',
algorithm: 'sha512'
});
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'},
{name: 'hello.sig', value: crypto.hmac('potato', 'banana', 'sha512')}
]);
});
it('treats options string as a secret', function () {
require("console").log('treats options string as a secret');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', 'potato');
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana'},
{name: 'hello.sig', value: crypto.hmac('potato', 'banana')}
]);
});
it('treats options number as a TTL value', function () {
require("console").log('treats options number as a TTL value');
const rawRes = {};
const res = new SyntheticResponse(rawRes, {});
res.cookie('hello', 'banana', 22);
expect(rawRes.cookies).to.eql([
{name: 'hello', value: 'banana', lifeTime: 22}
]);
});
});
});

View File

@ -68,7 +68,7 @@ class ApplicationFeature {
// enable or disable a feature
void setEnabled(bool value) {
if (!value && !isOptional() && _enableWith.empty()) {
if (!value && !isOptional()) {
THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_BAD_PARAMETER,
"cannot disable non-optional feature '" + name() + "'");
@ -76,9 +76,6 @@ class ApplicationFeature {
_enabled = value;
}
// return the name of another feature that automatically enables this feature
std::string enableWith() const { return _enableWith; }
// names of features required to be enabled for this feature to be enabled
std::vector<std::string> const& requires() const { return _requires; }
@ -141,12 +138,6 @@ class ApplicationFeature {
// make the feature optional (or not)
void setOptional(bool value) { _optional = value; }
// enable this feature automatically when another is enabled
void enableWith(std::string const& other) {
_enableWith = other;
_requires.emplace_back(other);
}
// note that this feature requires another to be present
void requires(std::string const& other) { _requires.emplace_back(other); }
@ -161,6 +152,13 @@ class ApplicationFeature {
// determine all direct and indirect ancestors of a feature
std::unordered_set<std::string> ancestors() const;
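// enable this feature only in conjunction with the given other feature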
void onlyEnabledWith(std::string const& other) { _onlyEnabledWith.emplace(other); }
// return the set of other features that must be enabled for this feature to be enabled
std::unordered_set<std::string> const& onlyEnabledWith() const {
return _onlyEnabledWith;
}
private:
// set a feature's state. this method should be called by the
// application server only
@ -182,14 +180,14 @@ class ApplicationFeature {
// is enabled
std::vector<std::string> _requires;
// name of other feature that will enable or disable this feature
std::string _enableWith;
// a list of start dependencies for the feature
std::unordered_set<std::string> _startsAfter;
// list of direct and indirect ancestors of the feature
std::unordered_set<std::string> _ancestors;
// enable this feature only if the following other features are enabled
std::unordered_set<std::string> _onlyEnabledWith;
// state of feature
ApplicationServer::FeatureState _state;

View File

@ -181,12 +181,13 @@ void ApplicationServer::run(int argc, char* argv[]) {
reportServerProgress(_state);
validateOptions();
// enable automatic features
enableAutomaticFeatures();
// setup and validate all feature dependencies
setupDependencies(true);
// turn off all features that depend on other features that have been
// turned off
disableDependentFeatures();
// allows process control
daemonize();
@ -198,6 +199,11 @@ void ApplicationServer::run(int argc, char* argv[]) {
_state = ServerState::IN_PREPARE;
reportServerProgress(_state);
prepare();
// turn off all features that depend on other features that have been
// turned off. we repeat this to allow features to turn other features
// off even in the prepare phase
disableDependentFeatures();
// permanently drop the privileges
dropPrivilegesPermanently();
@ -346,28 +352,6 @@ void ApplicationServer::validateOptions() {
}
}
void ApplicationServer::enableAutomaticFeatures() {
bool changed;
do {
changed = false;
for (auto& it : _features) {
auto other = it.second->enableWith();
if (other.empty()) {
continue;
}
if (!this->exists(other)) {
fail("feature '" + it.second->name() +
"' depends on unknown feature '" + other + "'");
}
bool otherIsEnabled = this->feature(other)->isEnabled();
if (otherIsEnabled != it.second->isEnabled()) {
it.second->setEnabled(otherIsEnabled);
changed = true;
}
}
} while (changed);
}
// setup and validate all feature dependencies, determine feature order
void ApplicationServer::setupDependencies(bool failOnMissing) {
LOG_TOPIC(TRACE, Logger::STARTUP)
@ -469,6 +453,35 @@ void ApplicationServer::daemonize() {
}
}
void ApplicationServer::disableDependentFeatures() {
LOG_TOPIC(TRACE, Logger::STARTUP) << "ApplicationServer::disableDependentFeatures";
for (auto feature : _orderedFeatures) {
auto const& onlyEnabledWith = feature->onlyEnabledWith();
if (!feature->isEnabled() || onlyEnabledWith.empty()) {
continue;
}
for (auto const& other : onlyEnabledWith) {
ApplicationFeature* f = lookupFeature(other);
if (f == nullptr) {
LOG_TOPIC(TRACE, Logger::STARTUP) << "turning off feature '" << feature->name()
<< "' because it is enabled only in conjunction with non-existing feature '"
<< f->name() << "'";
feature->disable();
break;
} else if (!f->isEnabled()) {
LOG_TOPIC(TRACE, Logger::STARTUP) << "turning off feature '" << feature->name()
<< "' because it is enabled only in conjunction with disabled feature '"
<< f->name() << "'";
feature->disable();
break;
}
}
}
}
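
The same propagation, modeled in JavaScript purely for illustration (a toy model, not the C++ API): a feature that is onlyEnabledWith others is switched off as soon as one of them is missing or disabled.

// Toy model of disableDependentFeatures; plain objects stand in for
// the C++ ApplicationFeature instances.
function disableDependentFeatures (orderedFeatures, byName) {
  for (const feature of orderedFeatures) {
    if (!feature.enabled || feature.onlyEnabledWith.length === 0) {
      continue;
    }
    for (const other of feature.onlyEnabledWith) {
      const f = byName.get(other);
      if (f === undefined || !f.enabled) {
        feature.enabled = false; // feature->disable() in the C++ version
        break;
      }
    }
  }
}

Because _orderedFeatures is sorted by dependency, a disabled feature is visited before its dependents, so one pass per phase suffices; the server runs the pass once after option validation and again after prepare().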
void ApplicationServer::prepare() {
LOG_TOPIC(TRACE, Logger::STARTUP) << "ApplicationServer::prepare";

View File

@ -247,15 +247,16 @@ class ApplicationServer {
// allows features to cross-validate their program options
void validateOptions();
// enable automatic features
void enableAutomaticFeatures();
// setup and validate all feature dependencies, determine feature order
void setupDependencies(bool failOnMissing);
// allows process control
void daemonize();
// disables all features that depend on other features which are
// themselves disabled
void disableDependentFeatures();
// allows features to prepare themselves
void prepare();

View File

@ -40,7 +40,8 @@ using namespace arangodb::cache;
TEST_CASE("cache::Table", "[cache]") {
SECTION("test static allocation size method") {
for (uint32_t i = Table::minLogSize; i <= Table::maxLogSize; i++) {
REQUIRE(Table::allocationSize(i) == (sizeof(Table) + (BUCKET_SIZE << i)));
REQUIRE(Table::allocationSize(i) ==
(sizeof(Table) + (BUCKET_SIZE << i) + Table::padding));
}
}
@ -48,7 +49,8 @@ TEST_CASE("cache::Table", "[cache]") {
for (uint32_t i = Table::minLogSize; i <= 20; i++) {
auto table = std::make_shared<Table>(i);
REQUIRE(table.get() != nullptr);
REQUIRE(table->memoryUsage() == (sizeof(Table) + (BUCKET_SIZE << i)));
REQUIRE(table->memoryUsage() ==
(sizeof(Table) + (BUCKET_SIZE << i) + Table::padding));
REQUIRE(table->logSize() == i);
REQUIRE(table->size() == (static_cast<uint64_t>(1) << i));
}
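
The updated expectation simply adds the constant Table::padding term to the allocation size. As a worked example, with an assumed BUCKET_SIZE of 64 bytes, an assumed sizeof(Table) of 200, and a hypothetical padding of 64 (all three values illustrative, not taken from the source):

// Illustrative arithmetic only; the real constants live in the C++ sources.
const BUCKET_SIZE = 64;  // assumed
const padding = 64;      // assumed Table::padding
const sizeofTable = 200; // assumed sizeof(Table)

function allocationSize (logSize) {
  return sizeofTable + (BUCKET_SIZE << logSize) + padding;
}

allocationSize(10); // => 200 + 65536 + 64 = 65800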