mirror of https://gitee.com/bigwinds/arangodb

Merge branch 'engine-api' of https://github.com/arangodb/arangodb into engine-api

# Conflicts:
#	arangod/RocksDBEngine/RocksDBEdgeIndex.cpp

commit e0e00b0b6b
@@ -111,6 +111,8 @@ devel

+* Foxx: Fix arangoUser sometimes not being set correctly
+
* fixed issue #1974


v3.2.alpha2 (2017-02-20)
------------------------

@@ -184,6 +186,9 @@ v3.1.18 (2017-XX-XX)
  more complicated queries, the maxDepth limit of 1 was not considered strictly
  enough, causing the traverser to do unlimited depth searches.

+* fixed issue #2415
+
+
v3.1.17 (2017-04-04)
--------------------
@@ -4,7 +4,7 @@
  "author": "ArangoDB GmbH",
  "description": "Official AQL manual for ArangoDB - the multi-model NoSQL database",
  "language": "en",
- "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "piwik", "sitemap-general", "ga"],
+ "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "sitemap-general", "ga"],
  "pdf": {
    "fontSize": 12,
    "toc": true,

@@ -23,10 +23,6 @@
    "js": ["styles/header.js"],
    "css": ["styles/header.css"]
  },
- "piwik": {
-   "URL": "www.arangodb.com/piwik/",
-   "siteId": 12
- },
  "sitemap-general": {
    "prefix": "https://docs.arangodb.com/devel/AQL/"
  },
@@ -4,7 +4,7 @@
  "author": "ArangoDB GmbH",
  "description": "Official HTTP API manual for ArangoDB - the multi-model NoSQL database",
  "language": "en",
- "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "piwik", "sitemap-general", "ga"],
+ "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "sitemap-general", "ga"],
  "pdf": {
    "fontSize": 12,
    "toc": true,

@@ -23,10 +23,6 @@
    "js": ["styles/header.js"],
    "css": ["styles/header.css"]
  },
- "piwik": {
-   "URL": "www.arangodb.com/piwik/",
-   "siteId": 12
- },
  "sitemap-general": {
    "prefix": "https://docs.arangodb.com/devel/HTTP/"
  },
@@ -5,7 +5,7 @@ Foxx services can define configuration parameters to make them more re-usable.

The `configuration` object maps names to configuration parameters:

-* The key is the name under whicht the parameter will be available
+* The key is the name under which the parameter will be available
  on the [service context's](Context.md) `configuration` property.

* The value is a parameter definition.
@@ -87,7 +87,7 @@ When a service uses another mounted service as a dependency the dependency's `ma

Service A and Service B are mounted in the same database.
Service B has a dependency with the local alias `"greeter"`.
-The dependency is configured to use the mount path of Service B.
+The dependency is configured to use the mount path of Service A.

```js
// Entry file of Service A
@@ -11,10 +11,10 @@ of documents. This skiplist is then used in queries to locate documents
within a given range. If the skiplist is declared unique, then no two documents are
allowed to have the same set of attribute values.

Creating a new document or updating a document will fail if the uniqueness is violated.
If the skiplist index is declared sparse, a document will be excluded from the index and no
uniqueness checks will be performed if any index attribute value is not set or has a value
of `null`.

Accessing Skiplist Indexes from the Shell
-----------------------------------------

@@ -30,16 +30,16 @@ Creates a unique skiplist index on all documents using *field1*, ... *fieldn*
as attribute paths. At least one attribute path has to be given. The index will
be non-sparse by default.

All documents in the collection must differ in terms of the indexed
attributes. Creating a new document or updating an existing document will
-will fail if the attribute uniqueness is violated.
+fail if the attribute uniqueness is violated.

To create a sparse unique index, set the *sparse* attribute to `true`:

`collection.ensureIndex({ type: "skiplist", fields: [ "field1", ..., "fieldn" ], unique: true, sparse: true })`

In a sparse index all documents will be excluded from the index that do not
contain at least one of the specified index attributes or that have a value
of `null` in any of the specified index attributes. Such documents will
not be indexed, and not be taken into account for uniqueness checks.

@@ -61,7 +61,7 @@ details, including the index-identifier, is returned.
    ~db._drop("ids");
  @END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock ensureUniqueSkiplistSingle


@startDocuBlockInline ensureUniqueSkiplistMultiColumn
  @EXAMPLE_ARANGOSH_OUTPUT{ensureUniqueSkiplistMultiColumn}
    ~db._create("ids");

@@ -86,7 +86,9 @@ Creates a non-unique skiplist index on all documents using *field1*, ...
*fieldn* as attribute paths. At least one attribute path has to be given.
The index will be non-sparse by default.

-To create a sparse unique index, set the *sparse* attribute to `true`.
+To create a sparse non-unique index, set the *sparse* attribute to `true`.
+
+`collection.ensureIndex({ type: "skiplist", fields: [ "field1", ..., "fieldn" ], sparse: true })`

In case that the index was successfully created, an object with the index
details, including the index-identifier, is returned.

@@ -136,7 +138,7 @@ details, including the index-identifier, is returned.
Constructs a query-by-example using a skiplist index:
`collection.byExample(example)`

Selects all documents from the collection that match the specified example
and returns a cursor. A skiplist index will be used if present.

You can use *toArray*, *next*, or *hasNext* to access the
@@ -4,7 +4,7 @@
  "author": "ArangoDB GmbH",
  "description": "Official manual for ArangoDB - the multi-model NoSQL database",
  "language": "en",
- "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "piwik", "sitemap-general", "ga", "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git"],
+ "plugins":["-search", "-lunr", "-sharing", "toggle-chapters", "addcssjs", "anchorjs", "sitemap-general", "ga", "callouts@git+https://github.com/Simran-B/gitbook-plugin-callouts.git"],
  "pdf": {
    "fontSize": 12,
    "toc": true,

@@ -23,10 +23,6 @@
    "js": ["styles/header.js"],
    "css": ["styles/header.css"]
  },
- "piwik": {
-   "URL": "www.arangodb.com/piwik/",
-   "siteId": 12
- },
  "sitemap-general": {
    "prefix": "https://docs.arangodb.com/devel/Manual/"
  },
@@ -6,9 +6,11 @@ if python -c "import sys ; sys.exit(sys.platform != 'cygwin')"; then
    exit 1
fi

+OSNAME=linux
isCygwin=0
if test "`uname -o||true`" == "Cygwin"; then
    isCygwin=1
+   OSNAME=windows
fi

SED=sed

@@ -16,6 +18,7 @@ isMac=0
if test "`uname`" == "Darwin"; then
    isMac=1
    SED=gsed
+   OSNAME=darwin
fi

# debian mac

@@ -370,6 +373,10 @@ while [ $# -gt 0 ]; do
            CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DOPENSSL_USE_STATIC_LIBS=TRUE"
            ;;

+       --downloadStarter)
+           shift
+           DOWNLOAD_STARTER=1
+           ;;

        --enterprise)
            shift

@@ -602,7 +609,24 @@ if test -n "${ENTERPRISE_GIT_URL}" ; then
    )
fi

+if test ${DOWNLOAD_STARTER} == 1; then
+    # we utilize https://developer.github.com/v3/repos/ to get the newest release:
+    STARTER_REV=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases |grep tag_name |head -n 1 |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
+    STARTER_URL=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases/tags/${STARTER_REV} |grep browser_download_url |grep "${OSNAME}" |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
+    if test -n "${STARTER_URL}"; then
+        curl -LO "${STARTER_URL}"
+        FN=`echo ${STARTER_URL} |${SED} "s;.*/;;"`
+        if test "${isCygwin}" == 1; then
+            TN=arangodb.exe
+        else
+            TN=arangodb
+        fi
+        mkdir -p ${BUILD_DIR}
+        mv ${FN} ${BUILD_DIR}/${TN}
+        chmod a+x ${BUILD_DIR}/${TN}
+    fi
+    CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DTHIRDPARTY_BIN=${BUILD_DIR}/${TN} "
+fi

test -d ${BUILD_DIR} || mkdir ${BUILD_DIR}
cd ${BUILD_DIR}

@@ -693,7 +717,7 @@ if test -n "${TARGET_DIR}"; then
        DLLS=`find ${SSLDIR} -name \*.dll |grep -i release`
        cp ${DLLS} bin/${BUILD_CONFIG}
        cp bin/${BUILD_CONFIG}/* bin/
-       cp tests/${BUILD_CONFIG}/* tests/
+       cp tests/${BUILD_CONFIG}/*exe bin/
    fi
    tar -u -f ${TARFILE_TMP} \
        bin etc tests
@@ -189,6 +189,8 @@ function main(argv) {
    testOutputDirectory = 'out/';
  }

+ options.testOutputDirectory = testOutputDirectory;
+
  // force json reply
  options.jsonReply = true;
@@ -103,14 +103,19 @@ int GatherBlock::shutdown(int errorCode) {
  DEBUG_BEGIN_BLOCK();
  // don't call default shutdown method since it does the wrong thing to
  // _gatherBlockBuffer
+ int ret = TRI_ERROR_NO_ERROR;
  for (auto it = _dependencies.begin(); it != _dependencies.end(); ++it) {
    int res = (*it)->shutdown(errorCode);

    if (res != TRI_ERROR_NO_ERROR) {
-     return res;
+     ret = res;
    }
  }

+ if (ret != TRI_ERROR_NO_ERROR) {
+   return ret;
+ }
+
  if (!_isSimple) {
    for (std::deque<AqlItemBlock*>& x : _gatherBlockBuffer) {
      for (AqlItemBlock* y : x) {

@@ -506,7 +511,7 @@ bool GatherBlock::OurLessThan::operator()(std::pair<size_t, size_t> const& a,
BlockWithClients::BlockWithClients(ExecutionEngine* engine,
                                   ExecutionNode const* ep,
                                   std::vector<std::string> const& shardIds)
-   : ExecutionBlock(engine, ep), _nrClients(shardIds.size()) {
+   : ExecutionBlock(engine, ep), _nrClients(shardIds.size()), _wasShutdown(false) {
  _shardIdMap.reserve(_nrClients);
  for (size_t i = 0; i < _nrClients; i++) {
    _shardIdMap.emplace(std::make_pair(shardIds[i], i));

@@ -542,7 +547,12 @@ int BlockWithClients::shutdown(int errorCode) {

  _doneForClient.clear();

- return ExecutionBlock::shutdown(errorCode);
+ if (_wasShutdown) {
+   return TRI_ERROR_NO_ERROR;
+ }
+ int res = ExecutionBlock::shutdown(errorCode);
+ _wasShutdown = true;
+ return res;

  // cppcheck-suppress style
  DEBUG_END_BLOCK();

@@ -1371,19 +1381,21 @@ int RemoteBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
int RemoteBlock::shutdown(int errorCode) {
  DEBUG_BEGIN_BLOCK();

- if (!_isResponsibleForInitializeCursor) {
-   // do nothing...
-   return TRI_ERROR_NO_ERROR;
- }
-
  // For every call we simply forward via HTTP

  std::unique_ptr<ClusterCommResult> res =
      sendRequest(rest::RequestType::PUT, "/_api/aql/shutdown/",
                  std::string("{\"code\":" + std::to_string(errorCode) + "}"));
- if (throwExceptionAfterBadSyncRequest(res.get(), true)) {
-   // artificially ignore error in case query was not found during shutdown
-   return TRI_ERROR_NO_ERROR;
+ try {
+   if (throwExceptionAfterBadSyncRequest(res.get(), true)) {
+     // artificially ignore error in case query was not found during shutdown
+     return TRI_ERROR_NO_ERROR;
+   }
+ } catch (arangodb::basics::Exception &ex) {
+   if (ex.code() == TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE) {
+     return TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE;
+   }
+   throw;
+ }

  StringBuffer const& responseBodyBuf(res->result->getBody());
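The `GatherBlock::shutdown` hunk above changes the loop from returning on the first failing dependency to visiting every dependency and only then propagating an error, while `RemoteBlock::shutdown` learns to tolerate an unreachable backend. A minimal standalone sketch of the shut-down-everything-then-report pattern (hypothetical `Block` type, not the actual ArangoDB classes):

```cpp
#include <vector>

struct Block {
  int shutdownResult = 0;  // 0 == success, anything else == error code
  int shutdown(int /*errorCode*/) { return shutdownResult; }
};

// Shut down ALL dependencies even if one of them fails, so no block is
// left half-alive, and only report the error afterwards -- mirroring the
// change in GatherBlock::shutdown.
int shutdownAll(std::vector<Block*> const& dependencies, int errorCode) {
  int ret = 0;
  for (Block* dep : dependencies) {
    int res = dep->shutdown(errorCode);
    if (res != 0) {
      ret = res;  // remember the error, but keep shutting down
    }
  }
  return ret;
}
```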
@@ -194,6 +194,9 @@ class BlockWithClients : public ExecutionBlock {
  /// @brief _doneForClient: the analogue of _done: _doneForClient.at(i) = true
  /// if we are done for the shard with clientId = i
  std::vector<bool> _doneForClient;
+
+ private:
+  bool _wasShutdown;
};

class ScatterBlock : public BlockWithClients {
@@ -1190,25 +1190,26 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {

/// @brief shutdown, will be called exactly once for the whole query
int ExecutionEngine::shutdown(int errorCode) {
+ int res = TRI_ERROR_NO_ERROR;
  if (_root != nullptr && !_wasShutdown) {
    // Take care of locking prevention measures in the cluster:
    if (_lockedShards != nullptr) {
      if (CollectionLockState::_noLockHeaders == _lockedShards) {
        CollectionLockState::_noLockHeaders = _previouslyLockedShards;
      }

      delete _lockedShards;
      _lockedShards = nullptr;
      _previouslyLockedShards = nullptr;
    }

+   res = _root->shutdown(errorCode);
+
    // prevent a duplicate shutdown
-   int res = _root->shutdown(errorCode);
    _wasShutdown = true;
-
-   return res;
  }

- return TRI_ERROR_NO_ERROR;
+ return res;
}

/// @brief create an execution engine from a plan
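`ExecutionEngine::shutdown`, like `BlockWithClients::shutdown` above, now funnels every call through a `_wasShutdown` flag so that a second invocation is a cheap no-op. The idiom in isolation, sketched with placeholder names:

```cpp
class EngineSketch {
 public:
  // Safe to call more than once: the real teardown runs exactly once,
  // later calls just report success (TRI_ERROR_NO_ERROR in the original).
  int shutdown(int errorCode) {
    if (_wasShutdown) {
      return 0;
    }
    int res = doShutdown(errorCode);
    _wasShutdown = true;  // set even on failure: never shut down twice
    return res;
  }

 private:
  int doShutdown(int /*errorCode*/) { return 0; }  // placeholder for real work
  bool _wasShutdown = false;
};
```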
@@ -1122,7 +1122,7 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {

  if (it2 == varInfo.end()) {
    // report an error here to prevent crashing
-   THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "missing variable #" + std::to_string(v->id) + " (" + v->name + ") for node " + en->getTypeString() + " while planning registers");
+   THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, std::string("missing variable #") + std::to_string(v->id) + " (" + v->name + ") for node #" + std::to_string(en->id()) + " (" + en->getTypeString() + ") while planning registers");
  }

  // finally adjust the variable inside the IN calculation
@@ -3808,6 +3808,10 @@ void arangodb::aql::inlineSubqueriesRule(Optimizer* opt,
      RedundantCalculationsReplacer finder(replacements);
      plan->root()->walk(&finder);

+     plan->clearVarUsageComputed();
+     plan->invalidateCost();
+     plan->findVarUsage();
+
      // abort optimization
      current = nullptr;
    }
@@ -884,6 +884,9 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
      }
    }
  } catch (...) {
    LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
                                      << "got an exception executing "
                                      << " this: " << (uintptr_t) this;
    delete value;
    throw;
  }
@@ -629,6 +629,9 @@ void ShortestPathNode::prepareOptions() {
      break;
    }
  }
+ // If we use the path output the cache should activate document
+ // caching otherwise it is not worth it.
+ _options->activateCache(false);
  _optionsBuild = true;
}
@@ -976,6 +976,9 @@ void TraversalNode::prepareOptions() {
    TRI_ASSERT(!_options->_baseVertexExpression->isV8());

  }
+ // If we use the path output the cache should activate document
+ // caching otherwise it is not worth it.
+ _options->activateCache(false);
  _optionsBuild = true;
}
@@ -237,27 +237,39 @@ int Aqlparse (arangodb::aql::Parser* parser);

using namespace arangodb::aql;

+////////////////////////////////////////////////////////////////////////////////
+/// @brief shortcut macro for signaling out of memory
+////////////////////////////////////////////////////////////////////////////////
+
#define ABORT_OOM                                 \
  parser->registerError(TRI_ERROR_OUT_OF_MEMORY); \
  YYABORT;

#define scanner parser->scanner()

+////////////////////////////////////////////////////////////////////////////////
+/// @brief forward for lexer function defined in Aql/tokens.ll
+////////////////////////////////////////////////////////////////////////////////
+
int Aqllex (YYSTYPE*,
            YYLTYPE*,
            void*);

+////////////////////////////////////////////////////////////////////////////////
+/// @brief register parse error
+////////////////////////////////////////////////////////////////////////////////
+
void Aqlerror (YYLTYPE* locp,
               arangodb::aql::Parser* parser,
               char const* message) {
  parser->registerParseError(TRI_ERROR_QUERY_PARSE, message, locp->first_line, locp->first_column);
}

+////////////////////////////////////////////////////////////////////////////////
+/// @brief check if any of the variables used in the INTO expression were
+/// introduced by the COLLECT itself, in which case it would fail
+////////////////////////////////////////////////////////////////////////////////
+
static Variable const* CheckIntoVariables(AstNode const* collectVars,
                                          std::unordered_set<Variable const*> const& vars) {
  if (collectVars == nullptr || collectVars->type != NODE_TYPE_ARRAY) {

@@ -280,7 +292,10 @@ static Variable const* CheckIntoVariables(AstNode const* collectVars,
  return nullptr;
}

+////////////////////////////////////////////////////////////////////////////////
+/// @brief register variables in the scope
+////////////////////////////////////////////////////////////////////////////////
+
static void RegisterAssignVariables(arangodb::aql::Scopes* scopes, AstNode const* vars) {
  size_t const n = vars->numMembers();
  for (size_t i = 0; i < n; ++i) {

@@ -294,7 +309,10 @@ static void RegisterAssignVariables(arangodb::aql::Scopes* scopes, AstNode const
  }
}

+////////////////////////////////////////////////////////////////////////////////
+/// @brief validate the aggregate variables expressions
+////////////////////////////////////////////////////////////////////////////////
+
static bool ValidateAggregates(Parser* parser, AstNode const* aggregates) {
  size_t const n = aggregates->numMembers();

@@ -328,7 +346,10 @@ static bool ValidateAggregates(Parser* parser, AstNode const* aggregates) {
  return true;
}

+////////////////////////////////////////////////////////////////////////////////
+/// @brief start a new scope for the collect
+////////////////////////////////////////////////////////////////////////////////
+
static bool StartCollectScope(arangodb::aql::Scopes* scopes) {
  // check if we are in the main scope
  if (scopes->type() == arangodb::aql::AQL_SCOPE_MAIN) {

@@ -342,7 +363,10 @@ static bool StartCollectScope(arangodb::aql::Scopes* scopes) {
  return true;
}

+////////////////////////////////////////////////////////////////////////////////
+/// @brief get the INTO variable stored in a node (may not exist)
+////////////////////////////////////////////////////////////////////////////////
+
static AstNode const* GetIntoVariable(Parser* parser, AstNode const* node) {
  if (node == nullptr) {
    return nullptr;

@@ -362,7 +386,10 @@ static AstNode const* GetIntoVariable(Parser* parser, AstNode const* node) {
  return parser->ast()->createNodeVariable(v->getStringValue(), v->getStringLength(), true);
}

+////////////////////////////////////////////////////////////////////////////////
+/// @brief get the INTO variable = expression stored in a node (may not exist)
+////////////////////////////////////////////////////////////////////////////////
+
static AstNode const* GetIntoExpression(AstNode const* node) {
  if (node == nullptr || node->type == NODE_TYPE_VALUE) {
    return nullptr;
@@ -345,6 +345,8 @@ SET(ARANGOD_SOURCES
  VocBase/TransactionManager.cpp
  VocBase/Traverser.cpp
  VocBase/TraverserCache.cpp
+ VocBase/TraverserCacheFactory.cpp
+ VocBase/TraverserDocumentCache.cpp
  VocBase/TraverserOptions.cpp
  VocBase/modes.cpp
  VocBase/replication-applier.cpp
@@ -35,28 +35,32 @@
using ClusterEdgeCursor = arangodb::traverser::ClusterEdgeCursor;
using StringRef = arangodb::StringRef;

-ClusterEdgeCursor::ClusterEdgeCursor(StringRef vertexId, uint64_t depth,
-                                     arangodb::traverser::ClusterTraverser* traverser)
-    : _position(0), _resolver(traverser->_trx->resolver()), _traverser(traverser) {
-  transaction::BuilderLeaser leased(traverser->_trx);
-
-  transaction::BuilderLeaser b(traverser->_trx);
-  b->add(VPackValuePair(vertexId.data(), vertexId.length(), VPackValueType::String));
-
-  fetchEdgesFromEngines(traverser->_dbname, traverser->_engines, b->slice(), depth,
-                        traverser->_edges, _edgeList, traverser->_datalake,
-                        *(leased.get()), traverser->_filteredPaths,
-                        traverser->_readDocuments);
-}
+ClusterEdgeCursor::ClusterEdgeCursor(
+    StringRef vertexId, uint64_t depth,
+    arangodb::traverser::ClusterTraverser* traverser)
+    : _position(0),
+      _resolver(traverser->_trx->resolver()),
+      _traverser(traverser) {
+  transaction::BuilderLeaser leased(traverser->_trx);
+
+  transaction::BuilderLeaser b(traverser->_trx);
+  b->add(VPackValuePair(vertexId.data(), vertexId.length(),
+                        VPackValueType::String));
+
+  fetchEdgesFromEngines(traverser->_dbname, traverser->_engines, b->slice(),
+                        depth, traverser->_edges, _edgeList,
+                        traverser->_datalake, *(leased.get()),
+                        traverser->_filteredPaths, traverser->_readDocuments);
+}

-bool ClusterEdgeCursor::next(std::function<void(StringRef const&,
-                             VPackSlice, size_t)> callback) {
+bool ClusterEdgeCursor::next(
+    std::function<void(StringRef const&, VPackSlice, size_t)> callback) {
  if (_position < _edgeList.size()) {
    VPackSlice edge = _edgeList[_position];
-    std::string eid = transaction::helpers::extractIdString(_resolver, edge, VPackSlice());
-    StringRef persId = _traverser->traverserCache()->persistString(StringRef(eid));
+    std::string eid =
+        transaction::helpers::extractIdString(_resolver, edge, VPackSlice());
+    StringRef persId =
+        _traverser->traverserCache()->persistString(StringRef(eid));
    callback(persId, edge, _position);
    ++_position;
    return true;

@@ -64,11 +68,13 @@ bool ClusterEdgeCursor::next(std::function<void(StringRef const&,
  return false;
}

-void ClusterEdgeCursor::readAll(std::function<void(StringRef const&,
-                                VPackSlice, size_t&)> callback) {
+void ClusterEdgeCursor::readAll(
+    std::function<void(StringRef const&, VPackSlice, size_t&)> callback) {
  for (auto const& edge : _edgeList) {
-    std::string eid = transaction::helpers::extractIdString(_resolver, edge, VPackSlice());
-    StringRef persId = _traverser->traverserCache()->persistString(StringRef(eid));
+    std::string eid =
+        transaction::helpers::extractIdString(_resolver, edge, VPackSlice());
+    StringRef persId =
+        _traverser->traverserCache()->persistString(StringRef(eid));
    callback(persId, edge, _position);
  }
}
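Behaviour is unchanged by the reformatting above: `next()` hands out one edge per callback invocation and advances `_position`, while `readAll()` drains the remaining list. A self-contained sketch of that callback-cursor shape, with `std::string` standing in for `StringRef`/`VPackSlice`:

```cpp
#include <cstddef>
#include <functional>
#include <string>
#include <vector>

class EdgeCursorSketch {
 public:
  explicit EdgeCursorSketch(std::vector<std::string> edges)
      : _position(0), _edgeList(std::move(edges)) {}

  // Hand exactly one edge to the callback; returns false once exhausted.
  bool next(std::function<void(std::string const&, size_t)> const& cb) {
    if (_position < _edgeList.size()) {
      cb(_edgeList[_position], _position);
      ++_position;
      return true;
    }
    return false;
  }

  // Hand every remaining edge to the callback in one go.
  void readAll(std::function<void(std::string const&, size_t)> const& cb) {
    for (; _position < _edgeList.size(); ++_position) {
      cb(_edgeList[_position], _position);
    }
  }

 private:
  size_t _position;
  std::vector<std::string> _edgeList;
};
```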
@@ -28,6 +28,7 @@
#include "Graph/SingleServerEdgeCursor.h"
#include "Indexes/Index.h"
#include "VocBase/TraverserCache.h"
+#include "VocBase/TraverserCacheFactory.h"

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>

@@ -155,14 +156,14 @@ BaseOptions::BaseOptions(transaction::Methods* trx)
      _trx(trx),
      _tmpVar(nullptr),
      _isCoordinator(arangodb::ServerState::instance()->isCoordinator()),
-     _cache(std::make_unique<TraverserCache>(_trx)) {}
+     _cache(nullptr) {}

BaseOptions::BaseOptions(BaseOptions const& other)
    : _ctx(new aql::FixedVarExpressionContext()),
      _trx(other._trx),
      _tmpVar(nullptr),
      _isCoordinator(arangodb::ServerState::instance()->isCoordinator()),
-     _cache(std::make_unique<TraverserCache>(_trx)) {
+     _cache(nullptr) {
  TRI_ASSERT(other._baseLookupInfos.empty());
  TRI_ASSERT(other._tmpVar == nullptr);
}

@@ -173,7 +174,7 @@ BaseOptions::BaseOptions(arangodb::aql::Query* query, VPackSlice info,
      _trx(query->trx()),
      _tmpVar(nullptr),
      _isCoordinator(arangodb::ServerState::instance()->isCoordinator()),
-     _cache(std::make_unique<TraverserCache>(_trx)) {
+     _cache(nullptr) {
  VPackSlice read = info.get("tmpVar");
  if (!read.isObject()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,

@@ -386,3 +387,22 @@ EdgeCursor* BaseOptions::nextCursorLocal(ManagedDocumentResult* mmdr,
  }
  return allCursor.release();
}
+
+TraverserCache* BaseOptions::cache() {
+  if (_cache == nullptr) {
+    // If this assert is triggered the code should
+    // have called activateCache() before
+    TRI_ASSERT(false);
+    // In production just gracefully initialize
+    // the cache without document cache, s.t. system does not crash
+    activateCache(false);
+  }
+  TRI_ASSERT(_cache != nullptr);
+  return _cache.get();
+}
+
+void BaseOptions::activateCache(bool enableDocumentCache) {
+  // Do not call this twice.
+  TRI_ASSERT(_cache == nullptr);
+  _cache.reset(cacheFactory::CreateCache(_trx, enableDocumentCache));
+}
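With the constructors no longer creating a `TraverserCache` eagerly, `cache()` becomes a guarded lazy accessor: assert in maintainer builds, recover with the cheapest cache variant in production. A condensed sketch of that accessor pair (stand-in `CacheSketch` type, plain `assert` instead of `TRI_ASSERT`):

```cpp
#include <cassert>
#include <memory>

struct CacheSketch {
  explicit CacheSketch(bool withDocuments) : documentsEnabled(withDocuments) {}
  bool documentsEnabled;
};

class OptionsSketch {
 public:
  CacheSketch* cache() {
    if (_cache == nullptr) {
      // Callers should have called activateCache() first; flag the
      // omission in debug builds (assert aborts there) ...
      assert(false && "activateCache() was not called");
      // ... and in release builds recover gracefully with the variant
      // that skips document caching, instead of crashing.
      activateCache(false);
    }
    return _cache.get();
  }

  void activateCache(bool enableDocumentCache) {
    assert(_cache == nullptr);  // do not call this twice
    _cache = std::make_unique<CacheSketch>(enableDocumentCache);
  }

 private:
  std::unique_ptr<CacheSketch> _cache;
};
```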
@@ -119,6 +119,10 @@ struct BaseOptions {
  /// @brief Estimate the total cost for this operation
  virtual double estimateCost(size_t& nrItems) const = 0;

+ traverser::TraverserCache* cache();
+
+ void activateCache(bool enableDocumentCache);
+
 protected:
  double costForLookupInfoList(std::vector<LookupInfo> const& list,
                               size_t& createItems) const;
@@ -96,11 +96,15 @@ arangodb::aql::AqlValue NeighborsEnumerator::lastVertexToAqlValue() {
}

arangodb::aql::AqlValue NeighborsEnumerator::lastEdgeToAqlValue() {
- // TODO should return Optimizer failed
+ // If we get here the optimizer decided we do NOT need edges.
+ // But the Block asks for it.
  TRI_ASSERT(false);
  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}

arangodb::aql::AqlValue NeighborsEnumerator::pathToAqlValue(arangodb::velocypack::Builder& result) {
- // TODO should return Optimizer failed
+ // If we get here the optimizer decided we do NOT need paths
+ // But the Block asks for it.
  TRI_ASSERT(false);
  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
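Replacing the old `TODO` comments with an assert-plus-throw makes the contract explicit: if the optimizer decided edge or path output is unnecessary, reaching these methods is a planning bug. The guard-the-unreachable idiom, sketched generically:

```cpp
#include <cassert>
#include <stdexcept>

// A path the optimizer should have made unreachable: scream in debug
// builds, and in release builds fail with a clear error instead of
// returning garbage data.
int lastEdgeValueSketch() {
  assert(false && "optimizer decided edges are not needed");
  throw std::logic_error("edge output requested although optimized away");
}
```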
@@ -56,6 +56,8 @@ MMFilesPathBasedIndex::MMFilesPathBasedIndex(TRI_idx_iid_t iid,
      break;
    }
  }
+
+ TRI_ASSERT(baseSize > 0);

  _allocator.reset(new FixedSizeAllocator(baseSize + sizeof(MMFilesIndexElementValue) * numPaths()));
}
@@ -191,7 +191,7 @@ bool MMFilesPersistentIndexIterator::next(TokenCallback const& cb, size_t limit)
MMFilesPersistentIndex::MMFilesPersistentIndex(TRI_idx_iid_t iid,
                                               arangodb::LogicalCollection* collection,
                                               arangodb::velocypack::Slice const& info)
-   : MMFilesPathBasedIndex(iid, collection, info, 0, true) {}
+   : MMFilesPathBasedIndex(iid, collection, info, sizeof(TRI_voc_rid_t), true) {}

/// @brief destroy the index
MMFilesPersistentIndex::~MMFilesPersistentIndex() {}
@@ -700,7 +700,7 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,
        if (other != nullptr) {
          TRI_voc_cid_t otherCid = other->cid();
          state->releaseCollection(otherCid);
-         vocbase->dropCollection(other, true);
+         vocbase->dropCollection(other, true, -1.0);
        }

        int res = vocbase->renameCollection(collection, name, true);

@@ -968,7 +968,7 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,

        if (collection != nullptr) {
          // drop an existing collection
-         vocbase->dropCollection(collection, true);
+         vocbase->dropCollection(collection, true, -1.0);
        }

        MMFilesPersistentIndexFeature::dropCollection(databaseId, collectionId);

@@ -987,7 +987,7 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,
            TRI_voc_cid_t otherCid = collection->cid();

            state->releaseCollection(otherCid);
-           vocbase->dropCollection(collection, true);
+           vocbase->dropCollection(collection, true, -1.0);
          }
        } else {
          LOG_TOPIC(WARN, arangodb::Logger::FIXME)

@@ -1297,7 +1297,7 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,
        }

        if (collection != nullptr) {
-         vocbase->dropCollection(collection, true);
+         vocbase->dropCollection(collection, true, -1.0);
        }
        MMFilesPersistentIndexFeature::dropCollection(databaseId, collectionId);
        break;
@@ -1786,7 +1786,7 @@ int InitialSyncer::handleCollection(VPackSlice const& parameters,
      // regular collection
      setProgress("dropping " + collectionMsg);

-     int res = _vocbase->dropCollection(col, true);
+     int res = _vocbase->dropCollection(col, true, -1.0);

      if (res != TRI_ERROR_NO_ERROR) {
        errorMsg = "unable to drop " + collectionMsg + ": " +
@@ -530,7 +530,7 @@ int Syncer::dropCollection(VPackSlice const& slice, bool reportError) {
    return TRI_ERROR_NO_ERROR;
  }

- return _vocbase->dropCollection(col, true);
+ return _vocbase->dropCollection(col, true, -1.0);
}

////////////////////////////////////////////////////////////////////////////////
@@ -1475,7 +1475,7 @@ int RestReplicationHandler::processRestoreCollection(
  // drop an existing collection if it exists
  if (col != nullptr) {
    if (dropExisting) {
-     Result res = _vocbase->dropCollection(col, true);
+     Result res = _vocbase->dropCollection(col, true, -1.0);

      if (res.errorNumber() == TRI_ERROR_FORBIDDEN) {
        // some collections must not be dropped
@@ -76,30 +76,50 @@ class RestReplicationHandler : public RestVocbaseBaseHandler {

  //////////////////////////////////////////////////////////////////////////////
  /// @brief insert the applier action into an action list
+ /// (helper in loggerFollow)
+ /// Saves a follow as synchronous up to lastTick
+ /// lastTick == 0 => new Server
  //////////////////////////////////////////////////////////////////////////////

- void insertClient(TRI_voc_tick_t);
+ void insertClient(TRI_voc_tick_t lastTick);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief determine chunk size from request
+ /// Reads chunkSize attribute from request
+ /// Required for ReplicationDumpContext (MMFiles)
  //////////////////////////////////////////////////////////////////////////////

  uint64_t determineChunkSize() const;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief return the state of the replication logger
+ /// @route GET logger-state
+ /// @caller Syncer::getMasterState
+ /// @response VPackObject describing the ServerState in a certain point
+ ///           * state (server state)
+ ///           * server (version / id)
+ ///           * clients (list of followers)
  //////////////////////////////////////////////////////////////////////////////

  void handleCommandLoggerState();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief return the available logfile range
+ /// @route GET logger-tick-ranges
+ /// @caller js/client/modules/@arangodb/replication.js
+ /// @response VPackArray, containing info about each datafile
+ ///           * filename
+ ///           * status
+ ///           * tickMin - tickMax
  //////////////////////////////////////////////////////////////////////////////

  void handleCommandLoggerTickRanges();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief return the first tick available in a logfile
+ /// @route GET logger-first-tick
+ /// @caller js/client/modules/@arangodb/replication.js
+ /// @response VPackObject with minTick of LogfileManager->ranges()
  //////////////////////////////////////////////////////////////////////////////

  void handleCommandLoggerFirstTick();
@@ -187,10 +187,11 @@ size_t RocksDBEdgeIndex::memory() const {
/// @brief return a VelocyPack representation of the index
void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder,
                                    bool withFigures) const {
+ TRI_ASSERT(builder.isOpenArray() || builder.isEmpty());
  // get data that needs to be modified
  builder.openObject();
  RocksDBIndex::toVelocyPack(builder, withFigures);
  // add selectivity estimate hard-coded
  builder.add("unique", VPackValue(false));
  builder.add("sparse", VPackValue(false));
  builder.close();
}
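`toVelocyPack` now asserts that the builder arrives inside an open array (or empty), appends one object, and closes only what it opened. The open/add/close discipline with the public VelocyPack API, as a small self-contained sketch:

```cpp
#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

// Build {"type":"edge","unique":false,"sparse":false}; the caller may be
// assembling an enclosing array of such index descriptions, so we only
// open and close the object we add ourselves.
VPackBuilder describeEdgeIndex() {
  VPackBuilder builder;
  builder.openObject();                      // matched by close() below
  builder.add("type", VPackValue("edge"));
  builder.add("unique", VPackValue(false));  // edge index is never unique
  builder.add("sparse", VPackValue(false));  // ... and never sparse
  builder.close();
  return builder;
}
```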
@@ -846,6 +846,7 @@ static void JS_DropVocbaseCol(v8::FunctionCallbackInfo<v8::Value> const& args) {
  }

  bool allowDropSystem = false;
+ double timeout = -1.0;  // forever, unless specified otherwise
  if (args.Length() > 0) {
    // options
    if (args[0]->IsObject()) {

@@ -855,12 +856,16 @@ static void JS_DropVocbaseCol(v8::FunctionCallbackInfo<v8::Value> const& args) {
      if (optionsObject->Has(IsSystemKey)) {
        allowDropSystem = TRI_ObjectToBoolean(optionsObject->Get(IsSystemKey));
      }
+     TRI_GET_GLOBAL_STRING(TimeoutKey);
+     if (optionsObject->Has(TimeoutKey)) {
+       timeout = TRI_ObjectToDouble(optionsObject->Get(TimeoutKey));
+     }
    } else {
      allowDropSystem = TRI_ObjectToBoolean(args[0]);
    }
  }

- int res = collection->vocbase()->dropCollection(collection, allowDropSystem);
+ int res = collection->vocbase()->dropCollection(collection, allowDropSystem, timeout);

  if (res != TRI_ERROR_NO_ERROR) {
    TRI_V8_THROW_EXCEPTION_MESSAGE(res, "cannot drop collection");
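All `dropCollection` call sites in this commit gain a third timeout argument, with `-1.0` preserving the old wait-forever behaviour, and the V8 binding reads an optional `timeout` property from the options object. A hypothetical sketch of what such timeout semantics boil down to (not the actual vocbase implementation):

```cpp
#include <chrono>
#include <thread>

// Hypothetical stand-in: poll until the drop can proceed or the deadline
// passes. timeoutSeconds < 0.0 means "no deadline" (the -1.0 default).
bool dropWithTimeout(double timeoutSeconds, bool (*tryDrop)()) {
  auto start = std::chrono::steady_clock::now();
  for (;;) {
    if (tryDrop()) {
      return true;
    }
    if (timeoutSeconds >= 0.0) {
      std::chrono::duration<double> elapsed =
          std::chrono::steady_clock::now() - start;
      if (elapsed.count() >= timeoutSeconds) {
        return false;  // deadline exceeded, give up
      }
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
}
```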
@@ -26,15 +26,10 @@
#include "Basics/StringRef.h"
#include "Basics/VelocyPackHelper.h"

-#include "Cache/Common.h"
-#include "Cache/Cache.h"
-#include "Cache/CacheManagerFeature.h"
-#include "Cache/Finding.h"
-
+#include "Aql/AqlValue.h"
#include "Logger/Logger.h"
#include "Transaction/Methods.h"
#include "VocBase/ManagedDocumentResult.h"
-#include "Aql/AqlValue.h"

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>

@@ -44,33 +39,12 @@ using namespace arangodb;
using namespace arangodb::traverser;

TraverserCache::TraverserCache(transaction::Methods* trx)
-    : _cache(nullptr), _mmdr(new ManagedDocumentResult{}),
+    : _mmdr(new ManagedDocumentResult{}),
      _trx(trx), _insertedDocuments(0),
      _stringHeap(new StringHeap{4096}) /* arbitrary block-size may be adjusted for performance */ {
- auto cacheManager = CacheManagerFeature::MANAGER;
- TRI_ASSERT(cacheManager != nullptr);
- _cache = cacheManager->createCache(cache::CacheType::Plain);
}
-
-TraverserCache::~TraverserCache() {
-  if (_cache != nullptr) {
-    auto cacheManager = CacheManagerFeature::MANAGER;
-    cacheManager->destroyCache(_cache);
-  }
-}
-
-// @brief Only for internal use, Cache::Finding prevents
-// the cache from removing this specific object. Should not be retained
-// for a longer period of time.
-// DO NOT give it to a caller.
-cache::Finding TraverserCache::lookup(StringRef idString) {
-  // Caller should check before.
-  TRI_ASSERT(_cache != nullptr);
-  VPackValueLength keySize = idString.length();
-  void const* key = idString.data();
-  //uint32_t keySize = static_cast<uint32_t>(idString.byteSize());
-  return _cache->find(key, (uint32_t)keySize);
-}
+
+TraverserCache::~TraverserCache() {}

VPackSlice TraverserCache::lookupInCollection(StringRef id) {
  size_t pos = id.find('/');

@@ -95,96 +69,27 @@ VPackSlice TraverserCache::lookupInCollection(StringRef id) {
  } else {
    result = VPackSlice(_mmdr->vpack());
  }
-
- void const* key = id.begin();
- VPackValueLength keySize = id.length();
-
- void const* resVal = result.begin();
- uint64_t resValSize = static_cast<uint64_t>(result.byteSize());
- std::unique_ptr<cache::CachedValue> value(
-     cache::CachedValue::construct(key, (uint32_t)keySize, resVal, resValSize));
-
- if (value && _cache != nullptr) {
-   bool success = _cache->insert(value.get());
-   if (!success) {
-     LOG_TOPIC(DEBUG, Logger::GRAPHS) << "Insert failed";
-   } else {
-     // Cache is responsible.
-     // If this failed, well we do not store it and read it again next time.
-     value.release();
-   }
- }
  ++_insertedDocuments;
  return result;
}

void TraverserCache::insertIntoResult(StringRef idString,
                                      VPackBuilder& builder) {
- if (_cache != nullptr) {
-   auto finding = lookup(idString);
-   if (finding.found()) {
-     auto val = finding.value();
-     VPackSlice slice(val->value());
-     // finding makes sure that slice content stays valid.
-     builder.add(slice);
-     return;
-   }
- }
- // Not in cache. Fetch and insert.
  builder.add(lookupInCollection(idString));
}

aql::AqlValue TraverserCache::fetchAqlResult(StringRef idString) {
- if (_cache != nullptr) {
-   auto finding = lookup(idString);
-   if (finding.found()) {
-     auto val = finding.value();
-     // finding makes sure that slice content stays valid.
-     return aql::AqlValue(VPackSlice(val->value()));
-   }
- }
- // Not in cache. Fetch and insert.
  return aql::AqlValue(lookupInCollection(idString));
}

void TraverserCache::insertDocument(StringRef idString, arangodb::velocypack::Slice const& document) {
- if (_cache == nullptr || !lookup(idString).found()) {
-   // Really fetch document
-   VPackValueLength keySize = idString.length();
-   void const* key = idString.data();
-
-   void const* resVal = document.begin();
-   uint64_t resValSize = static_cast<uint64_t>(document.byteSize());
-   std::unique_ptr<cache::CachedValue> value(cache::CachedValue::construct(key, (uint32_t)keySize,
-                                             resVal, resValSize));
-
-   if (value && _cache != nullptr) {
-     bool success = _cache->insert(value.get());
-     if (!success) {
-       LOG_TOPIC(DEBUG, Logger::GRAPHS) << "Insert document into cache failed";
-     } else {
-       // Cache is responsible.
-       // If this failed, well we do not store it and read it again next time.
-       value.release();
-     }
-   }
-   ++_insertedDocuments;
- }
  ++_insertedDocuments;
  return;
}

bool TraverserCache::validateFilter(
    StringRef idString,
    std::function<bool(VPackSlice const&)> filterFunc) {
- if (_cache != nullptr) {
-   auto finding = lookup(idString);
-   if (finding.found()) {
-     auto val = finding.value();
-     VPackSlice slice(val->value());
-     // finding makes sure that slice content stays valid.
-     return filterFunc(slice);
-   }
- }
- // Not in cache. Fetch and insert.
  VPackSlice slice = lookupInCollection(idString);
  return filterFunc(slice);
}
@@ -30,11 +30,6 @@ namespace arangodb {
class ManagedDocumentResult;
class StringHeap;

-namespace cache {
-class Cache;
-class Finding;
-}
-
namespace transaction {
class Methods;
}

@@ -54,47 +49,45 @@ class TraverserCache {
 public:
  explicit TraverserCache(transaction::Methods* trx);

- ~TraverserCache();
+ virtual ~TraverserCache();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Inserts the real document stored within the token
  /// into the given builder.
- /// The document will be taken from the hash-cache.
- /// If it is not cached it will be looked up in the StorageEngine
+ /// The document will be looked up in the StorageEngine
  //////////////////////////////////////////////////////////////////////////////

- void insertIntoResult(StringRef idString,
-                       arangodb::velocypack::Builder& builder);
+ virtual void insertIntoResult(StringRef idString,
+                               arangodb::velocypack::Builder& builder);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Return AQL value containing the result
- /// The document will be taken from the hash-cache.
- /// If it is not cached it will be looked up in the StorageEngine
+ /// The document will be looked up in the StorageEngine
  //////////////////////////////////////////////////////////////////////////////

- aql::AqlValue fetchAqlResult(StringRef idString);
+ virtual aql::AqlValue fetchAqlResult(StringRef idString);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Insert value into store
  //////////////////////////////////////////////////////////////////////////////

- void insertDocument(StringRef idString,
-                     arangodb::velocypack::Slice const& document);
+ virtual void insertDocument(StringRef idString,
+                             arangodb::velocypack::Slice const& document);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Throws the document referenced by the token into the filter
  /// function and returns its result.
- /// The document will be taken from the hash-cache.
- /// If it is not cached it will be looked up in the StorageEngine
+ /// The document will be looked up in the StorageEngine
  //////////////////////////////////////////////////////////////////////////////

- bool validateFilter(StringRef idString,
-                     std::function<bool(arangodb::velocypack::Slice const&)> filterFunc);
+ virtual bool validateFilter(StringRef idString,
+                             std::function<bool(arangodb::velocypack::Slice const&)> filterFunc);

  size_t getAndResetInsertedDocuments() {
    size_t tmp = _insertedDocuments;
    _insertedDocuments = 0;
    return tmp;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Persist the given id string. The return value is guaranteed to

@@ -102,18 +95,10 @@ class TraverserCache {
  //////////////////////////////////////////////////////////////////////////////
  StringRef persistString(StringRef const idString);

- private:
+ protected:

- //////////////////////////////////////////////////////////////////////////////
- /// @brief Lookup a document by token in the cache.
- /// As long as finding is retained it is guaranteed that the result
- /// stays valid. Finding should not be retained very long, if it is
- /// needed for longer, copy the value.
- //////////////////////////////////////////////////////////////////////////////
- cache::Finding lookup(StringRef idString);
-
  //////////////////////////////////////////////////////////////////////////////
- /// @brief Lookup a document from the database and insert it into the cache.
+ /// @brief Lookup a document from the database.
  /// The Slice returned here is only valid until the NEXT call of this
  /// function.
  //////////////////////////////////////////////////////////////////////////////

@@ -121,10 +106,7 @@ class TraverserCache {
  arangodb::velocypack::Slice lookupInCollection(
      StringRef idString);

- //////////////////////////////////////////////////////////////////////////////
- /// @brief The hash-cache that saves documents found in the Database
- //////////////////////////////////////////////////////////////////////////////
- std::shared_ptr<arangodb::cache::Cache> _cache;
-
  protected:

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Reusable ManagedDocumentResult that temporarily takes
@@ -0,0 +1,40 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017-2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#include "TraverserCacheFactory.h"

#include "Logger/Logger.h"
#include "Transaction/Methods.h"
#include "VocBase/TraverserCache.h"
#include "VocBase/TraverserDocumentCache.h"

using namespace arangodb;
using namespace arangodb::traverser;
using namespace arangodb::traverser::cacheFactory;

TraverserCache* cacheFactory::CreateCache(
    arangodb::transaction::Methods* trx, bool activateDocumentCache) {
  if (activateDocumentCache) {
    return new TraverserDocumentCache(trx);
  }
  return new TraverserCache(trx);
}
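Callers hold the factory's result behind the `TraverserCache` base pointer, which is exactly what `BaseOptions::activateCache` above does; the document-caching variant is then a drop-in. A hedged usage sketch, assuming the headers introduced in this commit (deleting through the base pointer is safe because the destructor became `virtual`):

```cpp
#include <memory>

#include "Transaction/Methods.h"
#include "VocBase/TraverserCache.h"
#include "VocBase/TraverserCacheFactory.h"

using arangodb::traverser::TraverserCache;

// Sketch of a call site: true yields a TraverserDocumentCache backed by
// the hash cache, false the plain TraverserCache that always reads from
// the storage engine.
std::unique_ptr<TraverserCache> makeCache(arangodb::transaction::Methods* trx,
                                          bool wantDocumentCache) {
  return std::unique_ptr<TraverserCache>(
      arangodb::traverser::cacheFactory::CreateCache(trx, wantDocumentCache));
}
```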
@@ -0,0 +1,39 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017-2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_VOC_BASE_TRAVERSER_CACHE_FACTORY_H
#define ARANGOD_VOC_BASE_TRAVERSER_CACHE_FACTORY_H 1

namespace arangodb {
namespace transaction {
class Methods;
}
namespace traverser {
class TraverserCache;

namespace cacheFactory {
TraverserCache* CreateCache(arangodb::transaction::Methods* trx,
                            bool activateDocumentCache);
}  // namespace cacheFactory
}  // namespace traverser
}  // namespace arangodb
#endif
@@ -0,0 +1,165 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017-2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#include "TraverserDocumentCache.h"

#include "Basics/StringRef.h"
#include "Basics/VelocyPackHelper.h"

#include "Aql/AqlValue.h"

#include "Cache/Common.h"
#include "Cache/Cache.h"
#include "Cache/CacheManagerFeature.h"
#include "Cache/Finding.h"

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb;
using namespace arangodb::traverser;

TraverserDocumentCache::TraverserDocumentCache(transaction::Methods* trx)
    : TraverserCache(trx), _cache(nullptr) {
  auto cacheManager = CacheManagerFeature::MANAGER;
  TRI_ASSERT(cacheManager != nullptr);
  _cache = cacheManager->createCache(cache::CacheType::Plain);
}

TraverserDocumentCache::~TraverserDocumentCache() {
  if (_cache != nullptr) {
    auto cacheManager = CacheManagerFeature::MANAGER;
    cacheManager->destroyCache(_cache);
  }
}

// @brief Only for internal use, Cache::Finding prevents
// the cache from removing this specific object. Should not be retained
// for a longer period of time.
// DO NOT give it to a caller.
cache::Finding TraverserDocumentCache::lookup(StringRef idString) {
  TRI_ASSERT(_cache != nullptr);
  VPackValueLength keySize = idString.length();
  void const* key = idString.data();
  // uint32_t keySize = static_cast<uint32_t>(idString.byteSize());
  return _cache->find(key, (uint32_t)keySize);
}

VPackSlice TraverserDocumentCache::lookupAndCache(StringRef id) {
  VPackSlice result = lookupInCollection(id);
  if (_cache != nullptr) {
    void const* key = id.begin();
    auto keySize = static_cast<uint32_t>(id.length());

    void const* resVal = result.begin();
    uint64_t resValSize = static_cast<uint64_t>(result.byteSize());
    std::unique_ptr<cache::CachedValue> value(
        cache::CachedValue::construct(key, keySize, resVal, resValSize));

    if (value) {
      bool success = _cache->insert(value.get());
      if (!success) {
        LOG_TOPIC(DEBUG, Logger::GRAPHS) << "Insert failed";
      } else {
        // Cache is responsible.
        // If this failed, well we do not store it and read it again next time.
        value.release();
      }
    }
  }
  return result;
}

void TraverserDocumentCache::insertIntoResult(StringRef idString,
                                              VPackBuilder& builder) {
  if (_cache != nullptr) {
    auto finding = lookup(idString);
    if (finding.found()) {
      auto val = finding.value();
      VPackSlice slice(val->value());
      // finding makes sure that slice content stays valid.
      builder.add(slice);
      return;
    }
  }
  // Not in cache. Fetch and insert.
  builder.add(lookupAndCache(idString));
}

aql::AqlValue TraverserDocumentCache::fetchAqlResult(StringRef idString) {
  if (_cache != nullptr) {
    auto finding = lookup(idString);
    if (finding.found()) {
      auto val = finding.value();
      VPackSlice slice(val->value());
      // finding makes sure that slice content stays valid.
      return aql::AqlValue(slice);
    }
  }
  // Not in cache. Fetch and insert.
  return aql::AqlValue(lookupAndCache(idString));
}

void TraverserDocumentCache::insertDocument(
    StringRef idString, arangodb::velocypack::Slice const& document) {
  ++_insertedDocuments;
  if (_cache != nullptr) {
    auto finding = lookup(idString);
    if (!finding.found()) {
      void const* key = idString.data();
      auto keySize = static_cast<uint32_t>(idString.length());

      void const* resVal = document.begin();
      uint64_t resValSize = static_cast<uint64_t>(document.byteSize());
      std::unique_ptr<cache::CachedValue> value(
          cache::CachedValue::construct(key, keySize, resVal, resValSize));

      if (value) {
        bool success = _cache->insert(value.get());
        if (!success) {
          LOG_TOPIC(DEBUG, Logger::GRAPHS) << "Insert document into cache failed";
        } else {
          // Cache is responsible.
          // If this failed, well we do not store it and read it again next time.
          value.release();
        }
      }
    }
  }
}

bool TraverserDocumentCache::validateFilter(
    StringRef idString, std::function<bool(VPackSlice const&)> filterFunc) {
  if (_cache != nullptr) {
    auto finding = lookup(idString);
    if (finding.found()) {
      auto val = finding.value();
      VPackSlice slice(val->value());
      // finding makes sure that slice content stays valid.
      return filterFunc(slice);
    }
  }
  // Not in cache. Fetch and insert.
  VPackSlice slice = lookupAndCache(idString);
  return filterFunc(slice);
}
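The insert path above repeats a subtle ownership dance: the freshly built `CachedValue` is released from its `unique_ptr` only when `_cache->insert()` succeeds, because the cache then owns it; on failure the `unique_ptr` still frees it. The idiom in isolation (generic sketch, not the arangodb cache API):

```cpp
#include <memory>

struct Value {};

// Hypothetical cache: insert() returns true if it took ownership of v.
struct CacheStub {
  bool insert(Value* /*v*/) { return true; }
};

void insertIntoCache(CacheStub& cache) {
  auto value = std::make_unique<Value>();
  if (cache.insert(value.get())) {
    // The cache is responsible for the object now: give up ownership,
    // otherwise the unique_ptr would double-free it.
    value.release();
  }
  // On failure, value is destroyed here and we simply re-read next time.
}
```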
@@ -0,0 +1,111 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017-2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGOD_VOC_BASE_TRAVERSER_DOCUMENT_CACHE_H
#define ARANGOD_VOC_BASE_TRAVERSER_DOCUMENT_CACHE_H 1

#include "VocBase/TraverserCache.h"

namespace arangodb {

namespace cache {
class Cache;
class Finding;
}

namespace traverser {

class TraverserDocumentCache : public TraverserCache {
 public:
  explicit TraverserDocumentCache(transaction::Methods* trx);

  ~TraverserDocumentCache();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Inserts the real document stored within the token
  /// into the given builder.
  /// The document will be taken from the hash-cache.
  /// If it is not cached it will be looked up in the StorageEngine
  //////////////////////////////////////////////////////////////////////////////

  void insertIntoResult(StringRef idString,
                        arangodb::velocypack::Builder& builder) override;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Return AQL value containing the result
  /// The document will be taken from the hash-cache.
  /// If it is not cached it will be looked up in the StorageEngine
  //////////////////////////////////////////////////////////////////////////////

  aql::AqlValue fetchAqlResult(StringRef idString) override;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Insert value into store
  //////////////////////////////////////////////////////////////////////////////

  void insertDocument(StringRef idString,
                      arangodb::velocypack::Slice const& document);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Throws the document referenced by the token into the filter
  /// function and returns its result.
  /// The document will be taken from the hash-cache.
  /// If it is not cached it will be looked up in the StorageEngine
  //////////////////////////////////////////////////////////////////////////////

  bool validateFilter(StringRef idString,
                      std::function<bool(arangodb::velocypack::Slice const&)> filterFunc);

 protected:
  //////////////////////////////////////////////////////////////////////////////
  /// @brief Lookup a document by token in the cache.
  /// As long as finding is retained it is guaranteed that the result
  /// stays valid. Finding should not be retained very long, if it is
  /// needed for longer, copy the value.
  //////////////////////////////////////////////////////////////////////////////
  cache::Finding lookup(StringRef idString);

 protected:
  //////////////////////////////////////////////////////////////////////////////
  /// @brief The hash-cache that saves documents found in the Database
  //////////////////////////////////////////////////////////////////////////////
|
||||
std::shared_ptr<arangodb::cache::Cache> _cache;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Lookup a document from the database and insert it into the cache.
|
||||
/// The Slice returned here is only valid until the NEXT call of this
|
||||
/// function.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
arangodb::velocypack::Slice lookupAndCache(
|
||||
StringRef idString);
|
||||
|
||||
|
||||
};
|
||||
} // namespace traverser
|
||||
} // namespace arangodb
|
||||
|
||||
#endif
|
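The `lookup()` contract in this header — the result is only pinned while the `Finding` is alive — implies copying a value out when it must survive longer. A hedged sketch of that rule, using a hypothetical miniature `Finding` in place of the real `cache::Finding`:

```cpp
#include <optional>
#include <string>

// Hypothetical miniature of cache::Finding: the pointed-to value is
// guaranteed valid only while the Finding object itself is alive.
struct Finding {
  std::string const* value = nullptr;
  bool found() const { return value != nullptr; }
};

// Copy the value out if it must outlive the Finding; a deep copy is
// safe to keep after the Finding has been destroyed.
std::optional<std::string> copyOut(Finding const& f) {
  if (!f.found()) {
    return std::nullopt;
  }
  return *f.value;
}
```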

@@ -30,7 +30,6 @@
#include "Cluster/ClusterEdgeCursor.h"
#include "Indexes/Index.h"
#include "VocBase/SingleServerTraverser.h"
#include "VocBase/TraverserCache.h"

#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

@@ -39,7 +38,6 @@ using namespace arangodb;
using namespace arangodb::graph;
using namespace arangodb::transaction;
using namespace arangodb::traverser;

using VPackHelper = arangodb::basics::VelocyPackHelper;

TraverserOptions::TraverserOptions(transaction::Methods* trx)

@@ -71,8 +69,7 @@ TraverserOptions::TraverserOptions(transaction::Methods* trx,
  useBreadthFirst = VPackHelper::getBooleanValue(obj, "bfs", false);
  std::string tmp = VPackHelper::getStringValue(obj, "uniqueVertices", "");
  if (tmp == "path") {
    uniqueVertices =
        arangodb::traverser::TraverserOptions::UniquenessLevel::PATH;
    uniqueVertices = TraverserOptions::UniquenessLevel::PATH;
  } else if (tmp == "global") {
    if (!useBreadthFirst) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,

@@ -80,23 +77,21 @@ TraverserOptions::TraverserOptions(transaction::Methods* trx,
                                     "supported, with bfs: true due to "
                                     "unpredictable results.");
    }
    uniqueVertices =
        arangodb::traverser::TraverserOptions::UniquenessLevel::GLOBAL;
    uniqueVertices = TraverserOptions::UniquenessLevel::GLOBAL;
  } else {
    uniqueVertices =
        arangodb::traverser::TraverserOptions::UniquenessLevel::NONE;
    uniqueVertices = TraverserOptions::UniquenessLevel::NONE;
  }

  tmp = VPackHelper::getStringValue(obj, "uniqueEdges", "");
  if (tmp == "none") {
    uniqueEdges = arangodb::traverser::TraverserOptions::UniquenessLevel::NONE;
    uniqueEdges = TraverserOptions::UniquenessLevel::NONE;
  } else if (tmp == "global") {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
                                   "uniqueEdges: 'global' is not supported, "
                                   "due to unpredictable results. Use 'path' "
                                   "or 'none' instead");
  } else {
    uniqueEdges = arangodb::traverser::TraverserOptions::UniquenessLevel::PATH;
    uniqueEdges = TraverserOptions::UniquenessLevel::PATH;
  }
}

@@ -247,12 +242,9 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
    _baseVertexExpression = new aql::Expression(query->ast(), read);
  }
  // Check for illegal option combination:
  TRI_ASSERT(uniqueEdges !=
             arangodb::traverser::TraverserOptions::UniquenessLevel::GLOBAL);
  TRI_ASSERT(
      uniqueVertices !=
          arangodb::traverser::TraverserOptions::UniquenessLevel::GLOBAL ||
      useBreadthFirst);
  TRI_ASSERT(uniqueEdges != TraverserOptions::UniquenessLevel::GLOBAL);
  TRI_ASSERT(uniqueVertices != TraverserOptions::UniquenessLevel::GLOBAL ||
             useBreadthFirst);
}

arangodb::traverser::TraverserOptions::TraverserOptions(

@@ -272,23 +264,19 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
  TRI_ASSERT(other._baseVertexExpression == nullptr);

  // Check for illegal option combination:
  TRI_ASSERT(uniqueEdges !=
             arangodb::traverser::TraverserOptions::UniquenessLevel::GLOBAL);
  TRI_ASSERT(
      uniqueVertices !=
          arangodb::traverser::TraverserOptions::UniquenessLevel::GLOBAL ||
      useBreadthFirst);
  TRI_ASSERT(uniqueEdges != TraverserOptions::UniquenessLevel::GLOBAL);
  TRI_ASSERT(uniqueVertices != TraverserOptions::UniquenessLevel::GLOBAL ||
             useBreadthFirst);
}

arangodb::traverser::TraverserOptions::~TraverserOptions() {
TraverserOptions::~TraverserOptions() {
  for (auto& pair : _vertexExpressions) {
    delete pair.second;
  }
  delete _baseVertexExpression;
}

void arangodb::traverser::TraverserOptions::toVelocyPack(
    VPackBuilder& builder) const {
void TraverserOptions::toVelocyPack(VPackBuilder& builder) const {
  VPackObjectBuilder guard(&builder);

  builder.add("minDepth", VPackValue(minDepth));

@@ -296,32 +284,31 @@ void arangodb::traverser::TraverserOptions::toVelocyPack(
  builder.add("bfs", VPackValue(useBreadthFirst));

  switch (uniqueVertices) {
    case arangodb::traverser::TraverserOptions::UniquenessLevel::NONE:
    case TraverserOptions::UniquenessLevel::NONE:
      builder.add("uniqueVertices", VPackValue("none"));
      break;
    case arangodb::traverser::TraverserOptions::UniquenessLevel::PATH:
    case TraverserOptions::UniquenessLevel::PATH:
      builder.add("uniqueVertices", VPackValue("path"));
      break;
    case arangodb::traverser::TraverserOptions::UniquenessLevel::GLOBAL:
    case TraverserOptions::UniquenessLevel::GLOBAL:
      builder.add("uniqueVertices", VPackValue("global"));
      break;
  }

  switch (uniqueEdges) {
    case arangodb::traverser::TraverserOptions::UniquenessLevel::NONE:
    case TraverserOptions::UniquenessLevel::NONE:
      builder.add("uniqueEdges", VPackValue("none"));
      break;
    case arangodb::traverser::TraverserOptions::UniquenessLevel::PATH:
    case TraverserOptions::UniquenessLevel::PATH:
      builder.add("uniqueEdges", VPackValue("path"));
      break;
    case arangodb::traverser::TraverserOptions::UniquenessLevel::GLOBAL:
    case TraverserOptions::UniquenessLevel::GLOBAL:
      builder.add("uniqueEdges", VPackValue("global"));
      break;
  }
}

void arangodb::traverser::TraverserOptions::toVelocyPackIndexes(
    VPackBuilder& builder) const {
void TraverserOptions::toVelocyPackIndexes(VPackBuilder& builder) const {
  VPackObjectBuilder guard(&builder);

  // base indexes

@@ -348,8 +335,7 @@ void arangodb::traverser::TraverserOptions::toVelocyPackIndexes(
  builder.close();
}

void arangodb::traverser::TraverserOptions::buildEngineInfo(
    VPackBuilder& result) const {
void TraverserOptions::buildEngineInfo(VPackBuilder& result) const {
  result.openObject();
  result.add("minDepth", VPackValue(minDepth));
  result.add("maxDepth", VPackValue(maxDepth));

@@ -441,17 +427,17 @@ void TraverserOptions::addDepthLookupInfo(aql::Ast* ast,
                                          attributeName, condition);
}

bool arangodb::traverser::TraverserOptions::vertexHasFilter(
    uint64_t depth) const {
bool TraverserOptions::vertexHasFilter(uint64_t depth) const {
  if (_baseVertexExpression != nullptr) {
    return true;
  }
  return _vertexExpressions.find(depth) != _vertexExpressions.end();
}

bool arangodb::traverser::TraverserOptions::evaluateEdgeExpression(
    arangodb::velocypack::Slice edge, StringRef vertexId, uint64_t depth,
    size_t cursorId) const {
bool TraverserOptions::evaluateEdgeExpression(arangodb::velocypack::Slice edge,
                                              StringRef vertexId,
                                              uint64_t depth,
                                              size_t cursorId) const {
  if (_isCoordinator) {
    // The Coordinator never checks conditions. The DBServer is responsible!
    return true;

@@ -488,7 +474,7 @@ bool arangodb::traverser::TraverserOptions::evaluateEdgeExpression(
  return evaluateExpression(expression, edge);
}

bool arangodb::traverser::TraverserOptions::evaluateVertexExpression(
bool TraverserOptions::evaluateVertexExpression(
    arangodb::velocypack::Slice vertex, uint64_t depth) const {
  arangodb::aql::Expression* expression = nullptr;

@@ -521,21 +507,19 @@ arangodb::traverser::TraverserOptions::nextCursor(ManagedDocumentResult* mmdr,
  return nextCursorLocal(mmdr, vid, list);
}

EdgeCursor*
arangodb::traverser::TraverserOptions::nextCursorCoordinator(StringRef vid,
                                                             uint64_t depth) {

EdgeCursor* TraverserOptions::nextCursorCoordinator(StringRef vid,
                                                    uint64_t depth) {
  TRI_ASSERT(_traverser != nullptr);
  auto cursor = std::make_unique<ClusterEdgeCursor>(vid, depth, _traverser);
  return cursor.release();
}

void arangodb::traverser::TraverserOptions::linkTraverser(
    arangodb::traverser::ClusterTraverser* trav) {
void TraverserOptions::linkTraverser(ClusterTraverser* trav) {
  _traverser = trav;
}

double arangodb::traverser::TraverserOptions::estimateCost(
    size_t& nrItems) const {
double TraverserOptions::estimateCost(size_t& nrItems) const {
  size_t count = 1;
  double cost = 0;
  size_t baseCreateItems = 0;
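The constructor above maps the string options `uniqueVertices`/`uniqueEdges` onto the `UniquenessLevel` enum and rejects unsupported combinations with an exception. A minimal stand-alone sketch of that string-to-enum validation pattern (the enum and error type here are illustrative, not the real ArangoDB types):

```cpp
#include <stdexcept>
#include <string>

enum class UniquenessLevel { NONE, PATH, GLOBAL };

// Illustrative parser mirroring the pattern above: one value maps
// directly, one unsupported value throws, everything else falls
// through to a default.
UniquenessLevel parseUniqueEdges(std::string const& value) {
  if (value == "none") {
    return UniquenessLevel::NONE;
  }
  if (value == "global") {
    throw std::invalid_argument(
        "uniqueEdges: 'global' is not supported, use 'path' or 'none'");
  }
  return UniquenessLevel::PATH;  // default, matches the fall-through branch
}
```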
@@ -539,10 +539,12 @@ int TRI_vocbase_t::loadCollection(arangodb::LogicalCollection* collection,

/// @brief drops a collection, worker function
int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection,
                                        DropState& state) {
                                        DropState& state, double timeout) {
  state = DROP_EXIT;
  std::string const colName(collection->name());

  double startTime = TRI_microtime();

  // do not acquire these locks instantly
  CONDITIONAL_WRITE_LOCKER(writeLocker, _collectionsLock,
                           basics::ConditionalLocking::DoNotLock);

@@ -570,6 +572,11 @@ int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection,
    TRI_ASSERT(!writeLocker.isLocked());
    TRI_ASSERT(!locker.isLocked());

    if (timeout >= 0.0 && TRI_microtime() > startTime + timeout) {
      events::DropCollection(colName, TRI_ERROR_LOCK_TIMEOUT);
      return TRI_ERROR_LOCK_TIMEOUT;
    }

    // sleep for a while
    std::this_thread::yield();
  }

@@ -1038,7 +1045,7 @@ int TRI_vocbase_t::unloadCollection(arangodb::LogicalCollection* collection,

/// @brief drops a collection
int TRI_vocbase_t::dropCollection(arangodb::LogicalCollection* collection,
                                  bool allowDropSystem) {
                                  bool allowDropSystem, double timeout) {
  TRI_ASSERT(collection != nullptr);

  StorageEngine* engine = EngineSelectorFeature::ENGINE;

@@ -1053,7 +1060,7 @@ int TRI_vocbase_t::dropCollection(arangodb::LogicalCollection* collection,
  {
    READ_LOCKER(readLocker, _inventoryLock);

    res = dropCollectionWorker(collection, state);
    res = dropCollectionWorker(collection, state, timeout);
  }

  if (state == DROP_PERFORM) {
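The worker above retries lock acquisition and bails out with `TRI_ERROR_LOCK_TIMEOUT` once the deadline passes, yielding between attempts; a negative timeout means "wait forever". A self-contained sketch of that try-lock-with-deadline loop (the function name and the plain `std::mutex` are illustrative, not the real locking primitives):

```cpp
#include <chrono>
#include <mutex>
#include <thread>

// Try to acquire a mutex until the deadline passes; a negative timeout
// means "wait forever", mirroring the semantics described above.
bool lockWithTimeout(std::mutex& m, double timeoutSeconds) {
  auto start = std::chrono::steady_clock::now();
  while (!m.try_lock()) {
    if (timeoutSeconds >= 0.0) {
      auto elapsed = std::chrono::duration<double>(
          std::chrono::steady_clock::now() - start).count();
      if (elapsed > timeoutSeconds) {
        return false;  // caller maps this to TRI_ERROR_LOCK_TIMEOUT
      }
    }
    std::this_thread::yield();  // give other threads a chance
  }
  return true;
}
```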
@@ -296,9 +296,11 @@ struct TRI_vocbase_t {
  arangodb::LogicalCollection* createCollection(
      arangodb::velocypack::Slice parameters);

  /// @brief drops a collection
  /// @brief drops a collection; no timeout if timeout is < 0.0, otherwise
  /// the timeout is in seconds. Essentially, the timeout applies to
  /// acquiring the write lock on the collection.
  int dropCollection(arangodb::LogicalCollection* collection,
                     bool allowDropSystem);
                     bool allowDropSystem, double timeout);

  /// @brief callback for collection dropping
  static bool DropCollectionCallback(arangodb::LogicalCollection* collection);

@@ -362,7 +364,7 @@ struct TRI_vocbase_t {

  /// @brief drops a collection, worker function
  int dropCollectionWorker(arangodb::LogicalCollection* collection,
                           DropState& state);
                           DropState& state, double timeout);

  /// @brief creates a new view, worker function
  std::shared_ptr<arangodb::LogicalView> createViewWorker(
@@ -226,3 +226,14 @@ if (MSVC)
  install (FILES "${LIB_EAY_RELEASE_DLL}" DESTINATION "${CMAKE_INSTALL_BINDIR}/" COMPONENT Libraries)
  install (FILES "${SSL_EAY_RELEASE_DLL}" DESTINATION "${CMAKE_INSTALL_BINDIR}/" COMPONENT Libraries)
endif()


if (THIRDPARTY_SBIN)
  install(FILES ${THIRDPARTY_SBIN}
    DESTINATION "${CMAKE_INSTALL_SBINDIR}")
endif()

if (THIRDPARTY_BIN)
  install(FILES ${THIRDPARTY_BIN}
    DESTINATION "${CMAKE_INSTALL_BINDIR}")
endif()
@@ -150,6 +150,7 @@ add_custom_target(remove_packages
  COMMAND ${CMAKE_COMMAND} -E remove ${ARANGODB_CLIENT_PACKAGE_FILE_NAME}.deb
  COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_FILE_NAME}.deb
  COMMAND ${CMAKE_COMMAND} -E remove ${ARANGODB_DBG_PACKAGE_FILE_NAME}.deb
  COMMAND ${CMAKE_COMMAND} -E remove ${PROJECT_BINARY_DIR}/bin/strip/*
  COMMENT "Removing local target packages"
  )
@@ -83,8 +83,11 @@ add_custom_target(remove_packages
  COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_FILE_NAME}.rpm
  COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_CLIENT_PACKAGE_FILE_NAME}.rpm
  COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_DBG_PACKAGE_FILE_NAME}.rpm
  COMMAND ${CMAKE_COMMAND} -E remove ${PROJECT_BINARY_DIR}/bin/strip/*
  )

list(APPEND CLEAN_PACKAGES_LIST remove_packages)
@@ -562,11 +562,12 @@
        return;
      }
      var self = this;
      if (!this.collectionsView) {
        this.collectionsView = new window.CollectionsView({
          collection: this.arangoCollectionsStore
        });
      if (this.collectionsView) {
        this.collectionsView.remove();
      }
      this.collectionsView = new window.CollectionsView({
        collection: this.arangoCollectionsStore
      });
      this.arangoCollectionsStore.fetch({
        cache: false,
        success: function () {
@@ -106,7 +106,7 @@
  <div id="collectionsThumbnailsIn" class="tileList pure-g">
    <div class="tile pure-u-1-1 pure-u-sm-1-2 pure-u-md-1-3 pure-u-lg-1-4 pure-u-xl-1-6">
      <div class="fullBorderBox">
        <a href="#" id="createCollection" class="add"><span id="newCollection" class="pull-left add-Icon"><i class="fa fa-plus-circle"></i>
        <a id="createCollection" class="add"><span id="newCollection" class="pull-left add-Icon"><i class="fa fa-plus-circle"></i>
        </span> Add Collection</a>
      </div>
    </div>
@@ -54,7 +54,7 @@
  <div id="userManagementThumbnailsIn" class="tileList pure-u">
    <div class="tile pure-u-1-1 pure-u-sm-1-2 pure-u-md-1-3 pure-u-lg-1-4 pure-u-xl-1-6">
      <div class="fullBorderBox">
        <a href="#" id="createUser" class="add">
        <a id="createUser" class="add">
          <span id="newUser" class="pull-left add-Icon"><i class="fa fa-plus-circle"></i></span>
          Add User
        </a>
@@ -13,6 +13,14 @@

    template: templateEngine.createTemplate('collectionsView.ejs'),

    remove: function () {
      this.$el.empty().off(); /* off to unbind the events */
      this.stopListening();
      this.unbind();
      delete this.el;
      return this;
    },

    refetchCollections: function () {
      var self = this;
      this.collection.fetch({
@@ -45,7 +45,10 @@ function locateCatchTest (name) {
  var file = fs.join(pu.UNITTESTS_DIR, name + pu.executableExt);

  if (!fs.exists(file)) {
    return '';
    file = fs.join(pu.BIN_DIR, name + pu.executableExt);
    if (!fs.exists(file)) {
      return '';
    }
  }
  return file;
}

@@ -54,8 +57,6 @@ function catchRunner (options) {
  let results = {};
  let rootDir = pu.UNITTESTS_DIR;

  const icuDir = pu.UNITTESTS_DIR + '/';
  require('internal').env.ICU_DATA = icuDir;
  const run = locateCatchTest('arangodbtests');
  if (!options.skipCatch) {
    if (run !== '') {

@@ -64,7 +65,7 @@ function catchRunner (options) {
        '-r',
        'junit',
        '-o',
        fs.join('out', 'catch-standard.xml')];
        fs.join(options.testOutputDirectory, 'catch-standard.xml')];
      results.basics = pu.executeAndWait(run, argv, options, 'all-catch', rootDir);
    } else {
      results.basics = {

@@ -80,7 +81,7 @@ function catchRunner (options) {
        '-r',
        'junit',
        '-o',
        fs.join('out', 'catch-cache.xml')
        fs.join(options.testOutputDirectory, 'catch-cache.xml')
      ];
      results.cache_suite = pu.executeAndWait(run, argv, options,
                                              'cache_suite', rootDir);

@@ -98,7 +99,7 @@ function catchRunner (options) {
        '-r',
        'junit',
        '-o',
        fs.join('out', 'catch-geo.xml')
        fs.join(options.testOutputDirectory, 'catch-geo.xml')
      ];
      results.geo_suite = pu.executeAndWait(run, argv, options, 'geo_suite', rootDir);
    } else {
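The `locateCatchTest` change above adds a fallback: if the test binary is not in the unit-test directory, look in the general binary directory before giving up. The same lookup-with-fallback pattern in a stand-alone C++17 sketch (the directory names are illustrative):

```cpp
#include <filesystem>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// Return the first existing candidate path, or an empty path if none exists.
fs::path locateExecutable(std::string const& name) {
  std::vector<fs::path> candidates = {
      fs::path("tests/bin") / name,  // preferred location (illustrative)
      fs::path("bin") / name         // fallback location (illustrative)
  };
  for (auto const& candidate : candidates) {
    if (fs::exists(candidate)) {
      return candidate;
    }
  }
  return {};  // callers treat an empty path as "not found"
}
```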
@@ -941,7 +941,16 @@ function executePlanForCollections(plannedCollections) {
                      database,
                      collections[collection].planId);

        db._drop(collection);
        try {
          db._drop(collection, {timeout: 1.0});
        }
        catch (err) {
          console.debug("could not drop local shard '%s/%s' of '%s/%s' within 1 second, trying again later",
                        database,
                        collection,
                        database,
                        collections[collection].planId);
        }
      }
    }
  });

@@ -983,6 +992,9 @@ function updateCurrentForCollections(localErrors, currentCollections) {
        Object.assign(agencyIndex, index);
        // Fix up the IDs of the indexes:
        let pos = index.id.indexOf("/");
        if (agencyIndex.hasOwnProperty("selectivityEstimate")) {
          delete agencyIndex.selectivityEstimate;
        }
        if (pos >= 0) {
          agencyIndex.id = index.id.slice(pos+1);
        } else {
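The fixup above normalizes index IDs of the form `"<collection>/<number>"` down to just the numeric part before reporting them to the agency. The same prefix-stripping in a small C++ sketch:

```cpp
#include <string>

// Strip an optional "collection/" prefix from an index id,
// e.g. "products/12345" -> "12345"; ids without a slash pass through.
std::string normalizeIndexId(std::string const& id) {
  auto pos = id.find('/');
  if (pos != std::string::npos) {
    return id.substr(pos + 1);
  }
  return id;
}
```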
@@ -25,6 +25,7 @@

#include "Basics/Utf8Helper.h"
#include "Basics/files.h"
#include "Basics/FileUtils.h"
#include "Basics/directories.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"

@@ -95,6 +96,16 @@ void* LanguageFeature::prepareIcu(std::string const& binaryPath, std::string con
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << msg;
      FATAL_ERROR_EXIT();
    }
    else {
      std::string icu_path = path.substr(0, path.length() - fn.length());
      FileUtils::makePathAbsolute(icu_path);
      FileUtils::normalizePath(icu_path);
#ifndef _WIN32
      setenv("ICU_DATA", icu_path.c_str(), 1);
#else
      SetEnvironmentVariable("ICU_DATA", icu_path.c_str());
#endif
    }
  }

  void* icuDataPtr = TRI_SlurpFile(TRI_UNKNOWN_MEM_ZONE, path.c_str(), nullptr);
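The `prepareIcu` change derives the directory containing the ICU data file and exports it via `ICU_DATA`, using `setenv` on POSIX and `SetEnvironmentVariable` on Windows. A condensed stand-alone sketch of that portable pattern (the helper name is illustrative):

```cpp
#include <string>
#ifndef _WIN32
#include <cstdlib>
#else
#include <windows.h>
#endif

// Export a variable into this process's environment, portably.
// Child processes spawned afterwards inherit it.
void exportEnv(std::string const& name, std::string const& value) {
#ifndef _WIN32
  setenv(name.c_str(), value.c_str(), 1 /* overwrite */);
#else
  SetEnvironmentVariableA(name.c_str(), value.c_str());
#endif
}
```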
@@ -94,8 +94,11 @@ static char const* translateSignal(int signal) {
}

static void StopHandler(int) {
  LOG_TOPIC(INFO, Logger::STARTUP) << "received SIGINT for supervisor";
  kill(CLIENT_PID, SIGTERM);
  LOG_TOPIC(INFO, Logger::STARTUP) << "received SIGINT for supervisor; commanding client [" << CLIENT_PID << "] to shut down.";
  int rc = kill(CLIENT_PID, SIGTERM);
  if (rc < 0) {
    LOG_TOPIC(ERR, Logger::STARTUP) << "commanding client [" << CLIENT_PID << "] to shut down failed: [" << errno << "] " << strerror(errno);
  }
  DONE = true;
}

@@ -187,7 +190,7 @@ void SupervisorFeature::daemonize() {
        signal(SIGINT, StopHandler);
        signal(SIGTERM, StopHandler);

        LOG_TOPIC(DEBUG, Logger::STARTUP) << "supervisor has forked a child process with pid " << _clientPid;
        LOG_TOPIC(INFO, Logger::STARTUP) << "supervisor has forked a child process with pid " << _clientPid;

        TRI_SetProcessTitle("arangodb [supervisor]");

@@ -200,9 +203,9 @@ void SupervisorFeature::daemonize() {
        int res = waitpid(_clientPid, &status, 0);
        bool horrible = true;

        LOG_TOPIC(DEBUG, Logger::STARTUP) << "waitpid woke up with return value "
                                          << res << " and status " << status
                                          << " and DONE = " << (DONE ? "true" : "false");
        LOG_TOPIC(INFO, Logger::STARTUP) << "waitpid woke up with return value "
                                         << res << " and status " << status
                                         << " and DONE = " << (DONE ? "true" : "false");

        if (DONE) {
          // signal handler for SIGINT or SIGTERM was invoked
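The supervisor pattern above forwards SIGTERM to the forked child, checks the `kill()` return code, and then waits with `waitpid`, distinguishing an intentional shutdown (flag set by the handler) from a crash. A trimmed POSIX sketch of the same forward-and-wait loop, with error handling reduced to the essentials:

```cpp
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile sig_atomic_t DONE = 0;
static pid_t CLIENT_PID = 0;

// Forward the shutdown request to the child; record failures via errno.
static void stopHandler(int) {
  if (kill(CLIENT_PID, SIGTERM) < 0) {
    // NOTE: fprintf/strerror are not async-signal-safe; kept here
    // only for brevity of the sketch.
    fprintf(stderr, "forwarding SIGTERM failed: [%d] %s\n", errno,
            strerror(errno));
  }
  DONE = 1;
}

int superviseChild(pid_t child) {
  CLIENT_PID = child;
  signal(SIGINT, stopHandler);
  signal(SIGTERM, stopHandler);

  int status = 0;
  waitpid(child, &status, 0);  // wakes up when the child exits
  return DONE ? 0 : status;    // DONE set => intentional shutdown
}
```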
@@ -216,9 +216,9 @@ static void throwFileWriteError(int fd, std::string const& filename) {
}

void spit(std::string const& filename, char const* ptr, size_t len) {
  int fd =
      TRI_TRACKED_CREATE_FILE(filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC | TRI_O_CLOEXEC,
                              S_IRUSR | S_IWUSR | S_IRGRP);
  int fd = TRI_TRACKED_CREATE_FILE(filename.c_str(),
                                   O_WRONLY | O_CREAT | O_TRUNC | TRI_O_CLOEXEC,
                                   S_IRUSR | S_IWUSR | S_IRGRP);

  if (fd == -1) {
    throwFileWriteError(fd, filename);

@@ -239,9 +239,9 @@ void spit(std::string const& filename, char const* ptr, size_t len) {
}

void spit(std::string const& filename, std::string const& content) {
  int fd =
      TRI_TRACKED_CREATE_FILE(filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC | TRI_O_CLOEXEC,
                              S_IRUSR | S_IWUSR | S_IRGRP);
  int fd = TRI_TRACKED_CREATE_FILE(filename.c_str(),
                                   O_WRONLY | O_CREAT | O_TRUNC | TRI_O_CLOEXEC,
                                   S_IRUSR | S_IWUSR | S_IRGRP);

  if (fd == -1) {
    throwFileWriteError(fd, filename);

@@ -265,9 +265,9 @@ void spit(std::string const& filename, std::string const& content) {
}

void spit(std::string const& filename, StringBuffer const& content) {
  int fd =
      TRI_TRACKED_CREATE_FILE(filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC | TRI_O_CLOEXEC,
                              S_IRUSR | S_IWUSR | S_IRGRP);
  int fd = TRI_TRACKED_CREATE_FILE(filename.c_str(),
                                   O_WRONLY | O_CREAT | O_TRUNC | TRI_O_CLOEXEC,
                                   S_IRUSR | S_IWUSR | S_IRGRP);

  if (fd == -1) {
    throwFileWriteError(fd, filename);

@@ -623,9 +623,17 @@ std::string dirname(std::string const& name) {

void makePathAbsolute(std::string& path) {
  std::string cwd = FileUtils::currentDirectory().result();
  char* p = TRI_GetAbsolutePath(path.c_str(), cwd.c_str());
  path = p;
  TRI_FreeString(TRI_CORE_MEM_ZONE, p);

  if (path.empty()) {
    path = cwd;
  } else {
    char* p = TRI_GetAbsolutePath(path.c_str(), cwd.c_str());

    if (p != nullptr) {
      path = p;
      TRI_FreeString(TRI_CORE_MEM_ZONE, p);
    }
  }
}
}
}
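The hardened `makePathAbsolute` treats an empty input as "current directory" and tolerates a null result from the underlying resolver instead of dereferencing it. With std::filesystem the same defensive behaviour can be sketched as:

```cpp
#include <filesystem>
#include <string>
#include <system_error>

namespace fs = std::filesystem;

// Make `path` absolute; an empty input resolves to the current directory,
// and resolution errors leave the input unchanged instead of crashing.
void makePathAbsolute(std::string& path) {
  std::error_code ec;
  if (path.empty()) {
    path = fs::current_path(ec).string();
    return;
  }
  fs::path abs = fs::absolute(path, ec);
  if (!ec) {
    path = abs.string();
  }  // on error: keep the original path
}
```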
@@ -171,7 +171,8 @@ SslClientConnection::SslClientConnection(Endpoint* endpoint,
    : GeneralClientConnection(endpoint, requestTimeout, connectTimeout,
                              connectRetries),
      _ssl(nullptr),
      _ctx(nullptr) {
      _ctx(nullptr),
      _sslProtocol(sslProtocol) {

  TRI_invalidatesocket(&_socket);
  init(sslProtocol);

@@ -185,7 +186,8 @@ SslClientConnection::SslClientConnection(std::unique_ptr<Endpoint>& endpoint,
    : GeneralClientConnection(endpoint, requestTimeout, connectTimeout,
                              connectRetries),
      _ssl(nullptr),
      _ctx(nullptr) {
      _ctx(nullptr),
      _sslProtocol(sslProtocol) {

  TRI_invalidatesocket(&_socket);
  init(sslProtocol);

@@ -293,7 +295,14 @@ bool SslClientConnection::connectSocket() {
    _isConnected = false;
    return false;
  }

  switch (protocol_e(_sslProtocol)) {
    case TLS_V1:
    case TLS_V12:
    default:
      SSL_set_tlsext_host_name(_ssl, _endpoint->host().c_str());
  }

  SSL_set_connect_state(_ssl);

  if (SSL_set_fd(_ssl, (int)TRI_get_fd_or_handle_of_socket(_socket)) != 1) {
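The new `switch` enables SNI by calling `SSL_set_tlsext_host_name` before the handshake, so TLS servers that host multiple certificates behind one address can present the right one. A minimal OpenSSL client-side sketch of the same step (context and socket setup omitted; the function name and `hostname` parameter are illustrative):

```cpp
#include <openssl/ssl.h>

// Prepare an SSL handle for a client connection with SNI enabled.
// Must be called before the TLS handshake starts.
bool prepareClientSsl(SSL* ssl, char const* hostname) {
  // Announce the target hostname in the ClientHello (SNI extension);
  // without this, name-based virtual hosts may serve the wrong cert.
  if (SSL_set_tlsext_host_name(ssl, hostname) != 1) {
    return false;
  }
  SSL_set_connect_state(ssl);  // act as the client side
  return true;
}
```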
@@ -131,6 +131,12 @@ class SslClientConnection final : public GeneralClientConnection {
  //////////////////////////////////////////////////////////////////////////////

  SSL_CTX* _ctx;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief SSL version
  //////////////////////////////////////////////////////////////////////////////

  uint64_t _sslProtocol;
};
}
}
@@ -525,7 +525,10 @@ static std::string GetEndpointFromUrl(std::string const& url) {
  size_t slashes = 0;

  while (p < e) {
    if (*p == '/') {
    if (*p == '?') {
      // http(s)://example.com?foo=bar
      return url.substr(0, p - url.c_str());
    } else if (*p == '/') {
      if (++slashes == 3) {
        return url.substr(0, p - url.c_str());
      }

@@ -777,33 +780,25 @@ void JS_Download(v8::FunctionCallbackInfo<v8::Value> const& args) {
  std::string relative;

  if (url.substr(0, 7) == "http://") {
    size_t found = url.find('/', 7);
    endpoint = GetEndpointFromUrl(url).substr(7);
    relative = url.substr(7 + endpoint.length());

    relative = "/";
    if (found != std::string::npos) {
      relative.append(url.substr(found + 1));
      endpoint = url.substr(7, found - 7);
    } else {
      endpoint = url.substr(7);
    if (relative.empty() || relative[0] != '/') {
      relative = "/" + relative;
    }
    found = endpoint.find(":");
    if (found == std::string::npos) {
      endpoint = endpoint + ":80";
    if (endpoint.find(':') == std::string::npos) {
      endpoint.append(":80");
    }
    endpoint = "tcp://" + endpoint;
  } else if (url.substr(0, 8) == "https://") {
    size_t found = url.find('/', 8);
    endpoint = GetEndpointFromUrl(url).substr(8);
    relative = url.substr(8 + endpoint.length());

    relative = "/";
    if (found != std::string::npos) {
      relative.append(url.substr(found + 1));
      endpoint = url.substr(8, found - 8);
    } else {
      endpoint = url.substr(8);
    if (relative.empty() || relative[0] != '/') {
      relative = "/" + relative;
    }
    found = endpoint.find(":");
    if (found == std::string::npos) {
      endpoint = endpoint + ":443";
    if (endpoint.find(':') == std::string::npos) {
      endpoint.append(":443");
    }
    endpoint = "ssl://" + endpoint;
  } else if (url.substr(0, 6) == "srv://") {

@@ -818,14 +813,25 @@ void JS_Download(v8::FunctionCallbackInfo<v8::Value> const& args) {
    }
    endpoint = "srv://" + endpoint;
  } else if (!url.empty() && url[0] == '/') {
    size_t found;
    // relative URL. prefix it with last endpoint
    relative = url;
    url = lastEndpoint + url;
    endpoint = lastEndpoint;
    if (endpoint.substr(0, 5) == "http:") {
      endpoint = "tcp:" + endpoint.substr(5);
      endpoint = endpoint.substr(5);
      found = endpoint.find(":");
      if (found == std::string::npos) {
        endpoint = endpoint + ":80";
      }
      endpoint = "tcp:" + endpoint;
    } else if (endpoint.substr(0, 6) == "https:") {
      endpoint = "ssl:" + endpoint.substr(6);
      endpoint = endpoint.substr(6);
      found = endpoint.find(":");
      if (found == std::string::npos) {
        endpoint = endpoint + ":443";
      }
      endpoint = "ssl:" + endpoint;
    }
  } else {
    TRI_V8_THROW_SYNTAX_ERROR("unsupported URL specified");
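The rework above extracts the endpoint (scheme, host, optional port) from a URL, appends the scheme's default port when none is given, and maps http/https onto ArangoDB's tcp/ssl endpoint schemes. A compact stand-alone sketch of that normalization (std::string only; the function name is illustrative):

```cpp
#include <string>

// Turn "http://example.com/x" into "tcp://example.com:80" and
// "https://example.com:8530/x" into "ssl://example.com:8530".
// Returns an empty string for unsupported schemes.
std::string urlToEndpoint(std::string const& url) {
  std::string scheme, defaultPort, rest;
  if (url.compare(0, 7, "http://") == 0) {
    scheme = "tcp://"; defaultPort = ":80"; rest = url.substr(7);
  } else if (url.compare(0, 8, "https://") == 0) {
    scheme = "ssl://"; defaultPort = ":443"; rest = url.substr(8);
  } else {
    return "";
  }
  // The host part ends at the first '/' or '?' (query without a path).
  auto cut = rest.find_first_of("/?");
  std::string host = rest.substr(0, cut);
  if (host.find(':') == std::string::npos) {
    host += defaultPort;  // no explicit port: add the scheme default
  }
  return scheme + host;
}
```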
@@ -1,2 +1,14 @@
#define CATCH_CONFIG_MAIN
#define CATCH_CONFIG_RUNNER
#include "catch.hpp"
#include "Logger/Logger.h"

int main( int argc, char* argv[] )
{
  // global setup...
  arangodb::Logger::initialize(false);
  int result = Catch::Session().run( argc, argv );
  arangodb::Logger::shutdown();
  // global clean-up...

  return ( result < 0xff ? result : 0xff );
}