
Merge branch 'devel' of https://github.com/arangodb/arangodb into engine-api

jsteemann 2017-02-24 17:51:59 +01:00
commit d0df1952e8
158 changed files with 2278 additions and 1457 deletions

.gitignore vendored

@ -30,6 +30,7 @@ compile_commands.json
instanceinfo.json
testresult.json
testsStarted
soc-pokec-*
build.sh
build*/
@ -99,12 +100,6 @@ js/apps/system/_admin/aardvark/APP/frontend/build/scripts.html.part
js/common/tests/shell/shell-database.js
3rdParty/boost/1.61.0/b2
3rdParty/boost/1.61.0/bin.v2/
3rdParty/boost/1.61.0/bjam
3rdParty/boost/1.61.0/project-config.jam
3rdParty/boost/1.61.0/stage/
.gdb-history
npm-debug.log


@ -11,6 +11,7 @@ branches:
- "2.8"
- "3.0"
- "3.1"
- "3.2"
language: cpp
cache: ccache


@ -578,14 +578,24 @@ set(ICU_DT ${ICU_DT} PARENT_SCOPE)
set(ICU_DT_DEST "icudtl.dat" )
set(ICU_DT_DEST ${ICU_DT_DEST} PARENT_SCOPE)
configure_file(
"${ICU_DT}"
"${CMAKE_BINARY_DIR}/bin/${CONFIGURATION}/${ICU_DT_DEST}"
COPYONLY)
configure_file(
"${ICU_DT}"
"${CMAKE_BINARY_DIR}/tests/${CONFIGURATION}/${ICU_DT_DEST}"
COPYONLY)
if (MSVC)
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/${ICU_DT_DEST})
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/tests/$<CONFIG>/${ICU_DT_DEST})
else()
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/bin/${ICU_DT_DEST})
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/tests/${ICU_DT_DEST})
endif()
if (NOT WIN32)
add_custom_target(nonthinV8
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../../Installation/archive-de-thinner.sh ${V8_REL_TARGET_PATH}


@ -113,11 +113,16 @@ class Collection {
ObjectIterator it(slice);
while (it.valid()) {
result.emplace(std::move(it.key(true).copyString()));
result.emplace(it.key(true).copyString());
it.next();
}
}
template<typename T>
static void keys(Slice const* slice, T& result) {
return keys(*slice, result);
}
static void keys(Slice const& slice, std::vector<std::string>& result) {
// pre-allocate result vector
result.reserve(checkOverflow(slice.length()));
@ -130,9 +135,19 @@ class Collection {
}
}
template<typename T>
static void unorderedKeys(Slice const& slice, T& result) {
ObjectIterator it(slice, true);
while (it.valid()) {
result.emplace(it.key(true).copyString());
it.next();
}
}
template<typename T>
static void keys(Slice const* slice, T& result) {
return keys(*slice, result);
static void unorderedKeys(Slice const* slice, T& result) {
return unorderedKeys(*slice, result);
}
static Builder extract(Slice const& slice, int64_t from, int64_t to = INT64_MAX);
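For context, a minimal usage sketch of the keys()/unorderedKeys() helpers, assuming the velocypack headers and aliases; the JSON input and container choices are illustrative:

```cpp
#include <set>
#include <string>
#include <vector>

#include <velocypack/Collection.h>
#include <velocypack/Parser.h>
#include <velocypack/velocypack-aliases.h>

int main() {
  auto doc = VPackParser::fromJson(R"({"b":1,"a":2,"c":3})");

  // keys(): iterates via the object's index table
  std::vector<std::string> keys;
  VPackCollection::keys(doc->slice(), keys);

  // unorderedKeys(): sequential iteration in stored order; faster, but the
  // order is unspecified, hence a container that re-sorts or does not care
  std::set<std::string> keySet;
  VPackCollection::unorderedKeys(doc->slice(), keySet);
  return 0;
}
```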


@ -1,6 +1,12 @@
devel
-----
* don't let read-only transactions block the WAL collector
v3.2.alpha2 (2017-02-20)
------------------------
* ui: fixed issue #2065
* ui: fixed a dashboard related memory issue
@ -10,12 +16,12 @@ devel
* Removed undocumented internal HTTP API:
* PUT _api/edges
The documented GET _api/edges and the undocumented POST _api/edges remain unmodified.
* moved V8 code into a git submodule
this requires running the command
git submodule update --init --recursive
once after a source code update or fresh checkout
@ -35,16 +41,22 @@ devel
arangoexport can be used to export collections to json, jsonl or xml
and export a graph or collections to xgmml.
* fixed a race condition when closing a connection
* raised default hard limit on threads for very small machines to 64
* fixed negative counting of http connections in UI
v3.2.alpha1 (2017-02-05)
------------------------
* added figure `httpRequests` to AQL query statistics
* removed revisions cache intermediate layer implementation
* obsoleted startup options `--database.revision-cache-chunk-size` and
`--database.revision-cache-target-size`
* fix potential port number over-/underruns
@ -58,7 +70,29 @@ v3.2.alpha1 (2017-02-05)
* more detailed stacktraces in Foxx apps
v3.1.11 (2017-02-14)
v3.1.12 (XXXX-XX-XX)
--------------------
* disable shell color escape sequences on Windows
* fixed issue #2326
* fixed issue #2320
* fixed issue #2315
* fixed a race condition when closing a connection
* raised default hard limit on threads for very small machines to 64
* fixed negative counting of http connections in UI
* fixed a race when renaming collections
* fixed a race when dropping databases
v3.1.11 (2017-02-17)
--------------------
* fixed a race between connection closing and sending out last chunks of data to clients
@ -168,9 +202,10 @@ shards.
* added server startup option `--query.memory-limit`
* added convenience function to create vertex-centric indexes.
Usage: `db.collection.ensureVertexCentricIndex("label", {type: "hash", direction: "outbound"})`
That will create an index that can be used on OUTBOUND with filtering on the
edge attribute `label`.
* change default log output for tools to stdout (instead of stderr)
@ -641,6 +676,8 @@ v3.1.alpha2 (2016-09-01)
v3.0.13 (XXXX-XX-XX)
--------------------
* fixed issue #2315
* fixed issue #2210


@ -97,7 +97,7 @@ set(ARANGODB_FRIENDLY_STRING "ArangoDB - the multi-model database")
set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program")
set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - export")
set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - importer")
set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - dataexporter")
set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - datae xporter")
set(ARANGO_IMP_FRIENDLY_STRING "arangoimp - TSV/CSV/JSON importer")
set(ARANGOSH_FRIENDLY_STRING "arangosh - commandline client")
set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - vpack printer")
@ -933,6 +933,7 @@ add_subdirectory(Documentation)
add_dependencies(arangobench zlibstatic)
add_dependencies(arangod zlibstatic)
add_dependencies(arangodump zlibstatic)
add_dependencies(arangoexport zlibstatic)
add_dependencies(arangoimp zlibstatic)
add_dependencies(arangorestore zlibstatic)
add_dependencies(arangosh zlibstatic)
@ -942,6 +943,7 @@ if (NOT USE_PRECOMPILED_V8)
add_dependencies(arangobench v8_build)
add_dependencies(arangod v8_build)
add_dependencies(arangodump v8_build)
add_dependencies(arangoexport v8_build)
add_dependencies(arangoimp v8_build)
add_dependencies(arangorestore v8_build)
add_dependencies(arangosh v8_build)


@ -48,7 +48,7 @@ Export JSONL
unix> arangoexport --type jsonl --collection test
This exports the collection *test* into the output directory *export* as jsonl. Every line in the export is one document from the collection *test* as json.
This exports the collection *test* into the output directory *export* as [jsonl](http://jsonlines.org). Every line in the export is one document from the collection *test* as json.
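For illustration, the export file for a hypothetical two-document collection could look like this (attribute values are made up):

```
{"_key":"104","_id":"test/104","name":"one"}
{"_key":"105","_id":"test/105","name":"two"}
```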
Export XML
----------


ArangoDB can also do a so-called *broadcast bind* using
host. This may be useful on development systems that frequently change their
network setup like laptops.
### Special note on IPv6 link-local addresses
ArangoDB can also listen on IPv6 link-local addresses by adding the zone ID
to the IPv6 address in the form `[ipv6-link-local-address%zone-id]`. However,
what you probably instead want is to bind to a local IPv6 address. Local IPv6
addresses start with `fd`. If you only see a `fe80:` IPv6 address in your
interface configuration but no IPv6 address starting with `fd`, your interface
has no local IPv6 address assigned. You can read more about IPv6 link-local
addresses [here](https://en.wikipedia.org/wiki/Link-local_address#IPv6).
**Example**
Bind to a link-local and local IPv6 address.
unix> ifconfig
This command lists all interfaces and assigned IP addresses. The link-local
address may be `fe80::6257:18ff:fe82:3ec6%eth0` (IPv6 address plus interface name).
A local IPv6 address may be `fd12:3456::789a`. To bind ArangoDB to the link-local
address, start *arangod* with `--server.endpoint tcp://[fe80::6257:18ff:fe82:3ec6%eth0]:8529`.
Use telnet to test the connection.
unix> telnet fe80::6257:18ff:fe82:3ec6%eth0 8529
Trying fe80::6257:18ff:fe82:3ec6...
Connected to my-machine.
Escape character is '^]'.
GET / HTTP/1.1
HTTP/1.1 301 Moved Permanently
Location: /_db/_system/_admin/aardvark/index.html
Content-Type: text/html
Server: ArangoDB
Connection: Keep-Alive
Content-Length: 197
<html><head><title>Moved</title></head><body><h1>Moved</h1><p>This page has moved to <a href="/_db/_system/_admin/aardvark/index.html">/_db/_system/_admin/aardvark/index.html</a>.</p></body></html>
### Reuse address


@ -330,7 +330,11 @@ while [ $# -gt 0 ]; do
--targetDir)
shift
TARGET_DIR=$1
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=$1"
if test "`uname -o||true`" == "Cygwin"; then
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=`cygpath --windows $1`"
else
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=$1"
fi
shift
;;


@ -1,5 +1,5 @@
name: @CPACK_PACKAGE_NAME@
version: @CPACK_PACKAGE_VERSION@
version: @CPACK_PACKAGE_VERSION@-@ARANGODB_PACKAGE_REVISION@
description: "ArangoDB is a native multi-model database with flexible data models for
documents, graphs, and key-values. Build high performance applications using a convenient
SQL-like query language or JavaScript extensions. https://www.arangodb.com"
@ -15,9 +15,9 @@ grade: stable
parts:
@CPACK_PACKAGE_NAME@:
source: @CPACK_PACKAGE_TGZ@
source: @CMAKE_BINARY_DIR@/@CPACK_PACKAGE_FILE_NAME@.tar.gz
plugin: dump
snap:
prime:
- -etc
- -var
- -lib


@ -366,6 +366,34 @@ via the environment variable or in the menu. Given we want to store the symbols
You then will be able to see stack traces in the debugger.
You may also try to download the symbols manually using:
symchk.exe arangod.exe /s SRV*e:/symbol_cache/cache*https://www.arangodb.com/repositories/symsrv/
The symbol server at https://www.arangodb.com/repositories/symsrv/ is browsable; thus you can easily download the files you need by hand. It contains a list of directories corresponding to the components of ArangoDB:
- arango - the basic arangodb library needed by all components
- arango_v8 - the basic V8 wrappers needed by all components
- arangod - the server process
- the client utilities:
- arangob
- arangobench
- arangoexport
- arangoimp
- arangorestore
- arangosh
- arangovpack
In these directories you will find subdirectories with the hash corresponding to the id of the binaries. Their date should correspond to the release date of the respective ArangoDB release.
This means, for example, for ArangoDB 3.1.11:
https://www.arangodb.com/repositories/symsrv/arangod.pdb/A8B899D2EDFC40E994C30C32FCE5FB346/arangod.pd_
This file is a Microsoft cabinet file, which is slightly compressed. You can dismantle it so that Windows Explorer offers you its proper handler by renaming it to `.cab`; click the now-named `arangod.cab` and copy the contained `arangod.pdb` into your symbol path.
Coredump analysis
-----------------
While Visual Studio may carry a nice shiny GUI, the concept of a GUI fails miserably in, for example, test automation. Getting an overview of all running threads is a tedious task with it. Here the command-line version of [WinDBG](http://www.windbg.org/), cdb, comes to the aid. `testing.js` utilizes it to obtain automatic stack traces for crashes.


@ -25,13 +25,7 @@
#include "Agency/Agent.h"
#include "Agency/Job.h"
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
AddFollower::AddFollower(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,


@ -1,4 +1,4 @@
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
@ -811,7 +811,7 @@ bool AgencyComm::exists(std::string const& key) {
return false;
}
auto parts = arangodb::basics::StringUtils::split(key, "/");
auto parts = basics::StringUtils::split(key, "/");
std::vector<std::string> allParts;
allParts.reserve(parts.size() + 1);
allParts.push_back(AgencyCommManager::path());
@ -1130,7 +1130,7 @@ bool AgencyComm::ensureStructureInitialized() {
std::vector<std::string>({AgencyCommManager::path(), "Secret"}));
if (!secretValue.isString()) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Couldn't find secret in agency!";
LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't find secret in agency!";
return false;
}
std::string const secret = secretValue.copyString();
@ -1489,16 +1489,7 @@ AgencyCommResult AgencyComm::send(
<< "': " << body;
arangodb::httpclient::SimpleHttpClient client(connection, timeout, false);
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr only happens during controlled shutdown
result._message = "could not send request to agency because of shutdown";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "could not send request to agency because of shutdown";
return result;
}
client.setJwt(cc->jwt());
client.setJwt(ClusterComm::instance()->jwt());
client.keepConnectionOnDestruction(true);
// set up headers
@ -1699,10 +1690,10 @@ bool AgencyComm::tryInitializeStructure(std::string const& jwtSecret) {
return result.successful();
} catch (std::exception const& e) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Fatal error initializing agency " << e.what();
LOG_TOPIC(FATAL, Logger::CLUSTER) << "Fatal error initializing agency " << e.what();
FATAL_ERROR_EXIT();
} catch (...) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Fatal error initializing agency";
LOG_TOPIC(FATAL, Logger::CLUSTER) << "Fatal error initializing agency";
FATAL_ERROR_EXIT();
}
}


@ -629,6 +629,14 @@ class AgencyComm {
void updateEndpoints(arangodb::velocypack::Slice const&);
bool lockRead(std::string const&, double, double);
bool lockWrite(std::string const&, double, double);
bool unlockRead(std::string const&, double);
bool unlockWrite(std::string const&, double);
AgencyCommResult sendTransactionWithFailover(AgencyTransaction const&,
double timeout = 0.0);


@ -1,3 +1,4 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
@ -47,15 +48,18 @@ Agent::Agent(config_t const& config)
_config(config),
_lastCommitIndex(0),
_lastAppliedIndex(0),
_lastCompactionIndex(0),
_leaderCommitIndex(0),
_spearhead(this),
_readDB(this),
_transient(this),
_compacted(this),
_nextCompationAfter(_config.compactionStepSize()),
_inception(std::make_unique<Inception>(this)),
_activator(nullptr),
_compactor(this),
_ready(false) {
_ready(false),
_preparing(false) {
_state.configure(this);
_constituent.configure(this);
}
@ -153,7 +157,7 @@ std::string Agent::leaderID() const {
/// Are we leading?
bool Agent::leading() const {
return _constituent.leading();
return _preparing || _constituent.leading();
}
/// Start constituent personality
@ -272,14 +276,14 @@ bool Agent::recvAppendEntriesRPC(
// Update commit index
if (queries->slice().type() != VPackValueType::Array) {
LOG_TOPIC(WARN, Logger::AGENCY)
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Received malformed entries for appending. Discarding!";
return false;
}
if (!_constituent.checkLeader(term, leaderId, prevIndex, prevTerm)) {
LOG_TOPIC(WARN, Logger::AGENCY) << "Not accepting appendEntries from "
<< leaderId;
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Not accepting appendEntries from " << leaderId;
return false;
}
@ -324,8 +328,8 @@ bool Agent::recvAppendEntriesRPC(
/// Leader's append entries
void Agent::sendAppendEntriesRPC() {
std::chrono::duration<int, std::ratio<1, 1000000>> const dt (
(_config.waitForSync() ? 40000 : 2000));
std::chrono::duration<int, std::ratio<1, 1000>> const dt (
(_config.waitForSync() ? 40 : 2));
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr only happens during controlled shutdown
@ -351,12 +355,6 @@ void Agent::sendAppendEntriesRPC() {
std::vector<log_t> unconfirmed = _state.get(last_confirmed);
if (unconfirmed.empty()) {
// this can only happen if the log is totally empty (I think, Max)
// and so it is OK, to skip the time check here
continue;
}
index_t highest = unconfirmed.back().index;
// _lastSent, _lastHighest: local and single threaded access
@ -378,7 +376,8 @@ void Agent::sendAppendEntriesRPC() {
// Body
Builder builder;
builder.add(VPackValue(VPackValueType::Array));
if ((system_clock::now() - _earliestPackage[followerId]).count() > 0) {
if (!_preparing &&
((system_clock::now() - _earliestPackage[followerId]).count() > 0)) {
for (size_t i = 1; i < unconfirmed.size(); ++i) {
auto const& entry = unconfirmed.at(i);
builder.add(VPackValue(VPackValueType::Object));
@ -413,8 +412,9 @@ void Agent::sendAppendEntriesRPC() {
"1", 1, _config.poolAt(followerId),
arangodb::rest::RequestType::POST, path.str(),
std::make_shared<std::string>(builder.toJson()), headerFields,
std::make_shared<AgentCallback>(this, followerId, highest, toLog),
5.0 * _config.maxPing(), true);
std::make_shared<AgentCallback>(
this, followerId, (toLog) ? highest : 0, toLog),
std::max(1.0e-3 * toLog * dt.count(), 0.25 * _config.minPing()), true);
// _lastSent, _lastHighest: local and single threaded access
_lastSent[followerId] = system_clock::now();
@ -422,7 +422,7 @@ void Agent::sendAppendEntriesRPC() {
if (toLog > 0) {
_earliestPackage[followerId] = system_clock::now() + toLog * dt;
LOG_TOPIC(TRACE, Logger::AGENCY)
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Appending " << unconfirmed.size() - 1 << " entries up to index "
<< highest << " to follower " << followerId << ". Message: "
<< builder.toJson()
@ -430,7 +430,7 @@ void Agent::sendAppendEntriesRPC() {
<< std::chrono::duration<double, std::milli>(
_earliestPackage[followerId]-system_clock::now()).count() << "ms";
} else {
LOG_TOPIC(TRACE, Logger::AGENCY)
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Just keeping follower " << followerId
<< " devout with " << builder.toJson();
}
@ -837,7 +837,7 @@ void Agent::run() {
sendAppendEntriesRPC();
// Don't panic
_appendCV.wait(1000);
_appendCV.wait(100);
// Detect faulty agent and replace
// if possible and only if not already activating
@ -1000,6 +1000,7 @@ void Agent::beginShutdown() {
void Agent::prepareLead() {
_preparing = true;
// Key value stores
rebuildDBs();
@ -1020,9 +1021,11 @@ void Agent::lead() {
// Wake up run
{
CONDITION_LOCKER(guard, _appendCV);
_preparing = false;
guard.broadcast();
}
// Agency configuration
term_t myterm;
{
@ -1169,17 +1172,23 @@ arangodb::consensus::index_t Agent::rebuildDBs() {
// Apply logs from last applied index to leader's commit index
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Rebuilding kvstores from index "
<< "Rebuilding key-value stores from index "
<< _lastAppliedIndex << " to " << _leaderCommitIndex;
_spearhead.apply(
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex),
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1),
_leaderCommitIndex, _constituent.term());
_readDB.apply(
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex),
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1),
_leaderCommitIndex, _constituent.term());
_compacted.apply(
_state.slices(_lastCompactionIndex+1, _leaderCommitIndex+1),
_leaderCommitIndex, _constituent.term());
_lastAppliedIndex = _leaderCommitIndex;
_lastCompactionIndex = _leaderCommitIndex;
return _lastAppliedIndex;
@ -1195,9 +1204,11 @@ void Agent::compact() {
/// Last commit index
arangodb::consensus::index_t Agent::lastCommitted() const {
std::pair<arangodb::consensus::index_t, arangodb::consensus::index_t>
Agent::lastCommitted() const {
MUTEX_LOCKER(ioLocker, _ioLock);
return _lastCommitIndex;
return std::pair<arangodb::consensus::index_t, arangodb::consensus::index_t>(
_lastCommitIndex,_leaderCommitIndex);
}
/// Last commit index
@ -1382,8 +1393,42 @@ bool Agent::ready() const {
return true;
}
return _ready.load();
return _ready;
}
query_t Agent::buildDB(arangodb::consensus::index_t index) {
auto builder = std::make_shared<VPackBuilder>();
arangodb::consensus::index_t start = 0, end = 0;
Store store(this);
{
MUTEX_LOCKER(ioLocker, _ioLock);
store = _compacted;
MUTEX_LOCKER(liLocker, _liLock);
end = _leaderCommitIndex;
start = _lastCompactionIndex+1;
}
if (index > end) {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Cannot snapshot beyond leaderCommitIndex: " << end;
index = end;
} else if (index < start) {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Cannot snapshot before last compaction index: " << start;
index = start+1;
}
store.apply(_state.slices(start+1, index), index, _constituent.term());
store.toBuilder(*builder);
return builder;
}
}} // namespace

View File

@ -77,7 +77,7 @@ class Agent : public arangodb::Thread {
bool fitness() const;
/// @brief Leader ID
index_t lastCommitted() const;
std::pair<index_t, index_t> lastCommitted() const;
/// @brief Leader ID
std::string leaderID() const;
@ -222,6 +222,9 @@ class Agent : public arangodb::Thread {
/// @brief Update a peers endpoint in my configuration
void updatePeerEndpoint(std::string const& id, std::string const& ep);
/// @brief Assemble an agency to commitId
query_t buildDB(index_t);
/// @brief State reads persisted state and prepares the agent
friend class State;
friend class Compactor;
@ -270,6 +273,9 @@ class Agent : public arangodb::Thread {
/// @brief Last compaction index
index_t _lastAppliedIndex;
/// @brief Last compaction index
index_t _lastCompactionIndex;
/// @brief Last compaction index
index_t _leaderCommitIndex;
@ -282,6 +288,9 @@ class Agent : public arangodb::Thread {
/// @brief Committed (read) kv-store
Store _transient;
/// @brief Last compacted store
Store _compacted;
/// @brief Condition variable for appendEntries
arangodb::basics::ConditionVariable _appendCV;
@ -326,6 +335,7 @@ class Agent : public arangodb::Thread {
/// @brief Agent is ready for RAFT
std::atomic<bool> _ready;
std::atomic<bool> _preparing;
/// @brief Keep track of when I last took on leadership
TimePoint _leaderSince;


@ -39,23 +39,43 @@ AgentCallback::AgentCallback(Agent* agent, std::string const& slaveID,
void AgentCallback::shutdown() { _agent = 0; }
bool AgentCallback::operator()(arangodb::ClusterCommResult* res) {
if (res->status == CL_COMM_SENT) {
if (_agent) {
_agent->reportIn(_slaveID, _last, _toLog);
try { // Check success
if (res->result->getBodyVelocyPack()->slice().get("success").getBool()) {
_agent->reportIn(_slaveID, _last, _toLog);
}
LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "success: true " << res->result->getBodyVelocyPack()->toJson();
} catch (...) {
LOG_TOPIC(INFO, Logger::CLUSTER)
<< "success: false" << res->result->getBodyVelocyPack()->toJson();
}
}
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Got good callback from AppendEntriesRPC: "
<< "comm_status(" << res->status
<< "), last(" << _last << "), follower("
<< _slaveID << "), time("
<< TRI_microtime() - _startTime << ")";
} else {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Got bad callback from AppendEntriesRPC: "
<< "comm_status(" << res->status
<< "), last(" << _last << "), follower("
<< _slaveID << "), time("
<< TRI_microtime() - _startTime << ")";
}
return true;
}


@ -28,7 +28,6 @@
#include "Agency/MoveShard.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
CleanOutServer::CleanOutServer(Node const& snapshot, Agent* agent,
std::string const& jobId,


@ -146,6 +146,21 @@ void Constituent::termNoLock(term_t t) {
}
}
bool Constituent::logUpToDate(
arangodb::consensus::index_t prevLogIndex, term_t prevLogTerm) const {
log_t myLastLogEntry = _agent->state().lastLog();
return (prevLogTerm > myLastLogEntry.term ||
(prevLogTerm == myLastLogEntry.term &&
prevLogIndex >= myLastLogEntry.index));
}
bool Constituent::logMatches(
arangodb::consensus::index_t prevLogIndex, term_t prevLogTerm) const {
return _agent->state().has(prevLogIndex, prevLogTerm);
}
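The two predicates above implement Raft's log checks: logUpToDate() is the election restriction from the Raft paper (§5.4.1), logMatches() the log-matching property used when accepting a leader. A self-contained sketch of the up-to-date rule, mirroring Constituent::logUpToDate(); the index/term values are made up for illustration:

```cpp
#include <cstdint>
#include <iostream>

struct log_t { uint64_t index; uint64_t term; };

// A candidate is electable if its last log entry has a higher term than ours,
// or the same term and at least as high an index.
bool logUpToDate(log_t const& myLast, uint64_t prevLogIndex,
                 uint64_t prevLogTerm) {
  return prevLogTerm > myLast.term ||
         (prevLogTerm == myLast.term && prevLogIndex >= myLast.index);
}

int main() {
  log_t myLast{10, 3};  // my last entry: index 10, term 3
  std::cout << logUpToDate(myLast, 12, 3) << "\n";  // 1: same term, longer log
  std::cout << logUpToDate(myLast, 5, 4) << "\n";   // 1: higher term wins
  std::cout << logUpToDate(myLast, 9, 3) << "\n";   // 0: same term, shorter log
}
```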
/// My role
role_t Constituent::role() const {
MUTEX_LOCKER(guard, _castLock);
@ -257,8 +272,8 @@ std::string Constituent::endpoint(std::string id) const {
}
/// @brief Check leader
bool Constituent::checkLeader(term_t term, std::string id, index_t prevLogIndex,
term_t prevLogTerm) {
bool Constituent::checkLeader(
term_t term, std::string id, index_t prevLogIndex, term_t prevLogTerm) {
TRI_ASSERT(_vocbase != nullptr);
@ -277,6 +292,11 @@ bool Constituent::checkLeader(term_t term, std::string id, index_t prevLogIndex,
if (term > _term) {
termNoLock(term);
}
if (!logMatches(prevLogIndex,prevLogTerm)) {
return false;
}
if (_leaderID != id) {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Set _leaderID to " << id << " in term " << _term;
@ -421,7 +441,7 @@ void Constituent::callElection() {
auto res = ClusterComm::instance()->wait(
"", coordinatorTransactionID, 0, "",
duration<double>(steady_clock::now()-timeout).count());
duration<double>(timeout - steady_clock::now()).count());
if (res.status == CL_COMM_SENT) {
auto body = res.result->getBodyVelocyPack();
@ -571,6 +591,11 @@ void Constituent::run() {
if (_lastHeartbeatSeen > 0.0) {
double now = TRI_microtime();
randWait -= static_cast<int64_t>(M * (now-_lastHeartbeatSeen));
if (randWait < a) {
randWait = a;
} else if (randWait > b) {
randWait = b;
}
}
}


@ -126,6 +126,12 @@ class Constituent : public Thread {
// Wait for sync
bool waitForSync() const;
// Check if log up to date with ours
bool logUpToDate(index_t, term_t) const;
// Check if log start matches entry in my log
bool logMatches(index_t, term_t) const;
// Sleep for how long
duration_t sleepFor(double, double);


@ -27,7 +27,6 @@
#include "Agency/Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
FailedFollower::FailedFollower(Node const& snapshot, Agent* agent,
std::string const& jobId,
@ -122,9 +121,12 @@ bool FailedFollower::start() {
// DBservers
std::string planPath =
planColPrefix + _database + "/" + _collection + "/shards/" + _shard;
std::string curPath =
curColPrefix + _database + "/" + _collection + "/" + _shard + "/servers";
Node const& planned = _snapshot(planPath);
// Copy todo to pending
Builder todo, pending;


@ -30,7 +30,6 @@
#include <vector>
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
FailedLeader::FailedLeader(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,
@ -173,17 +172,23 @@ bool FailedLeader::start() {
// Distribute shards like to come!
std::vector<std::string> planv;
for (auto const& i : VPackArrayIterator(planned)) {
planv.push_back(i.copyString());
auto s = i.copyString();
if (s != _from && s != _to) {
planv.push_back(i.copyString());
}
}
pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
pending.add(VPackValue(_to));
for (auto const& i : VPackArrayIterator(current)) {
std::string s = i.copyString();
if (s != _from) {
if (s != _from && s != _to) {
pending.add(i);
planv.erase(std::remove(planv.begin(), planv.end(), s), planv.end());
}
}
pending.add(VPackValue(_from));
for (auto const& i : planv) {
pending.add(VPackValue(i));


@ -30,7 +30,6 @@
#include "Agency/UnassumedLeadership.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
FailedServer::FailedServer(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,
@ -286,7 +285,9 @@ JOB_STATUS FailedServer::status() {
deleteTodos->openArray();
deleteTodos->openObject();
}
deleteTodos->add(_agencyPrefix + toDoPrefix + subJob.first, VPackValue(VPackValueType::Object));
deleteTodos->add(
_agencyPrefix + toDoPrefix + subJob.first,
VPackValue(VPackValueType::Object));
deleteTodos->add("op", VPackValue("delete"));
deleteTodos->close();
} else {
@ -302,7 +303,9 @@ JOB_STATUS FailedServer::status() {
}
if (deleteTodos) {
LOG_TOPIC(INFO, Logger::AGENCY) << "Server " << _server << " is healthy again. Will try to delete any jobs which have not yet started!";
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Server " << _server << " is healthy again. Will try to delete"
"any jobs which have not yet started!";
deleteTodos->close();
deleteTodos->close();
// Transact to agency


@ -36,7 +36,6 @@
#include <thread>
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
Inception::Inception() : Thread("Inception"), _agent(nullptr) {}


@ -24,7 +24,6 @@
#include "Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
if (!plan.isArray() || !current.isArray()) {


@ -28,7 +28,6 @@
#include "Node.h"
#include "Supervision.h"
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
@ -42,7 +41,7 @@ namespace consensus {
// and all others followers. Both arguments must be arrays. Returns true,
// if the first items in both slice are equal and if both arrays contain
// the same set of strings.
bool compareServerLists(arangodb::velocypack::Slice plan, arangodb::velocypack::Slice current);
bool compareServerLists(Slice plan, Slice current);
enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",
@ -64,9 +63,9 @@ static std::string const plannedServers = "/Plan/DBServers";
static std::string const healthPrefix = "/Supervision/Health/";
inline arangodb::consensus::write_ret_t transact(Agent* _agent,
arangodb::velocypack::Builder const& transaction,
Builder const& transaction,
bool waitForCommit = true) {
query_t envelope = std::make_shared<arangodb::velocypack::Builder>();
query_t envelope = std::make_shared<Builder>();
try {
envelope->openArray();
@ -138,7 +137,7 @@ struct Job {
std::string _creator;
std::string _agencyPrefix;
std::shared_ptr<arangodb::velocypack::Builder> _jb;
std::shared_ptr<Builder> _jb;
};


@ -29,7 +29,6 @@
static std::string const DBServer = "DBServer";
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
MoveShard::MoveShard(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,


@ -33,9 +33,8 @@
#include <deque>
#include <regex>
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
using namespace arangodb::basics;
struct NotEmpty {
bool operator()(const std::string& s) { return !s.empty(); }
@ -700,28 +699,6 @@ void Node::toBuilder(Builder& builder, bool showHidden) const {
}
}
void Node::toObject(Builder& builder, bool showHidden) const {
try {
if (type() == NODE) {
VPackObjectBuilder guard(&builder);
for (auto const& child : _children) {
if (child.first[0] == '.' && !showHidden) {
continue;
}
builder.add(VPackValue(child.first));
child.second->toBuilder(builder);
}
} else {
if (!slice().isNone()) {
builder.add(slice());
}
}
} catch (std::exception const& e) {
LOG_TOPIC(ERR, Logger::AGENCY) << e.what() << " " << __FILE__ << __LINE__;
}
}
// Print internals to ostream
std::ostream& Node::print(std::ostream& o) const {
Node const* par = _parent;


@ -27,9 +27,6 @@
#include "AgencyCommon.h"
#include <velocypack/Buffer.h>
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/ValueType.h>
#include <velocypack/velocypack-aliases.h>
#include <type_traits>
@ -53,6 +50,8 @@ enum Operation {
REPLACE
};
using namespace arangodb::velocypack;
class StoreException : public std::exception {
public:
explicit StoreException(std::string const& message) : _message(message) {}
@ -162,10 +161,7 @@ class Node {
bool handle(arangodb::velocypack::Slice const&);
/// @brief Create Builder representing this store
void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const;
/// @brief Create Builder representing this store
void toObject(arangodb::velocypack::Builder&, bool showHidden = false) const;
void toBuilder(Builder&, bool showHidden = false) const;
/// @brief Access children
Children& children();
@ -174,10 +170,10 @@ class Node {
Children const& children() const;
/// @brief Create slice from value
arangodb::velocypack::Slice slice() const;
Slice slice() const;
/// @brief Get value type
arangodb::velocypack::ValueType valueType() const;
ValueType valueType() const;
/// @brief Add observer for this node
bool addObserver(std::string const&);
@ -222,7 +218,7 @@ class Node {
std::string getString() const;
/// @brief Get array value
arangodb::velocypack::Slice getArray() const;
Slice getArray() const;
protected:
/// @brief Add time to live entry
@ -238,8 +234,8 @@ class Node {
Store* _store; ///< @brief Store
Children _children; ///< @brief child nodes
TimePoint _ttl; ///< @brief my expiry
std::vector<arangodb::velocypack::Buffer<uint8_t>> _value; ///< @brief my value
mutable arangodb::velocypack::Buffer<uint8_t> _vecBuf;
std::vector<Buffer<uint8_t>> _value; ///< @brief my value
mutable Buffer<uint8_t> _vecBuf;
mutable bool _vecBufDirty;
bool _isArray;
};


@ -27,7 +27,6 @@
#include "Agency/Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
RemoveServer::RemoveServer(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,


@ -32,12 +32,13 @@
#include "Basics/StaticStrings.h"
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::rest;
using namespace arangodb::velocypack;
using namespace arangodb::consensus;
////////////////////////////////////////////////////////////////////////////////
/// @brief ArangoDB server
@ -218,6 +219,31 @@ RestStatus RestAgencyHandler::handleStores() {
return RestStatus::DONE;
}
RestStatus RestAgencyHandler::handleStore() {
if (_request->requestType() == rest::RequestType::POST) {
arangodb::velocypack::Options options;
auto query = _request->toVelocyPackBuilderPtr(&options);
arangodb::consensus::index_t index = 0;
try {
index = query->slice().getUInt();
} catch (...) {
index = _agent->lastCommitted().second;
}
query_t builder = _agent->buildDB(index);
generateResult(rest::ResponseCode::OK, builder->slice());
} else {
generateError(rest::ResponseCode::BAD, 400);
}
return RestStatus::DONE;
}
RestStatus RestAgencyHandler::handleWrite() {
if (_request->requestType() != rest::RequestType::POST) {
@ -624,12 +650,14 @@ RestStatus RestAgencyHandler::handleConfig() {
}
// Respond with configuration
auto last = _agent->lastCommitted();
Builder body;
{
VPackObjectBuilder b(&body);
body.add("term", Value(_agent->term()));
body.add("leaderId", Value(_agent->leaderID()));
body.add("lastCommitted", Value(_agent->lastCommitted()));
body.add("lastCommitted", Value(last.first));
body.add("leaderCommitted", Value(last.second));
body.add("lastAcked", _agent->lastAckedAgo()->slice());
body.add("configuration", _agent->config().toBuilder()->slice());
}
@ -691,6 +719,8 @@ RestStatus RestAgencyHandler::execute() {
return handleState();
} else if (suffixes[0] == "stores") {
return handleStores();
} else if (suffixes[0] == "store") {
return handleStore();
} else {
return reportUnknownMethod();
}


@ -47,6 +47,7 @@ class RestAgencyHandler : public RestBaseHandler {
RestStatus reportTooManySuffices();
RestStatus reportUnknownMethod();
RestStatus handleStores();
RestStatus handleStore();
RestStatus handleRead();
RestStatus handleWrite();
RestStatus handleTransact();


@ -32,6 +32,7 @@
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
using namespace arangodb;


@ -315,21 +315,66 @@ std::vector<log_t> State::get(arangodb::consensus::index_t start,
return entries;
}
if (end == (std::numeric_limits<uint64_t>::max)() || end > _log.size() - 1) {
end = _log.size() - 1;
if (end == (std::numeric_limits<uint64_t>::max)() || end > _log.back().index) {
end = _log.back().index;
}
if (start < _log[0].index) {
start = _log[0].index;
}
for (size_t i = start - _cur; i <= end; ++i) {
for (size_t i = start - _cur; i <= end - _cur; ++i) {
entries.push_back(_log[i]);
}
return entries;
}
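A toy model of the corrected bounds: _cur is the index of the first entry still retained after compaction, so deque position = index - _cur and the loop runs from (start - cur) to (end - cur). All values below are made up:

```cpp
#include <cstdint>
#include <deque>
#include <iostream>

struct log_t { uint64_t index; };

int main() {
  uint64_t cur = 5;                               // first retained index
  std::deque<log_t> log{{5}, {6}, {7}, {8}, {9}}; // indices 5..9
  uint64_t start = 6, end = 8;
  for (size_t i = start - cur; i <= end - cur; ++i) {
    std::cout << log[i].index << " ";             // prints: 6 7 8
  }
  std::cout << "\n";
}
```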
/// Get single log entry at the given index
/// Throws std::out_of_range exception
log_t State::at(arangodb::consensus::index_t index) const {
MUTEX_LOCKER(mutexLocker, _logLock); // Cannot be read lock (Compaction)
if (_cur > index) {
std::string excMessage =
std::string(
"Access before the start of the log deque: (first, requested): (") +
std::to_string(_cur) + ", " + std::to_string(index);
LOG_TOPIC(DEBUG, Logger::AGENCY) << excMessage;
throw std::out_of_range(excMessage);
}
auto pos = index - _cur;
if (pos >= _log.size()) {
std::string excMessage =
std::string(
"Access beyond the end of the log deque: (last, requested): (") +
std::to_string(_cur+_log.size()) + ", " + std::to_string(index) + ")";
LOG_TOPIC(DEBUG, Logger::AGENCY) << excMessage;
throw std::out_of_range(excMessage);
}
return _log[pos];
}
/// Have log with specified index and term
bool State::has(arangodb::consensus::index_t index, term_t term) const {
MUTEX_LOCKER(mutexLocker, _logLock); // Cannot be read lock (Compaction)
try {
return _log.at(index-_cur).term == term;
} catch (...) {}
return false;
}
/// Get vector of past transaction from 'start' to 'end'
std::vector<VPackSlice> State::slices(arangodb::consensus::index_t start,
arangodb::consensus::index_t end) const {
@ -906,3 +951,9 @@ std::vector<std::vector<log_t>> State::inquire(query_t const& query) const {
}
// Index of last log entry
arangodb::consensus::index_t State::lastIndex() const {
MUTEX_LOCKER(mutexLocker, _logLock);
return (!_log.empty()) ? _log.back().index : 0;
}


@ -66,21 +66,27 @@ class State {
std::vector<bool> const& indices, term_t term);
/// @brief Single log entry (leader)
arangodb::consensus::index_t log(
velocypack::Slice const& slice, term_t term,
std::string const& clientId = std::string());
index_t log(velocypack::Slice const& slice, term_t term,
std::string const& clientId = std::string());
/// @brief Log entries (followers)
arangodb::consensus::index_t log(query_t const& queries, size_t ndups = 0);
/// @brief Find entry at index with term
bool find(index_t index, term_t term);
/// @brief Get complete log entries bound by lower and upper bounds.
/// Default: [first, last]
std::vector<log_t> get(
index_t = 0, index_t = (std::numeric_limits<uint64_t>::max)()) const;
index_t = 0, index_t = (std::numeric_limits<uint64_t>::max)()) const;
/// @brief Get single log entry at the given index.
/// Throws std::out_of_range if the entry is not retained
log_t at(index_t) const;
/// @brief Has entry with index and term
bool has(index_t, term_t) const;
/// @brief Get log entries by client Id
std::vector<std::vector<log_t>> inquire(query_t const&) const;
@ -96,6 +102,10 @@ class State {
/// after the return
log_t lastLog() const;
/// @brief index of the last log entry, copied because we no longer have the
/// lock after the return
index_t lastIndex() const;
/// @brief Set endpoint
bool configure(Agent* agent);


@ -40,9 +40,8 @@
#include <iomanip>
#include <regex>
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
using namespace arangodb::basics;
/// Non-Emptyness of string
struct NotEmpty {
@ -353,15 +352,11 @@ std::vector<bool> Store::apply(
auto headerFields =
std::make_unique<std::unordered_map<std::string, std::string>>();
auto cc = ClusterComm::instance();
if (cc != nullptr) {
// nullptr only happens on controlled shutdown
cc->asyncRequest(
"1", 1, endpoint, rest::RequestType::POST, path,
std::make_shared<std::string>(body.toString()), headerFields,
std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true,
0.01);
}
arangodb::ClusterComm::instance()->asyncRequest(
"1", 1, endpoint, rest::RequestType::POST, path,
std::make_shared<std::string>(body.toString()), headerFields,
std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true, 0.01);
} else {
LOG_TOPIC(WARN, Logger::AGENCY) << "Malformed URL " << url;
}


@ -60,10 +60,10 @@ class Store : public arangodb::Thread {
std::vector<bool> apply(query_t const& query, bool verbose = false);
/// @brief Apply single entry in query
bool apply(arangodb::velocypack::Slice const& query, bool verbose = false);
bool apply(Slice const& query, bool verbose = false);
/// @brief Apply entry in query
std::vector<bool> apply(std::vector<arangodb::velocypack::Slice> const& query,
std::vector<bool> apply(std::vector<Slice> const& query,
index_t lastCommitIndex, term_t term,
bool inform = true);
@ -81,7 +81,7 @@ class Store : public arangodb::Thread {
bool start();
/// @brief Dump everything to builder
void dumpToBuilder(arangodb::velocypack::Builder&) const;
void dumpToBuilder(Builder&) const;
/// @brief Notify observers
void notifyObservers() const;
@ -92,7 +92,7 @@ class Store : public arangodb::Thread {
Store& operator=(VPackSlice const& slice);
/// @brief Create Builder representing this store
void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const;
void toBuilder(Builder&, bool showHidden = false) const;
/// @brief Copy out a node
Node get(std::string const& path = std::string("/")) const;


@ -41,9 +41,9 @@
#include "Basics/MutexLocker.h"
using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
using namespace arangodb::application_features;
std::string Supervision::_agencyPrefix = "/arango";
@ -552,11 +552,13 @@ void Supervision::handleShutdown() {
del->close();
auto result = _agent->write(del);
if (result.indices.size() != 1) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Invalid resultsize of " << result.indices.size()
<< " found during shutdown";
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Invalid resultsize of " << result.indices.size()
<< " found during shutdown";
} else {
if (_agent->waitFor(result.indices.at(0)) != Agent::raft_commit_t::OK) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Result was not written to followers during shutdown";
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Result was not written to followers during shutdown";
}
}
}


@ -27,7 +27,6 @@
#include "Agency/Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
UnassumedLeadership::UnassumedLeadership(
Node const& snapshot, Agent* agent, std::string const& jobId,


@ -39,7 +39,6 @@ using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
static void JS_EnabledAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);


@ -42,11 +42,10 @@ int AqlTransaction::processCollection(aql::Collection* collection) {
/// @brief add a coordinator collection to the transaction
int AqlTransaction::processCollectionCoordinator(aql::Collection* collection) {
TRI_voc_cid_t cid =
this->resolver()->getCollectionId(collection->getName());
TRI_voc_cid_t cid = resolver()->getCollectionId(collection->getName());
return this->addCollection(cid, collection->getName().c_str(),
collection->accessType);
return addCollection(cid, collection->getName().c_str(),
collection->accessType);
}
/// @brief add a regular collection to the transaction
@ -55,7 +54,7 @@ int AqlTransaction::processCollectionNormal(aql::Collection* collection) {
TRI_voc_cid_t cid = 0;
arangodb::LogicalCollection const* col =
this->resolver()->getCollectionStruct(collection->getName());
resolver()->getCollectionStruct(collection->getName());
/*if (col == nullptr) {
auto startTime = TRI_microtime();
auto endTime = startTime + 60.0;
@ -72,8 +71,7 @@ int AqlTransaction::processCollectionNormal(aql::Collection* collection) {
cid = col->cid();
}
int res =
this->addCollection(cid, collection->getName(), collection->accessType);
int res = addCollection(cid, collection->getName(), collection->accessType);
if (res == TRI_ERROR_NO_ERROR && col != nullptr) {
collection->setCollection(const_cast<arangodb::LogicalCollection*>(col));


@ -62,14 +62,14 @@ class AqlTransaction final : public transaction::Methods {
/// @brief add a list of collections to the transaction
int addCollections(
std::map<std::string, aql::Collection*> const& collections) {
int ret = TRI_ERROR_NO_ERROR;
for (auto const& it : collections) {
ret = processCollection(it.second);
if (ret != TRI_ERROR_NO_ERROR) {
break;
int res = processCollection(it.second);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
return ret;
return TRI_ERROR_NO_ERROR;
}
/// @brief add a collection to the transaction


@ -853,7 +853,9 @@ void AqlValue::toVelocyPack(transaction::Methods* trx,
case VPACK_INLINE:
case VPACK_MANAGED: {
if (resolveExternals) {
arangodb::basics::VelocyPackHelper::SanitizeExternals(slice(), builder);
bool const sanitizeExternals = true;
bool const sanitizeCustom = true;
arangodb::basics::VelocyPackHelper::sanitizeNonClientTypes(
    slice(), VPackSlice::noneSlice(), builder,
    trx->transactionContextPtr()->getVPackOptions(),
    sanitizeExternals, sanitizeCustom);
} else {
builder.add(slice());
}


@ -95,12 +95,7 @@ std::unordered_map<int, AstNodeType> const Ast::ReversedOperators{
/// @brief create the AST
Ast::Ast(Query* query)
: _query(query),
_scopes(),
_variables(),
_bindParameters(),
_root(nullptr),
_queries(),
_writeCollections(),
_functionsMayAccessDocuments(false),
_containsTraversal(false) {
TRI_ASSERT(_query != nullptr);


@ -40,14 +40,10 @@ namespace velocypack {
class Slice;
}
namespace transaction {
class Methods;
}
;
namespace aql {
class Query;
class VariableGenerator;
typedef std::unordered_map<Variable const*, std::unordered_set<std::string>>
TopLevelAttributes;
@ -58,7 +54,7 @@ class Ast {
public:
/// @brief create the AST
Ast(Query*);
explicit Ast(Query*);
/// @brief destroy the AST
~Ast();
@ -69,12 +65,7 @@ class Ast {
/// @brief return the variable generator
inline VariableGenerator* variables() { return &_variables; }
/// @brief return the variable generator
inline VariableGenerator* variables() const {
return const_cast<VariableGenerator*>(&_variables);
}
/// @brief return the root of the AST
inline AstNode const* root() const { return _root; }


@ -39,7 +39,9 @@ class BindParameters {
public:
BindParameters(BindParameters const&) = delete;
BindParameters& operator=(BindParameters const&) = delete;
BindParameters() = delete;
BindParameters()
: _builder(nullptr), _parameters(), _processed(false) {}
/// @brief create the parameters
explicit BindParameters(std::shared_ptr<arangodb::velocypack::Builder> builder)


@ -25,6 +25,7 @@
#include "Aql/AqlItemBlock.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Functions.h"
#include "Aql/Query.h"
#include "Basics/Exceptions.h"
#include "Basics/ScopeGuard.h"
#include "Basics/VelocyPackHelper.h"


@ -29,6 +29,7 @@
#include "Aql/Collection.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionStats.h"
#include "Aql/Query.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringBuffer.h"


@ -25,6 +25,7 @@
#include "Aql/Ast.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
using namespace arangodb::basics;
using namespace arangodb::aql;


@ -617,14 +617,11 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
// cleanup function for group values
auto cleanup = [&allGroups]() -> void {
for (auto& it : allGroups) {
if (it.second != nullptr) {
for (auto& it2 : *(it.second)) {
delete it2;
}
delete it.second;
for (auto& it2 : *(it.second)) {
delete it2;
}
delete it.second;
}
allGroups.clear();
};
// prevent memory leaks by always cleaning up the groups
@ -643,8 +640,8 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
size_t row = 0;
for (auto& it : allGroups) {
auto& keys = it.first;
TRI_ASSERT(it.second != nullptr);
TRI_ASSERT(keys.size() == _groupRegisters.size());
size_t i = 0;
@ -653,7 +650,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
const_cast<AqlValue*>(&key)->erase(); // to prevent double-freeing later
}
if (it.second != nullptr && !en->_count) {
if (!en->_count) {
TRI_ASSERT(it.second->size() == _aggregateRegisters.size());
size_t j = 0;
for (auto const& r : *(it.second)) {
@ -662,7 +659,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
}
} else if (en->_count) {
// set group count in result register
TRI_ASSERT(it.second != nullptr);
TRI_ASSERT(!it.second->empty());
result->setValue(row, _collectRegister,
it.second->back()->stealValue());
}
@ -749,10 +746,12 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
if (en->_aggregateVariables.empty()) {
// no aggregate registers. simply increase the counter
if (en->_count) {
TRI_ASSERT(!aggregateValues->empty());
aggregateValues->back()->reduce(AqlValue());
}
} else {
// apply the aggregators for the group
TRI_ASSERT(aggregateValues->size() == _aggregateRegisters.size());
size_t j = 0;
for (auto const& r : _aggregateRegisters) {
(*aggregateValues)[j]->reduce(


@ -37,19 +37,16 @@ namespace arangodb {
namespace transaction {
class Methods;
}
;
namespace aql {
struct Aggregator;
class AqlItemBlock;
class ExecutionEngine;
typedef std::vector<Aggregator*> AggregateValuesType;
class SortedCollectBlock final : public ExecutionBlock {
private:
typedef std::vector<Aggregator*> AggregateValuesType;
struct CollectGroup {
std::vector<AqlValue> groupValues;


@ -24,6 +24,7 @@
#include "CollectNode.h"
#include "Aql/Ast.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/VariableGenerator.h"
#include "Aql/WalkerWorker.h"
using namespace arangodb::aql;


@ -52,6 +52,8 @@ class Collections {
std::map<std::string, Collection*> const* collections() const;
bool empty() const { return _collections.empty(); }
private:
TRI_vocbase_t* _vocbase;


@ -26,6 +26,7 @@
#include "Aql/AstNode.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Aql/SortCondition.h"
#include "Aql/Variable.h"
#include "Basics/Exceptions.h"


@ -26,6 +26,7 @@
#include "Aql/AqlItemBlock.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Query.h"
#include "Basics/Exceptions.h"
#include "Cluster/FollowerInfo.h"
#include "StorageEngine/DocumentIdentifierToken.h"


@ -28,6 +28,7 @@
#include "Aql/Ast.h"
#include "Aql/BlockCollector.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Query.h"
using namespace arangodb::aql;


@ -34,6 +34,7 @@
#include "Aql/ExecutionNode.h"
#include "Aql/IndexBlock.h"
#include "Aql/ModificationBlocks.h"
#include "Aql/Query.h"
#include "Aql/SortBlock.h"
#include "Aql/SubqueryBlock.h"
#include "Aql/TraversalBlock.h"


@ -31,6 +31,7 @@
#include "Aql/ExecutionPlan.h"
#include "Aql/IndexNode.h"
#include "Aql/ModificationNodes.h"
#include "Aql/Query.h"
#include "Aql/SortNode.h"
#include "Aql/TraversalNode.h"
#include "Aql/ShortestPathNode.h"
@ -1375,7 +1376,7 @@ ExecutionNode* CalculationNode::clone(ExecutionPlan* plan,
outVariable = plan->getAst()->variables()->createVariable(outVariable);
}
auto c = new CalculationNode(plan, _id, _expression->clone(),
auto c = new CalculationNode(plan, _id, _expression->clone(plan->getAst()),
conditionVariable, outVariable);
c->_canRemoveIfThrows = _canRemoveIfThrows;


@ -228,8 +228,6 @@ void ExecutionPlan::getCollectionsFromVelocyPack(Ast* ast,
}
for (auto const& collection : VPackArrayIterator(collectionsSlice)) {
auto typeStr = arangodb::basics::VelocyPackHelper::checkAndGetStringValue(
collection, "type");
ast->query()->collections()->add(
arangodb::basics::VelocyPackHelper::checkAndGetStringValue(collection,
"name"),
@ -276,8 +274,8 @@ class CloneNodeAdder final : public WalkerWorker<ExecutionNode> {
};
/// @brief clone an existing execution plan
ExecutionPlan* ExecutionPlan::clone() {
auto plan = std::make_unique<ExecutionPlan>(_ast);
ExecutionPlan* ExecutionPlan::clone(Ast* ast) {
auto plan = std::make_unique<ExecutionPlan>(ast);
plan->_root = _root->clone(plan.get(), true, false);
plan->_nextId = _nextId;
@ -297,13 +295,19 @@ ExecutionPlan* ExecutionPlan::clone() {
return plan.release();
}
/// @brief clone an existing execution plan
ExecutionPlan* ExecutionPlan::clone() {
return clone(_ast);
}
/// @brief create an execution plan identical to this one
/// keep the memory of the plan on the query object specified.
ExecutionPlan* ExecutionPlan::clone(Query const& query) {
auto otherPlan = std::make_unique<ExecutionPlan>(query.ast());
for (auto const& it : _ids) {
otherPlan->registerNode(it.second->clone(otherPlan.get(), false, true));
auto clonedNode = it.second->clone(otherPlan.get(), false, true);
otherPlan->registerNode(clonedNode);
}
return otherPlan.release();


@ -28,7 +28,6 @@
#include "Aql/CollectOptions.h"
#include "Aql/ExecutionNode.h"
#include "Aql/ModificationOptions.h"
#include "Aql/Query.h"
#include "Aql/types.h"
#include "Basics/SmallVector.h"
@ -40,6 +39,7 @@ struct AstNode;
class CalculationNode;
class CollectNode;
class ExecutionNode;
class Query;
class ExecutionPlan {
public:
@ -60,6 +60,8 @@ class ExecutionPlan {
/// @brief create an execution plan from VelocyPack
static ExecutionPlan* instantiateFromVelocyPack(
Ast* ast, arangodb::velocypack::Slice const);
ExecutionPlan* clone(Ast*);
/// @brief clone the plan by recursively cloning starting from the root
ExecutionPlan* clone();
@ -69,9 +71,9 @@ class ExecutionPlan {
ExecutionPlan* clone(Query const&);
/// @brief export to VelocyPack
std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack(Ast*, bool) const;
std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack(Ast*, bool verbose) const;
void toVelocyPack(arangodb::velocypack::Builder&, Ast*, bool) const;
void toVelocyPack(arangodb::velocypack::Builder&, Ast*, bool verbose) const;
/// @brief check if the plan is empty
inline bool empty() const { return (_root == nullptr); }


@ -107,10 +107,10 @@ class Expression {
}
/// @brief clone the expression, needed to clone execution plans
Expression* clone() {
Expression* clone(Ast* ast) {
// We do not need to copy the _ast, since it is managed by the
// query object and the memory management of the ASTs
return new Expression(_ast, _node);
return new Expression(ast != nullptr ? ast : _ast, _node);
}
/// @brief return all variables used in the expression


@ -411,13 +411,11 @@ void Functions::Stringify(transaction::Methods* trx,
return;
}
if (slice.isObject() || slice.isArray()) {
VPackDumper dumper(&buffer, trx->transactionContextPtr()->getVPackOptions());
dumper.dump(slice);
return;
}
VPackDumper dumper(&buffer);
VPackOptions* options = trx->transactionContextPtr()->getVPackOptionsForDump();
VPackOptions adjustedOptions = *options;
adjustedOptions.escapeUnicode = false;
adjustedOptions.escapeForwardSlashes = false;
VPackDumper dumper(&buffer, &adjustedOptions);
dumper.dump(slice);
}
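A standalone sketch of dumping with adjusted options as done above, assuming velocypack's `StringSink`; the input document is made up:

```cpp
#include <iostream>
#include <string>

#include <velocypack/Dumper.h>
#include <velocypack/Options.h>
#include <velocypack/Parser.h>
#include <velocypack/Sink.h>
#include <velocypack/velocypack-aliases.h>

int main() {
  auto doc = VPackParser::fromJson(R"({"url":"http://example.com/a"})");

  std::string out;
  arangodb::velocypack::StringSink sink(&out);

  VPackOptions adjusted = VPackOptions::Defaults;
  adjusted.escapeUnicode = false;         // keep UTF-8 bytes as-is
  adjusted.escapeForwardSlashes = false;  // emit '/' rather than '\/'

  VPackDumper dumper(&sink, &adjusted);
  dumper.dump(doc->slice());
  std::cout << out << "\n";               // {"url":"http://example.com/a"}
}
```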
@ -2270,7 +2268,7 @@ AqlValue Functions::Zip(arangodb::aql::Query* query,
for (VPackValueLength i = 0; i < n; ++i) {
buffer->reset();
Stringify(trx, adapter, keysSlice.at(i));
builder->add(std::string(buffer->c_str(), buffer->length()), valuesSlice.at(i));
builder->add(buffer->c_str(), buffer->length(), valuesSlice.at(i));
}
builder->close();
return AqlValue(builder.get());


@ -28,6 +28,7 @@
#include "Aql/Condition.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Functions.h"
#include "Aql/Query.h"
#include "Basics/ScopeGuard.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"


@ -26,6 +26,7 @@
#include "Aql/Collection.h"
#include "Aql/Condition.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Transaction/Methods.h"
#include <velocypack/Iterator.h>


@ -25,6 +25,8 @@
#include "Aql/Ast.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Aql/VariableGenerator.h"
using namespace arangodb::aql;


@ -32,6 +32,7 @@
#include "Aql/Function.h"
#include "Aql/IndexNode.h"
#include "Aql/ModificationNodes.h"
#include "Aql/Query.h"
#include "Aql/ShortestPathNode.h"
#include "Aql/SortCondition.h"
#include "Aql/SortNode.h"


@ -57,7 +57,7 @@ namespace aql {
class Parser {
public:
/// @brief create the parser
Parser(Query*);
explicit Parser(Query*);
/// @brief destroy the parser
~Parser();

arangod/Aql/PlanCache.cpp Normal file

@ -0,0 +1,98 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "PlanCache.h"
#include "Aql/Ast.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Basics/ReadLocker.h"
#include "Basics/WriteLocker.h"
#include "VocBase/vocbase.h"
#include <velocypack/Builder.h>
using namespace arangodb::aql;
/// @brief singleton instance of the plan cache
static arangodb::aql::PlanCache Instance;
/// @brief create the plan cache
PlanCache::PlanCache() : _lock(), _plans() {}
/// @brief destroy the plan cache
PlanCache::~PlanCache() {}
/// @brief lookup a plan in the cache
std::shared_ptr<PlanCacheEntry> PlanCache::lookup(TRI_vocbase_t* vocbase, uint64_t hash,
char const* queryString,
size_t queryStringLength) {
READ_LOCKER(readLocker, _lock);
auto it = _plans.find(vocbase);
if (it == _plans.end()) {
// no entry found for the requested database
return std::shared_ptr<PlanCacheEntry>();
}
auto it2 = (*it).second.find(hash);
if (it2 == (*it).second.end()) {
// plan not found in cache
return std::shared_ptr<PlanCacheEntry>();
}
// plan found in cache
return (*it2).second;
}
/// @brief store a plan in the cache
void PlanCache::store(
TRI_vocbase_t* vocbase, uint64_t hash, char const* queryString,
size_t queryStringLength, ExecutionPlan const* plan) {
auto entry = std::make_unique<PlanCacheEntry>(std::string(queryString, queryStringLength), plan->toVelocyPack(plan->getAst(), true));
WRITE_LOCKER(writeLocker, _lock);
auto it = _plans.find(vocbase);
if (it == _plans.end()) {
// create entry for the current database
it = _plans.emplace(vocbase, std::unordered_map<uint64_t, std::shared_ptr<PlanCacheEntry>>()).first;
}
// store cache entry
(*it).second.emplace(hash, std::move(entry));
}
/// @brief invalidate all queries for a particular database
void PlanCache::invalidate(TRI_vocbase_t* vocbase) {
WRITE_LOCKER(writeLocker, _lock);
_plans.erase(vocbase);
}
/// @brief get the plan cache instance
PlanCache* PlanCache::instance() { return &Instance; }

84
arangod/Aql/PlanCache.h Normal file
View File

@ -0,0 +1,84 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_AQL_PLAN_CACHE_H
#define ARANGOD_AQL_PLAN_CACHE_H 1
#include "Basics/Common.h"
#include "Basics/ReadWriteLock.h"
struct TRI_vocbase_t;
namespace arangodb {
namespace velocypack {
class Builder;
}
namespace aql {
class ExecutionPlan;
class VariableGenerator;
struct PlanCacheEntry {
PlanCacheEntry(std::string&& queryString,
std::shared_ptr<arangodb::velocypack::Builder> builder)
: queryString(std::move(queryString)), builder(builder) {}
std::string queryString;
std::shared_ptr<arangodb::velocypack::Builder> builder;
};
class PlanCache {
public:
PlanCache(PlanCache const&) = delete;
PlanCache& operator=(PlanCache const&) = delete;
/// @brief create cache
PlanCache();
/// @brief destroy the cache
~PlanCache();
public:
/// @brief lookup a plan in the cache
std::shared_ptr<PlanCacheEntry> lookup(TRI_vocbase_t*, uint64_t, char const*, size_t);
/// @brief store a plan in the cache
void store(TRI_vocbase_t*, uint64_t, char const*, size_t, ExecutionPlan const*);
/// @brief invalidate all plans for a particular database
void invalidate(TRI_vocbase_t*);
/// @brief get the pointer to the global plan cache
static PlanCache* instance();
private:
/// @brief read-write lock for the cache
arangodb::basics::ReadWriteLock _lock;
/// @brief cached query plans, organized per database
std::unordered_map<TRI_vocbase_t*, std::unordered_map<uint64_t, std::shared_ptr<PlanCacheEntry>>> _plans;
};
}
}
#endif
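
The call sites for this new cache live in Query::prepare() further down, guarded by USE_PLAN_CACHE. As a reading aid, here is a minimal caller-side sketch of the API declared above; lookupOrCache is a hypothetical helper, not part of the commit.

#include "Aql/PlanCache.h"

using arangodb::aql::ExecutionPlan;
using arangodb::aql::PlanCache;
using arangodb::aql::PlanCacheEntry;

// sketch: consult the process-wide cache first, fall back to serializing
// and storing a freshly built plan; hash is the query string hash
std::shared_ptr<PlanCacheEntry> lookupOrCache(TRI_vocbase_t* vocbase,
                                              uint64_t hash,
                                              char const* queryString,
                                              size_t length,
                                              ExecutionPlan const* freshPlan) {
  auto entry = PlanCache::instance()->lookup(vocbase, hash, queryString, length);
  if (entry != nullptr) {
    return entry;  // cache hit: serialized plan is in entry->builder
  }
  // cache miss: serialize and store the plan we just built
  PlanCache::instance()->store(vocbase, hash, queryString, length, freshPlan);
  return PlanCache::instance()->lookup(vocbase, hash, queryString, length);
}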

View File

@ -31,6 +31,7 @@
#include "Aql/Executor.h"
#include "Aql/Optimizer.h"
#include "Aql/Parser.h"
#include "Aql/PlanCache.h"
#include "Aql/QueryCache.h"
#include "Aql/QueryList.h"
#include "Basics/Exceptions.h"
@ -54,11 +55,17 @@
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
#ifndef USE_PLAN_CACHE
#undef USE_PLAN_CACHE
#endif
using namespace arangodb;
using namespace arangodb::aql;
namespace {
static std::atomic<TRI_voc_tick_t> NextQueryId(1);
constexpr uint64_t DontCache = 0;
}
/// @brief names of query phases / states
@ -148,20 +155,15 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_resourceMonitor(),
_resources(&_resourceMonitor),
_vocbase(vocbase),
_executor(nullptr),
_context(nullptr),
_queryString(queryString),
_queryLength(queryLength),
_queryStringLength(queryLength),
_queryBuilder(),
_bindParameters(bindParameters),
_options(options),
_collections(vocbase),
_ast(nullptr),
_profile(nullptr),
_state(INVALID_STATE),
_parser(nullptr),
_trx(nullptr),
_engine(nullptr),
_maxWarningCount(10),
_warnings(),
_startTime(TRI_microtime()),
@ -221,20 +223,14 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_resourceMonitor(),
_resources(&_resourceMonitor),
_vocbase(vocbase),
_executor(nullptr),
_context(nullptr),
_queryString(nullptr),
_queryLength(0),
_queryStringLength(0),
_queryBuilder(queryStruct),
_bindParameters(nullptr),
_options(options),
_collections(vocbase),
_ast(nullptr),
_profile(nullptr),
_state(INVALID_STATE),
_parser(nullptr),
_trx(nullptr),
_engine(nullptr),
_maxWarningCount(10),
_warnings(),
_startTime(TRI_microtime()),
@ -278,11 +274,9 @@ Query::~Query() {
}
cleanupPlanAndEngine(TRI_ERROR_INTERNAL); // abort the transaction
delete _profile;
_profile = nullptr;
_profile.reset();
delete _executor;
_executor = nullptr;
_executor.reset();
if (_context != nullptr) {
TRI_ASSERT(!_contextOwnedByExterior);
@ -300,8 +294,7 @@ Query::~Query() {
_context = nullptr;
}
delete _ast;
_ast = nullptr;
_ast.reset();
for (auto& it : _graphs) {
delete it.second;
@ -317,7 +310,7 @@ Query::~Query() {
/// the query
Query* Query::clone(QueryPart part, bool withPlan) {
auto clone =
std::make_unique<Query>(false, _vocbase, _queryString, _queryLength,
std::make_unique<Query>(false, _vocbase, _queryString, _queryStringLength,
std::shared_ptr<VPackBuilder>(), _options, part);
clone->_resourceMonitor = _resourceMonitor;
@ -373,7 +366,7 @@ std::string Query::extractRegion(int line, int column) const {
char c;
char const* p = _queryString;
while ((static_cast<size_t>(p - _queryString) < _queryLength) && (c = *p)) {
while ((static_cast<size_t>(p - _queryString) < _queryStringLength) && (c = *p)) {
if (currentLine > line ||
(currentLine >= line && currentColumn >= column)) {
break;
@ -406,9 +399,9 @@ std::string Query::extractRegion(int line, int column) const {
static int const SNIPPET_LENGTH = 32;
static char const* SNIPPET_SUFFIX = "...";
if (_queryLength < offset + SNIPPET_LENGTH) {
if (_queryStringLength < offset + SNIPPET_LENGTH) {
// return a copy of the region
return std::string(_queryString + offset, _queryLength - offset);
return std::string(_queryString + offset, _queryStringLength - offset);
}
// copy query part
@ -461,157 +454,195 @@ void Query::registerWarning(int code, char const* details) {
}
}
void Query::prepare(QueryRegistry* registry, uint64_t queryStringHash) {
TRI_ASSERT(registry != nullptr);
init();
enterState(PARSING);
std::unique_ptr<ExecutionPlan> plan;
#if USE_PLAN_CACHE
if (_queryString != nullptr &&
queryStringHash != DontCache &&
_part == PART_MAIN) {
// LOG_TOPIC(INFO, Logger::FIXME) << "trying to find query in execution plan cache: '" << std::string(_queryString, _queryStringLength) << "', hash: " << queryStringHash;
// store & lookup velocypack plans!!
std::shared_ptr<PlanCacheEntry> planCacheEntry = PlanCache::instance()->lookup(_vocbase, queryStringHash, _queryString, _queryStringLength);
if (planCacheEntry != nullptr) {
// LOG_TOPIC(INFO, Logger::FIXME) << "query found in execution plan cache: '" << std::string(_queryString, _queryStringLength) << "'";
TRI_ASSERT(_trx == nullptr);
TRI_ASSERT(_collections.empty());
// create the transaction object, but do not start it yet
AqlTransaction* trx = new AqlTransaction(
createTransactionContext(), _collections.collections(),
_part == PART_MAIN);
_trx = trx;
VPackBuilder* builder = planCacheEntry->builder.get();
VPackSlice slice = builder->slice();
ExecutionPlan::getCollectionsFromVelocyPack(_ast.get(), slice);
_ast->variables()->fromVelocyPack(slice);
enterState(LOADING_COLLECTIONS);
int res = trx->addCollections(*_collections.collections());
if (res == TRI_ERROR_NO_ERROR) {
res = _trx->begin();
}
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), slice));
TRI_ASSERT(plan != nullptr);
}
}
#endif
if (plan == nullptr) {
plan.reset(prepare());
TRI_ASSERT(plan != nullptr);
#if USE_PLAN_CACHE
if (_queryString != nullptr &&
queryStringHash != DontCache &&
_part == PART_MAIN &&
_warnings.empty() &&
_ast->root()->isCacheable()) {
// LOG_TOPIC(INFO, Logger::FIXME) << "storing query in execution plan cache '" << std::string(_queryString, _queryStringLength) << "', hash: " << queryStringHash;
PlanCache::instance()->store(_vocbase, queryStringHash, _queryString, _queryStringLength, plan.get());
}
#endif
}
enterState(EXECUTION);
TRI_ASSERT(_engine == nullptr);
// note that the engine returned here may already be present in our
// own _engine attribute (the instantiation procedure may modify us
// by calling our engine(ExecutionEngine*) function
// this is confusing and should be fixed!
std::unique_ptr<ExecutionEngine> engine(ExecutionEngine::instantiateFromPlan(registry, this, plan.get(), _queryString != nullptr));
if (_engine == nullptr) {
_engine = std::move(engine);
} else {
engine.release();
}
_plan = std::move(plan);
}
/// @brief prepare an AQL query, this is a preparation for execute, but
/// execute calls it internally. The purpose of this separate method is
/// to be able to only prepare a query from VelocyPack and then store it in the
/// QueryRegistry.
QueryResult Query::prepare(QueryRegistry* registry) {
ExecutionPlan* Query::prepare() {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::prepare"
<< " this: " << (uintptr_t) this;
TRI_ASSERT(registry != nullptr);
try {
init();
enterState(PARSING);
std::unique_ptr<ExecutionPlan> plan;
if (_queryString != nullptr) {
auto parser = std::make_unique<Parser>(this);
std::unique_ptr<ExecutionPlan> plan;
if (_queryString != nullptr) {
parser->parse(false);
// put in bind parameters
parser->ast()->injectBindParameters(_bindParameters);
}
parser->parse(false);
// put in bind parameters
parser->ast()->injectBindParameters(_bindParameters);
_isModificationQuery = parser->isModificationQuery();
// create the transaction object, but do not start it yet
AqlTransaction* trx = new AqlTransaction(
createTransactionContext(), _collections.collections(),
_part == PART_MAIN);
_trx = trx;
try {
bool planRegisters;
// As soon as we start to instantiate the plan we have to clean it
// up before killing the unique_ptr
if (_queryString != nullptr) {
// we have an AST
// optimize the ast
enterState(AST_OPTIMIZATION);
parser->ast()->validateAndOptimize();
enterState(LOADING_COLLECTIONS);
int res = trx->begin();
if (res != TRI_ERROR_NO_ERROR) {
return transactionError(res);
}
enterState(PLAN_INSTANTIATION);
plan.reset(ExecutionPlan::instantiateFromAst(parser->ast()));
if (plan.get() == nullptr) {
// oops
return QueryResult(TRI_ERROR_INTERNAL,
"failed to create query execution engine");
}
// Run the query optimizer:
enterState(PLAN_OPTIMIZATION);
arangodb::aql::Optimizer opt(maxNumberOfPlans());
// get enabled/disabled rules
opt.createPlans(plan.release(), getRulesFromOptions(),
inspectSimplePlans());
// Now plan and all derived plans belong to the optimizer
plan.reset(opt.stealBest()); // Now we own the best one again
planRegisters = true;
} else { // no queryString, we are instantiating from _queryBuilder
enterState(PARSING);
VPackSlice const querySlice = _queryBuilder->slice();
ExecutionPlan::getCollectionsFromVelocyPack(parser->ast(), querySlice);
parser->ast()->variables()->fromVelocyPack(querySlice);
// creating the plan may have produced some collections
// we need to add them to the transaction now (otherwise the query will
// fail)
enterState(LOADING_COLLECTIONS);
int res = trx->addCollections(*_collections.collections());
if (res == TRI_ERROR_NO_ERROR) {
res = trx->begin();
}
if (res != TRI_ERROR_NO_ERROR) {
return transactionError(res);
}
enterState(PLAN_INSTANTIATION);
// we have an execution plan in VelocyPack format
plan.reset(ExecutionPlan::instantiateFromVelocyPack(
parser->ast(), _queryBuilder->slice()));
if (plan.get() == nullptr) {
// oops
return QueryResult(TRI_ERROR_INTERNAL);
}
planRegisters = false;
}
TRI_ASSERT(plan.get() != nullptr);
// varsUsedLater and varsValid are unordered_sets and so their orders
// are not the same in the serialized and deserialized plans
// return the V8 context
exitContext();
enterState(EXECUTION);
ExecutionEngine* engine(ExecutionEngine::instantiateFromPlan(
registry, this, plan.get(), planRegisters));
// If all went well so far, then we keep _plan, _parser and _trx and
// return:
_plan = std::move(plan);
_parser = parser.release();
_engine = engine;
return QueryResult();
} catch (arangodb::basics::Exception const& ex) {
cleanupPlanAndEngine(ex.code());
return QueryResult(ex.code(), ex.message() + getStateString());
} catch (std::bad_alloc const&) {
cleanupPlanAndEngine(TRI_ERROR_OUT_OF_MEMORY);
return QueryResult(
TRI_ERROR_OUT_OF_MEMORY,
TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
} catch (std::exception const& ex) {
cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString());
} catch (...) {
cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
return QueryResult(TRI_ERROR_INTERNAL,
TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
}
} catch (arangodb::basics::Exception const& ex) {
return QueryResult(ex.code(), ex.message() + getStateString());
} catch (std::bad_alloc const&) {
return QueryResult(
TRI_ERROR_OUT_OF_MEMORY,
TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
} catch (std::exception const& ex) {
return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString());
} catch (...) {
return QueryResult(TRI_ERROR_INTERNAL,
TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
}
TRI_ASSERT(_trx == nullptr);
// create the transaction object, but do not start it yet
AqlTransaction* trx = new AqlTransaction(
createTransactionContext(), _collections.collections(),
_part == PART_MAIN);
_trx = trx;
// As soon as we start to instantiate the plan we have to clean it
// up before killing the unique_ptr
if (_queryString != nullptr) {
// we have an AST
// optimize the ast
enterState(AST_OPTIMIZATION);
_ast->validateAndOptimize();
enterState(LOADING_COLLECTIONS);
int res = _trx->begin();
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
plan.reset(ExecutionPlan::instantiateFromAst(_ast.get()));
if (plan.get() == nullptr) {
// oops
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "failed to create query execution engine");
}
// Run the query optimizer:
enterState(PLAN_OPTIMIZATION);
arangodb::aql::Optimizer opt(maxNumberOfPlans());
// get enabled/disabled rules
opt.createPlans(plan.release(), getRulesFromOptions(),
inspectSimplePlans());
// Now plan and all derived plans belong to the optimizer
plan.reset(opt.stealBest()); // Now we own the best one again
} else { // no queryString, we are instantiating from _queryBuilder
VPackSlice const querySlice = _queryBuilder->slice();
ExecutionPlan::getCollectionsFromVelocyPack(_ast.get(), querySlice);
_ast->variables()->fromVelocyPack(querySlice);
// creating the plan may have produced some collections
// we need to add them to the transaction now (otherwise the query will
// fail)
enterState(LOADING_COLLECTIONS);
int res = trx->addCollections(*_collections.collections());
if (res == TRI_ERROR_NO_ERROR) {
res = _trx->begin();
}
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
// we have an execution plan in VelocyPack format
plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), _queryBuilder->slice()));
if (plan.get() == nullptr) {
// oops
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "could not create plan from vpack");
}
}
TRI_ASSERT(plan != nullptr);
// varsUsedLater and varsValid are unordered_sets and so their orders
// are not the same in the serialized and deserialized plans
// return the V8 context if we are in one
exitContext();
return plan.release();
}
/// @brief execute an AQL query
@ -625,20 +656,17 @@ QueryResult Query::execute(QueryRegistry* registry) {
try {
bool useQueryCache = canUseQueryCache();
uint64_t queryStringHash = 0;
uint64_t queryStringHash = hash();
if (useQueryCache) {
// hash the query
queryStringHash = hash();
// check the query cache for an existing result
auto cacheEntry = arangodb::aql::QueryCache::instance()->lookup(
_vocbase, queryStringHash, _queryString, _queryLength);
_vocbase, queryStringHash, _queryString, _queryStringLength);
arangodb::aql::QueryCacheResultEntryGuard guard(cacheEntry);
if (cacheEntry != nullptr) {
// got a result from the query cache
QueryResult res(TRI_ERROR_NO_ERROR);
QueryResult res;
// we don't have a transaction yet at this point, so let's create
// a minimal context to build the result
res.context = std::make_shared<StandaloneTransactionContext>(_vocbase);
@ -651,18 +679,15 @@ QueryResult Query::execute(QueryRegistry* registry) {
}
}
QueryResult result = prepare(registry);
if (result.code != TRI_ERROR_NO_ERROR) {
return result;
}
// will throw if it fails
prepare(registry, queryStringHash);
if (_queryString == nullptr) {
// we don't have a query string... now pass the query id to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id));
} else {
// we do have a query string... pass query to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryLength));
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryStringLength));
}
log();
@ -672,20 +697,22 @@ QueryResult Query::execute(QueryRegistry* registry) {
useQueryCache = false;
}
AqlItemBlock* value = nullptr;
VPackOptions options = VPackOptions::Defaults;
options.buildUnindexedArrays = true;
options.buildUnindexedObjects = true;
TRI_ASSERT(_engine != nullptr);
auto resultBuilder = std::make_shared<VPackBuilder>(&options);
resultBuilder->buffer()->reserve(
16 * 1024); // reserve some space in Builder to avoid frequent reallocs
TRI_ASSERT(_engine != nullptr);
// this is the RegisterId our results can be found in
auto const resultRegister = _engine->resultRegister();
AqlItemBlock* value = nullptr;
try {
resultBuilder->openArray();
// this is the RegisterId our results can be found in
auto const resultRegister = _engine->resultRegister();
if (useQueryCache) {
// iterate over result, return it and store it in query cache
@ -712,7 +739,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
if (_warnings.empty()) {
// finally store the generated result in the query cache
auto result = QueryCache::instance()->store(
_vocbase, queryStringHash, _queryString, _queryLength,
_vocbase, queryStringHash, _queryString, _queryStringLength,
resultBuilder, _trx->state()->collectionNames());
if (result == nullptr) {
@ -742,9 +769,19 @@ QueryResult Query::execute(QueryRegistry* registry) {
delete value;
throw;
}
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::execute: before _trx->commit"
<< " this: " << (uintptr_t) this;
_trx->commit();
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::execute: before cleanupPlanAndEngine"
<< " this: " << (uintptr_t) this;
QueryResult result;
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
@ -752,7 +789,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
enterState(FINALIZATION);
result.warnings = warningsToVelocyPack();
result.result = resultBuilder;
result.stats = stats;
@ -797,46 +834,48 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::executeV8"
<< " this: " << (uintptr_t) this;
TRI_ASSERT(registry != nullptr);
std::unique_ptr<AqlWorkStack> work;
try {
bool useQueryCache = canUseQueryCache();
uint64_t queryStringHash = 0;
uint64_t queryStringHash = hash();
if (useQueryCache) {
// hash the query
queryStringHash = hash();
// check the query cache for an existing result
auto cacheEntry = arangodb::aql::QueryCache::instance()->lookup(
_vocbase, queryStringHash, _queryString, _queryLength);
_vocbase, queryStringHash, _queryString, _queryStringLength);
arangodb::aql::QueryCacheResultEntryGuard guard(cacheEntry);
if (cacheEntry != nullptr) {
// got a result from the query cache
QueryResultV8 res(TRI_ERROR_NO_ERROR);
QueryResultV8 result;
// we don't have a transaction yet at this point, so let's create
// a minimal context to build the result
res.context = std::make_shared<StandaloneTransactionContext>(_vocbase);
result.context = std::make_shared<StandaloneTransactionContext>(_vocbase);
v8::Handle<v8::Value> values =
TRI_VPackToV8(isolate, cacheEntry->_queryResult->slice(),
res.context->getVPackOptions());
result.context->getVPackOptions());
TRI_ASSERT(values->IsArray());
res.result = v8::Handle<v8::Array>::Cast(values);
res.cached = true;
return res;
result.result = v8::Handle<v8::Array>::Cast(values);
result.cached = true;
return result;
}
}
QueryResultV8 result = prepare(registry);
if (result.code != TRI_ERROR_NO_ERROR) {
return result;
// will throw if it fails
prepare(registry, queryStringHash);
if (_queryString == nullptr) {
// we don't have a query string... now pass the query id to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id));
} else {
// we do have a query string... pass query to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryStringLength));
}
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryLength));
log();
if (useQueryCache && (_isModificationQuery || !_warnings.empty() ||
@ -844,6 +883,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
useQueryCache = false;
}
QueryResultV8 result;
result.result = v8::Array::New(isolate);
TRI_ASSERT(_engine != nullptr);
@ -884,7 +924,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
if (_warnings.empty()) {
// finally store the generated result in the query cache
QueryCache::instance()->store(_vocbase, queryStringHash, _queryString,
_queryLength, builder,
_queryStringLength, builder,
_trx->state()->collectionNames());
}
} else {
@ -930,6 +970,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
<< " this: " << (uintptr_t) this;
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
@ -946,6 +987,10 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
// patch executionTime stats value in place
// we do this because "executionTime" should include the whole span of the execution and we have to set it at the very end
basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), TRI_microtime() - _startTime);
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::executeV8:returning"
<< " this: " << (uintptr_t) this;
return result;
} catch (arangodb::basics::Exception const& ex) {
@ -1017,7 +1062,7 @@ QueryResult Query::explain() {
int res = _trx->begin();
if (res != TRI_ERROR_NO_ERROR) {
return transactionError(res);
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
@ -1066,7 +1111,7 @@ QueryResult Query::explain() {
result.result = bestPlan->toVelocyPack(parser.ast(), verbosePlans());
// cacheability
result.cached = (_queryString != nullptr && _queryLength > 0 &&
result.cached = (_queryString != nullptr && _queryStringLength > 0 &&
!_isModificationQuery && _warnings.empty() &&
_ast->root()->isCacheable());
}
@ -1091,16 +1136,20 @@ QueryResult Query::explain() {
TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
}
}
void Query::engine(ExecutionEngine* engine) {
_engine.reset(engine);
}
/// @brief get v8 executor
Executor* Query::executor() {
if (_executor == nullptr) {
// the executor is a singleton per query
_executor = new Executor(literalSizeThreshold());
_executor.reset(new Executor(literalSizeThreshold()));
}
TRI_ASSERT(_executor != nullptr);
return _executor;
return _executor.get();
}
/// @brief enter a V8 context
@ -1226,16 +1275,15 @@ void Query::init() {
}
TRI_ASSERT(_id == 0);
TRI_ASSERT(_ast == nullptr);
_id = Query::NextId();
TRI_ASSERT(_id != 0);
TRI_ASSERT(_profile == nullptr);
_profile = new Profile(this);
_profile.reset(new Profile(this));
enterState(INITIALIZATION);
TRI_ASSERT(_ast == nullptr);
_ast = new Ast(this);
_ast.reset(new Ast(this));
}
/// @brief log a query
@ -1245,16 +1293,20 @@ void Query::log() {
LOG_TOPIC(TRACE, Logger::QUERIES)
<< "executing query " << _id << ": '"
<< std::string(_queryString, (std::min)(_queryLength, MaxLength))
.append(_queryLength > MaxLength ? "..." : "") << "'";
<< std::string(_queryString, (std::min)(_queryStringLength, MaxLength))
.append(_queryStringLength > MaxLength ? "..." : "") << "'";
}
}
/// @brief calculate a hash value for the query and bind parameters
uint64_t Query::hash() const {
if (_queryString == nullptr) {
return DontCache;
}
// hash the query string first
uint64_t hash = arangodb::aql::QueryCache::instance()->hashQueryString(
_queryString, _queryLength);
_queryString, _queryStringLength);
// handle "fullCount" option. if this option is set, the query result will
// be different to when it is not set!
@ -1270,6 +1322,14 @@ uint64_t Query::hash() const {
} else {
hash = fasthash64(TRI_CHAR_LENGTH_PAIR("count:false"), hash);
}
// also hash "optimizer" options
VPackSlice options = basics::VelocyPackHelper::EmptyObjectValue();
if (_options != nullptr && _options->slice().isObject()) {
options = _options->slice().get("optimizer");
}
hash ^= options.hash();
// blend query hash with bind parameters
return hash ^ _bindParameters.hash();
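
Everything that can alter a query's result has to be folded into this hash, since it keys both the query cache and (with USE_PLAN_CACHE) the plan cache. Below is a self-contained sketch of the folding pattern, with std::hash standing in for the fasthash64 the real code uses; queryHash and mix are invented names.

#include <cstdint>
#include <functional>
#include <string>

// stand-in mixer; the real code uses fasthash64 from Basics/fasthash.h
static uint64_t mix(std::string const& data, uint64_t seed) {
  return std::hash<std::string>{}(data) ^ (seed * 0x9E3779B97F4A7C15ULL);
}

// sketch of the composite hash: query string, then result-shaping options,
// then the optimizer options and the bind parameters
uint64_t queryHash(std::string const& queryString, bool fullCount, bool count,
                   uint64_t optimizerOptionsHash, uint64_t bindParametersHash) {
  uint64_t hash = mix(queryString, 0);
  hash = mix(fullCount ? "fullcount:true" : "fullcount:false", hash);
  hash = mix(count ? "count:true" : "count:false", hash);
  hash ^= optimizerOptionsHash;  // "optimizer" section of the options
  return hash ^ bindParametersHash;
}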
@ -1277,7 +1337,7 @@ uint64_t Query::hash() const {
/// @brief whether or not the query cache can be used for the query
bool Query::canUseQueryCache() const {
if (_queryString == nullptr || _queryLength < 8) {
if (_queryString == nullptr || _queryStringLength < 8) {
return false;
}
@ -1302,16 +1362,17 @@ bool Query::canUseQueryCache() const {
return false;
}
/// @brief neatly format transaction error to the user.
QueryResult Query::transactionError(int errorCode) const {
/// @brief neatly format exception messages for the users
std::string Query::buildErrorMessage(int errorCode) const {
std::string err(TRI_errno_string(errorCode));
if (_queryString != nullptr && verboseErrors()) {
err +=
std::string("\nwhile executing:\n") + _queryString + std::string("\n");
err += "\nwhile executing:\n";
err.append(_queryString, _queryStringLength);
err += "\n";
}
return QueryResult(errorCode, err);
return err;
}
/// @brief read the "optimizer.inspectSimplePlans" section from the options
@ -1401,8 +1462,7 @@ void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
// shutdown may fail but we must not throw here
// (we're also called from the destructor)
}
delete _engine;
_engine = nullptr;
_engine.reset();
}
if (_trx != nullptr) {
@ -1411,17 +1471,11 @@ void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
_trx = nullptr;
}
if (_parser != nullptr) {
delete _parser;
_parser = nullptr;
}
_plan.reset();
}
/// @brief create a TransactionContext
std::shared_ptr<TransactionContext>
Query::createTransactionContext() {
std::shared_ptr<TransactionContext> Query::createTransactionContext() {
if (_contextOwnedByExterior) {
// we can use v8
return arangodb::V8TransactionContext::Create(_vocbase, true);
@ -1430,7 +1484,7 @@ Query::createTransactionContext() {
return arangodb::StandaloneTransactionContext::Create(_vocbase);
}
/// @brief look up a graph either from our cache list or from the _graphs
/// collection
Graph const* Query::lookupGraphByName(std::string const& name) {
auto it = _graphs.find(name);
@ -1440,7 +1494,7 @@ Graph const* Query::lookupGraphByName(std::string const& name) {
}
std::unique_ptr<arangodb::aql::Graph> g(
arangodb::lookupGraphByName(_vocbase, name));
arangodb::lookupGraphByName(createTransactionContext(), name));
if (g == nullptr) {
return nullptr;
@ -1450,7 +1504,7 @@ Graph const* Query::lookupGraphByName(std::string const& name) {
return g.release();
}
/// @brief returns the next query id
TRI_voc_tick_t Query::NextId() {
return NextQueryId.fetch_add(1, std::memory_order_seq_cst);

View File

@ -59,7 +59,6 @@ class Ast;
class ExecutionEngine;
class ExecutionPlan;
class Executor;
class Parser;
class Query;
class QueryRegistry;
@ -164,10 +163,12 @@ class Query {
char const* queryString() const { return _queryString; }
/// @brief get the length of the query string
size_t queryLength() const { return _queryLength; }
size_t queryLength() const { return _queryStringLength; }
/// @brief getter for _ast
Ast* ast() const { return _ast; }
Ast* ast() const {
return _ast.get();
}
/// @brief should we return verbose plans?
bool verbosePlans() const { return getBooleanOption("verbosePlans", false); }
@ -238,12 +239,8 @@ class Query {
/// @brief register a warning
void registerWarning(int, char const* = nullptr);
/// @brief prepare an AQL query, this is a preparation for execute, but
/// execute calls it internally. The purpose of this separate method is
/// to be able to only prepare a query from VelocyPack and then store it in the
/// QueryRegistry.
QueryResult prepare(QueryRegistry*);
void prepare(QueryRegistry*, uint64_t queryStringHash);
/// @brief execute an AQL query
QueryResult execute(QueryRegistry*);
@ -262,10 +259,10 @@ class Query {
Executor* executor();
/// @brief return the engine, if prepared
ExecutionEngine* engine() { return _engine; }
ExecutionEngine* engine() const { return _engine.get(); }
/// @brief inject the engine
void engine(ExecutionEngine* engine) { _engine = engine; }
void engine(ExecutionEngine* engine);
/// @brief return the transaction, if prepared
inline transaction::Methods* trx() { return _trx; }
@ -333,6 +330,12 @@ class Query {
/// @brief initializes the query
void init();
/// @brief prepare an AQL query, this is a preparation for execute, but
/// execute calls it internally. The purpose of this separate method is
/// to be able to only prepare a query from VelocyPack and then store it in the
/// QueryRegistry.
ExecutionPlan* prepare();
void setExecutionTime();
/// @brief log a query
@ -371,8 +374,8 @@ class Query {
/// @brief read the "optimizer.rules" section from the options
std::vector<std::string> getRulesFromOptions() const;
/// @brief neatly format transaction errors to the user.
QueryResult transactionError(int errorCode) const;
/// @brief neatly format exception messages for the users
std::string buildErrorMessage(int errorCode) const;
/// @brief enter a new state
void enterState(ExecutionState);
@ -400,7 +403,7 @@ class Query {
TRI_vocbase_t* _vocbase;
/// @brief V8 code executor
Executor* _executor;
std::unique_ptr<Executor> _executor;
/// @brief the currently used V8 context
V8Context* _context;
@ -412,7 +415,7 @@ class Query {
char const* _queryString;
/// @brief length of the query string in bytes
size_t const _queryLength;
size_t const _queryStringLength;
/// @brief query in a VelocyPack structure
std::shared_ptr<arangodb::velocypack::Builder> const _queryBuilder;
@ -428,20 +431,17 @@ class Query {
/// @brief _ast, we need an ast to manage the memory for AstNodes, even
/// if we do not have a parser, because AstNodes occur in plans and engines
Ast* _ast;
std::unique_ptr<Ast> _ast;
/// @brief query execution profile
Profile* _profile;
std::unique_ptr<Profile> _profile;
/// @brief current state the query is in (used for profiling and error
/// messages)
ExecutionState _state;
/// @brief the ExecutionPlan object, if the query is prepared
std::unique_ptr<ExecutionPlan> _plan;
/// @brief the Parser object, if the query is prepared
Parser* _parser;
std::shared_ptr<ExecutionPlan> _plan;
/// @brief the transaction object, in a distributed query every part of
/// the query has its own transaction object. The transaction object is
@ -449,7 +449,7 @@ class Query {
transaction::Methods* _trx;
/// @brief the ExecutionEngine object, if the query is prepared
ExecutionEngine* _engine;
std::unique_ptr<ExecutionEngine> _engine;
/// @brief maximum number of warnings
size_t _maxWarningCount;

View File

@ -21,7 +21,7 @@
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "Aql/QueryCache.h"
#include "QueryCache.h"
#include "Basics/fasthash.h"
#include "Basics/Exceptions.h"
#include "Basics/MutexLocker.h"

View File

@ -55,6 +55,12 @@ QueryResources::~QueryResources() {
_resourceMonitor->decreaseMemoryUsage(_nodes.size() * sizeof(AstNode) + _nodes.capacity() * sizeof(AstNode*));
}
// TODO: FIXME - gives up tracking of all registered strings and nodes
// without freeing them; presumably the caller takes over their ownership
void QueryResources::steal() {
_strings.clear();
_nodes.clear();
}
/// @brief add a node to the list of nodes
void QueryResources::addNode(AstNode* node) {

View File

@ -41,7 +41,9 @@ class QueryResources {
explicit QueryResources(ResourceMonitor*);
~QueryResources();
void steal();
/// @brief add a node to the list of nodes
void addNode(AstNode*);

View File

@ -44,6 +44,7 @@ struct QueryResultV8 : public QueryResult {
QueryResultV8(int code, std::string const& details)
: QueryResult(code, details), result() {}
QueryResultV8() : QueryResult(TRI_ERROR_NO_ERROR) {}
explicit QueryResultV8(int code) : QueryResult(code, ""), result() {}
v8::Handle<v8::Array> result;

View File

@ -26,6 +26,7 @@
#include "Aql/ClusterBlocks.h"
#include "Aql/ExecutionBlock.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Query.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Basics/VPackStringBufferAdapter.h"
@ -95,14 +96,18 @@ void RestAqlHandler::createQueryFromVelocyPack() {
VelocyPackHelper::getStringValue(querySlice, "part", "");
auto planBuilder = std::make_shared<VPackBuilder>(VPackBuilder::clone(plan));
auto query = new Query(false, _vocbase, planBuilder, options,
(part == "main" ? PART_MAIN : PART_DEPENDENT));
QueryResult res = query->prepare(_queryRegistry);
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << res.details;
generateError(rest::ResponseCode::BAD,
TRI_ERROR_QUERY_BAD_JSON_PLAN, res.details);
delete query;
auto query = std::make_unique<Query>(false, _vocbase, planBuilder, options,
(part == "main" ? PART_MAIN : PART_DEPENDENT));
try {
query->prepare(_queryRegistry, 0);
} catch (std::exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what();
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what());
return;
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query";
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN);
return;
}
@ -116,14 +121,15 @@ void RestAqlHandler::createQueryFromVelocyPack() {
}
_qId = TRI_NewTickServer();
auto transactionContext = query->trx()->transactionContext().get();
try {
_queryRegistry->insert(_qId, query, ttl);
_queryRegistry->insert(_qId, query.get(), ttl);
query.release();
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry";
generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
"could not keep query in registry");
delete query;
return;
}
@ -139,8 +145,7 @@ void RestAqlHandler::createQueryFromVelocyPack() {
return;
}
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(),
query->trx()->transactionContext().get());
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext);
}
// POST method for /_api/aql/parse (internal)
@ -165,14 +170,12 @@ void RestAqlHandler::parseQuery() {
return;
}
auto query =
new Query(false, _vocbase, queryString.c_str(), queryString.size(),
auto query = std::make_unique<Query>(false, _vocbase, queryString.c_str(), queryString.size(),
std::shared_ptr<VPackBuilder>(), nullptr, PART_MAIN);
QueryResult res = query->parse();
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the Query: " << res.details;
generateError(rest::ResponseCode::BAD, res.code, res.details);
delete query;
return;
}
@ -306,15 +309,19 @@ void RestAqlHandler::createQueryFromString() {
auto options = std::make_shared<VPackBuilder>(
VPackBuilder::clone(querySlice.get("options")));
auto query = new Query(false, _vocbase, queryString.c_str(),
auto query = std::make_unique<Query>(false, _vocbase, queryString.c_str(),
queryString.size(), bindVars, options,
(part == "main" ? PART_MAIN : PART_DEPENDENT));
QueryResult res = query->prepare(_queryRegistry);
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << res.details;
generateError(rest::ResponseCode::BAD,
TRI_ERROR_QUERY_BAD_JSON_PLAN, res.details);
delete query;
try {
query->prepare(_queryRegistry, 0);
} catch (std::exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what();
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what());
return;
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query";
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN);
return;
}
@ -327,15 +334,16 @@ void RestAqlHandler::createQueryFromString() {
ttl = arangodb::basics::StringUtils::doubleDecimal(ttlstring);
}
auto transactionContext = query->trx()->transactionContext().get();
_qId = TRI_NewTickServer();
try {
_queryRegistry->insert(_qId, query, ttl);
_queryRegistry->insert(_qId, query.get(), ttl);
query.release();
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry";
generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
"could not keep query in registry");
delete query;
return;
}
@ -351,8 +359,7 @@ void RestAqlHandler::createQueryFromString() {
return;
}
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(),
query->trx()->transactionContext().get());
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext);
}
// PUT method for /_api/aql/<operation>/<queryId>, (internal)

View File

@ -25,6 +25,7 @@
#include "Aql/AqlItemBlock.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Utils/OperationCursor.h"
#include "Transaction/Methods.h"
#include "VocBase/EdgeCollectionInfo.h"

View File

@ -28,6 +28,7 @@
#include "Aql/Ast.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Cluster/ClusterComm.h"
#include "Indexes/Index.h"
#include "Utils/CollectionNameResolver.h"
@ -178,6 +179,12 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan, size_t id,
} else {
addEdgeColl(eColName, dir);
}
if (dir == TRI_EDGE_ANY) {
// collection with direction ANY must be added again
_graphInfo.add(VPackValue(eColName));
}
}
_graphInfo.close();
} else {
@ -337,9 +344,17 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan,
THROW_ARANGO_EXCEPTION(TRI_ERROR_GRAPH_NOT_FOUND);
}
auto eColls = _graphObj->edgeCollections();
for (auto const& n : eColls) {
_edgeColls.push_back(n);
auto const& eColls = _graphObj->edgeCollections();
for (auto const& it : eColls) {
_edgeColls.push_back(it);
// if there are twice as many directions as collections, this means we
// have a shortest path with direction ANY, so we must add each
// collection twice
if (_directions.size() == 2 * eColls.size()) {
// add collection again
_edgeColls.push_back(it);
}
}
} else {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_BAD_JSON_PLAN,
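
A tiny standalone sketch of the invariant described in the comments above (collection names invented): with direction ANY there are twice as many direction entries as edge collections, so each collection is pushed twice.

#include <cassert>
#include <string>
#include <vector>

int main() {
  // two edge collections traversed with direction ANY expand to
  // four direction entries (OUT and IN per collection)
  std::vector<std::string> const eColls = {"friendsWith", "worksAt"};
  std::vector<std::string> const directions = {"OUT", "IN", "OUT", "IN"};

  std::vector<std::string> edgeColls;
  for (auto const& it : eColls) {
    edgeColls.push_back(it);
    if (directions.size() == 2 * eColls.size()) {
      edgeColls.push_back(it);  // ANY: the collection is added again
    }
  }
  assert(edgeColls.size() == directions.size());
  return 0;
}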

View File

@ -26,22 +26,23 @@
using namespace arangodb::aql;
ShortestPathOptions::ShortestPathOptions(VPackSlice const& slice) {
VPackSlice obj = slice.get("shortestpathFlags");
weightAttribute = "";
if (obj.hasKey("weightAttribute")) {
VPackSlice v = obj.get("weightAttribute");
if (v.isString()) {
weightAttribute = v.copyString();
ShortestPathOptions::ShortestPathOptions(VPackSlice const& slice)
: weightAttribute(), defaultWeight(1) {
VPackSlice obj = slice.get("shortestPathFlags");
if (obj.isObject()) {
if (obj.hasKey("weightAttribute")) {
VPackSlice v = obj.get("weightAttribute");
if (v.isString()) {
weightAttribute = v.copyString();
}
}
}
defaultWeight = 1;
if (obj.hasKey("defaultWeight")) {
VPackSlice v = obj.get("defaultWeight");
if (v.isNumber()) {
defaultWeight = v.getNumericValue<double>();
if (obj.hasKey("defaultWeight")) {
VPackSlice v = obj.get("defaultWeight");
if (v.isNumber()) {
defaultWeight = v.getNumericValue<double>();
}
}
}
}
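
For reference, a hedged sketch of the slice shape the reworked constructor above expects; the attribute values are invented for illustration, and a slice without "shortestPathFlags" now simply yields the defaults.

#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>

// builds { "shortestPathFlags": { "weightAttribute": "distance",
//                                 "defaultWeight": 1.0 } }
VPackBuilder makeShortestPathFlags() {
  VPackBuilder builder;
  builder.openObject();
  builder.add("shortestPathFlags", VPackValue(VPackValueType::Object));
  builder.add("weightAttribute", VPackValue("distance"));
  builder.add("defaultWeight", VPackValue(1.0));
  builder.close();  // shortestPathFlags
  builder.close();
  return builder;
}

// usage: ShortestPathOptions options(makeShortestPathFlags().slice());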

View File

@ -39,7 +39,7 @@ struct ShortestPathOptions {
/// @brief constructor, using default values
ShortestPathOptions()
: weightAttribute(""),
: weightAttribute(),
defaultWeight(1) {}
void toVelocyPack(arangodb::velocypack::Builder&) const;

View File

@ -85,6 +85,10 @@ SortCondition::SortCondition(
if (node->type == NODE_TYPE_REFERENCE) {
handled = true;
if (fieldNames.size() > 1) {
std::reverse(fieldNames.begin(), fieldNames.end());
}
_fields.emplace_back(std::make_pair(
static_cast<Variable const*>(node->getData()), fieldNames));
@ -146,7 +150,7 @@ size_t SortCondition::coveredAttributes(
}
auto const& field = _fields[fieldsPosition];
// ...and check if the field is present in the index definition too
if (reference == field.first &&
arangodb::basics::AttributeName::isIdentical(field.second, indexAttributes[i], false)) {

View File

@ -27,6 +27,7 @@
#include "Aql/ExecutionNode.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Functions.h"
#include "Aql/Query.h"
#include "Basics/ScopeGuard.h"
#include "Basics/StringRef.h"
#include "Cluster/ClusterComm.h"

View File

@ -103,7 +103,57 @@ static AstNode* BuildExpansionReplacement(Ast* ast, AstNode const* condition, As
return ast->createNodeBinaryOperator(type, lhs, rhs);
}
static inline bool IsSupportedNode(AstNode const* node) {
static bool IsSupportedNode(Variable const* pathVar, AstNode const* node) {
// do a quick first check for all comparisons
switch (node->type) {
case NODE_TYPE_OPERATOR_BINARY_ARRAY_EQ:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_NE:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_LT:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_LE:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_GT:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_GE:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_IN:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN:
case NODE_TYPE_OPERATOR_BINARY_EQ:
case NODE_TYPE_OPERATOR_BINARY_NE:
case NODE_TYPE_OPERATOR_BINARY_LT:
case NODE_TYPE_OPERATOR_BINARY_LE:
case NODE_TYPE_OPERATOR_BINARY_GT:
case NODE_TYPE_OPERATOR_BINARY_GE:
case NODE_TYPE_OPERATOR_BINARY_IN:
case NODE_TYPE_OPERATOR_BINARY_NIN: {
// the following types of expressions are not supported
// p.edges[0]._from op whatever attribute access
// whatever attribute access op p.edges[0]._from
AstNode const* lhs = node->getMember(0);
AstNode const* rhs = node->getMember(1);
if (lhs->isAttributeAccessForVariable(pathVar, true)) {
// p.xxx op whatever
if (rhs->type != NODE_TYPE_VALUE &&
rhs->type != NODE_TYPE_ARRAY &&
rhs->type != NODE_TYPE_OBJECT &&
rhs->type != NODE_TYPE_REFERENCE) {
return false;
}
} else if (rhs->isAttributeAccessForVariable(pathVar, true)) {
// whatever op p.xxx
if (lhs->type != NODE_TYPE_VALUE &&
lhs->type != NODE_TYPE_ARRAY &&
lhs->type != NODE_TYPE_OBJECT &&
lhs->type != NODE_TYPE_REFERENCE) {
return false;
}
}
break;
}
default: {
// intentionally no other cases defined...
// we'll simply fall through to the next switch..case statement
break;
}
}
switch (node->type) {
case NODE_TYPE_VARIABLE:
case NODE_TYPE_OPERATOR_UNARY_PLUS:
@ -169,7 +219,7 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent,
Variable const* pathVar,
bool& conditionIsImpossible) {
AstNode* node = parent->getMemberUnchecked(testIndex);
if (!IsSupportedNode(node)) {
if (!IsSupportedNode(pathVar, node)) {
return false;
}
// We need to walk through each branch and validate:
@ -193,11 +243,11 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent,
// We define that patternStep >= 6 is complete Match.
unsigned char patternStep = 0;
auto supportedGuard = [&notSupported](AstNode const* n, void*) -> bool {
auto supportedGuard = [&notSupported, pathVar](AstNode const* n, void*) -> bool {
if (notSupported) {
return false;
}
if (!IsSupportedNode(n)) {
if (!IsSupportedNode(pathVar, n)) {
notSupported = true;
return false;
}
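
To make the comment examples above concrete, here are two illustrative AQL filters over a path variable p, kept as C++ string literals (graph, collection, and attribute names invented). The first shape survives the screen in IsSupportedNode(); the second is rejected because the path access is compared against another attribute access.

// accepted by the screen above: path attribute access vs. constant value
char const* supportedFilter =
    "FOR v, e, p IN 2..3 OUTBOUND 'verts/1' edges "
    "FILTER p.edges[0].weight == 5 RETURN p";

// rejected: p.edges[0]._from compared against another attribute access
char const* unsupportedFilter =
    "FOR v, e, p IN 2..3 OUTBOUND 'verts/1' edges "
    "FILTER p.edges[0]._from == v.someAttribute RETURN p";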

View File

@ -26,8 +26,9 @@
////////////////////////////////////////////////////////////////////////////////
#include "TraversalNode.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Ast.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Aql/SortCondition.h"
#include "Cluster/ClusterComm.h"
#include "Indexes/Index.h"

View File

@ -33,10 +33,11 @@
using namespace arangodb::aql;
/// @brief create the generator
VariableGenerator::VariableGenerator() : _variables(), _id(0) {
VariableGenerator::VariableGenerator()
: _variables(), _id(0) {
_variables.reserve(8);
}
/// @brief destroy the generator
VariableGenerator::~VariableGenerator() {
// free all variables
@ -67,79 +68,50 @@ Variable* VariableGenerator::createVariable(char const* name, size_t length,
bool isUserDefined) {
TRI_ASSERT(name != nullptr);
auto variable = new Variable(std::string(name, length), nextId());
auto variable = std::make_unique<Variable>(std::string(name, length), nextId());
if (isUserDefined) {
TRI_ASSERT(variable->isUserDefined());
}
try {
_variables.emplace(variable->id, variable);
} catch (...) {
// prevent memleak
delete variable;
throw;
}
return variable;
_variables.emplace(variable->id, variable.get());
return variable.release();
}
/// @brief generate a variable
Variable* VariableGenerator::createVariable(std::string const& name,
bool isUserDefined) {
auto variable = new Variable(name, nextId());
auto variable = std::make_unique<Variable>(name, nextId());
if (isUserDefined) {
TRI_ASSERT(variable->isUserDefined());
}
try {
_variables.emplace(variable->id, variable);
} catch (...) {
// prevent memleak
delete variable;
throw;
}
return variable;
_variables.emplace(variable->id, variable.get());
return variable.release();
}
Variable* VariableGenerator::createVariable(Variable const* original) {
TRI_ASSERT(original != nullptr);
auto variable = original->clone();
std::unique_ptr<Variable> variable(original->clone());
try {
_variables.emplace(variable->id, variable);
} catch (...) {
// prevent memleak
delete variable;
throw;
}
return variable;
_variables.emplace(variable->id, variable.get());
return variable.release();
}
/// @brief generate a variable from VelocyPack
Variable* VariableGenerator::createVariable(
VPackSlice const slice) {
auto variable = new Variable(slice);
Variable* VariableGenerator::createVariable(VPackSlice const slice) {
auto variable = std::make_unique<Variable>(slice);
auto existing = getVariable(variable->id);
if (existing != nullptr) {
// variable already existed.
delete variable;
return existing;
}
try {
_variables.emplace(variable->id, variable);
} catch (...) {
// prevent memleak
delete variable;
throw;
}
return variable;
_variables.emplace(variable->id, variable.get());
return variable.release();
}
/// @brief generate a temporary variable

View File

@ -35,13 +35,16 @@ class VariableGenerator {
public:
/// @brief create the generator
VariableGenerator();
VariableGenerator(VariableGenerator const& other) = delete;
VariableGenerator& operator=(VariableGenerator const& other) = delete;
/// @brief destroy the generator
~VariableGenerator();
public:
/// @brief return a map of all variable ids with their names
std::unordered_map<VariableId, std::string const> variables(bool) const;
std::unordered_map<VariableId, std::string const> variables(bool includeTemporaries) const;
/// @brief generate a variable
Variable* createVariable(char const*, size_t, bool);

View File

@ -143,6 +143,7 @@ SET(ARANGOD_SOURCES
Aql/Optimizer.cpp
Aql/OptimizerRules.cpp
Aql/Parser.cpp
Aql/PlanCache.cpp
Aql/Quantifier.cpp
Aql/Query.cpp
Aql/QueryCache.cpp

View File

@ -46,9 +46,7 @@ AgencyCallbackRegistry::AgencyCallbackRegistry(std::string const& callbackBasePa
AgencyCallbackRegistry::~AgencyCallbackRegistry() {
}
AgencyCommResult AgencyCallbackRegistry::registerCallback(
std::shared_ptr<AgencyCallback> cb) {
bool AgencyCallbackRegistry::registerCallback(std::shared_ptr<AgencyCallback> cb) {
uint32_t rand;
{
WRITE_LOCKER(locker, _lock);
@ -60,28 +58,23 @@ AgencyCommResult AgencyCallbackRegistry::registerCallback(
}
}
AgencyCommResult result;
bool ok = false;
try {
result = _agency.registerCallback(cb->key, getEndpointUrl(rand));
if (!result.successful()) {
LOG_TOPIC(ERR, arangodb::Logger::AGENCY)
<< "Registering callback failed with " << result.errorCode() << ": "
<< result.errorMessage();
ok = _agency.registerCallback(cb->key, getEndpointUrl(rand)).successful();
if (!ok) {
LOG_TOPIC(ERR, Logger::CLUSTER) << "Registering callback failed";
}
} catch (std::exception const& e) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Couldn't register callback " << e.what();
LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't register callback " << e.what();
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
LOG_TOPIC(ERR, Logger::CLUSTER)
<< "Couldn't register callback. Unknown exception";
}
if (!result.successful()) {
if (!ok) {
WRITE_LOCKER(locker, _lock);
_endpoints.erase(rand);
}
return result;
return ok;
}
std::shared_ptr<AgencyCallback> AgencyCallbackRegistry::getCallback(uint32_t id) {

View File

@ -44,7 +44,7 @@ public:
//////////////////////////////////////////////////////////////////////////////
/// @brief register a callback
//////////////////////////////////////////////////////////////////////////////
AgencyCommResult registerCallback(std::shared_ptr<AgencyCallback>);
bool registerCallback(std::shared_ptr<AgencyCallback>);
//////////////////////////////////////////////////////////////////////////////
/// @brief unregister a callback

View File

@ -512,12 +512,12 @@ void ClusterInfo::loadPlan() {
// This should not happen in healthy situations.
// If it happens in unhealthy situations the
// cluster should not fail.
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Failed to load information for collection '"
<< collectionId << "': " << ex.what()
<< ". invalid information in plan. The collection will "
"be ignored for now and the invalid information will "
"be repaired. VelocyPack: "
<< collectionSlice.toJson();
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Failed to load information for collection '" << collectionId
<< "': " << ex.what() << ". invalid information in plan. The"
"collection will be ignored for now and the invalid information"
"will be repaired. VelocyPack: "
<< collectionSlice.toJson();
TRI_ASSERT(false);
continue;
@ -526,12 +526,12 @@ void ClusterInfo::loadPlan() {
// This should not happen in healthy situations.
// If it happens in unhealthy situations the
// cluster should not fail.
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Failed to load information for collection '"
<< collectionId
<< ". invalid information in plan. The collection will "
"be ignored for now and the invalid information will "
"be repaired. VelocyPack: "
<< collectionSlice.toJson();
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Failed to load information for collection '" << collectionId
<< ". invalid information in plan. The collection will "
"be ignored for now and the invalid information will "
"be repaired. VelocyPack: "
<< collectionSlice.toJson();
TRI_ASSERT(false);
continue;
@ -886,14 +886,7 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name,
// AgencyCallback for this.
auto agencyCallback = std::make_shared<AgencyCallback>(
ac, "Current/Databases/" + name, dbServerChanged, true, false);
auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback);
if (!regres.successful()) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Could not register call back with error: " << regres.errorCode() <<
" - " << regres.errorMessage();
}
_agencyCallbackRegistry->registerCallback(agencyCallback);
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback));
AgencyOperation newVal("Plan/Databases/" + name,
@ -988,14 +981,7 @@ int ClusterInfo::dropDatabaseCoordinator(std::string const& name,
// AgencyCallback for this.
auto agencyCallback =
std::make_shared<AgencyCallback>(ac, where, dbServerChanged, true, false);
auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback);
if (!regres.successful()) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Could not register call back with error: " << regres.errorCode() <<
" - " << regres.errorMessage();
}
_agencyCallbackRegistry->registerCallback(agencyCallback);
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback));
// Transact to agency
@ -1151,14 +1137,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
auto agencyCallback = std::make_shared<AgencyCallback>(
ac, "Current/Collections/" + databaseName + "/" + collectionID,
dbServerChanged, true, false);
auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback);
if (!regres.successful()) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Could not register call back with error: " << regres.errorCode() <<
" - " << regres.errorMessage();
}
_agencyCallbackRegistry->registerCallback(agencyCallback);
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback));
VPackBuilder builder;
@ -1286,14 +1265,7 @@ int ClusterInfo::dropCollectionCoordinator(std::string const& databaseName,
// AgencyCallback for this.
auto agencyCallback =
std::make_shared<AgencyCallback>(ac, where, dbServerChanged, true, false);
auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback);
if (!regres.successful()) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Could not register call back with error: " << regres.errorCode() <<
" - " << regres.errorMessage();
}
_agencyCallbackRegistry->registerCallback(agencyCallback);
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback));
size_t numberOfShards = 0;
@ -1544,8 +1516,6 @@ int ClusterInfo::ensureIndexCoordinator(
if (idxSlice.isString()) {
// use predefined index id
iid = arangodb::basics::StringUtils::uint64(idxSlice.copyString());
} else if (idxSlice.isNumber()) {
iid = idxSlice.getNumber<uint64_t>();
}
if (iid == 0) {
@ -1776,14 +1746,7 @@ int ClusterInfo::ensureIndexCoordinator(
// AgencyCallback for this.
auto agencyCallback =
std::make_shared<AgencyCallback>(ac, where, dbServerChanged, true, false);
auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback);
if (!regres.successful()) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Could not register call back with error: " << regres.errorCode() <<
" - " << regres.errorMessage();
}
_agencyCallbackRegistry->registerCallback(agencyCallback);
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback));
AgencyOperation newValue(key, AgencyValueOperationType::SET,
@ -1810,7 +1773,6 @@ int ClusterInfo::ensureIndexCoordinator(
errorMsg += trx.toJson();
errorMsg += "ClientId: " + result._clientId + " ";
errorMsg += " ResultCode: " + std::to_string(result.errorCode()) + " ";
errorMsg += " Result: " + result.errorMessage() + " ";
errorMsg += std::string(__FILE__) + ":" + std::to_string(__LINE__);
resultBuilder = *resBuilder;
}
@ -1950,14 +1912,7 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName,
// AgencyCallback for this.
auto agencyCallback =
std::make_shared<AgencyCallback>(ac, where, dbServerChanged, true, false);
auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback);
if (!regres.successful()) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Could not register call back with error: " << regres.errorCode() <<
" - " << regres.errorMessage();
}
_agencyCallbackRegistry->registerCallback(agencyCallback);
TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback));
loadPlan();
@ -2440,26 +2395,28 @@ std::shared_ptr<std::vector<ServerID>> ClusterInfo::getResponsibleServer(
while (true) {
{
READ_LOCKER(readLocker, _currentProt.lock);
// _shardIds is a map-type <ShardId,
// std::shared_ptr<std::vector<ServerId>>>
auto it = _shardIds.find(shardID);
{
READ_LOCKER(readLocker, _currentProt.lock);
// _shardIds is a map-type <ShardId,
// std::shared_ptr<std::vector<ServerId>>>
auto it = _shardIds.find(shardID);
if (it != _shardIds.end()) {
auto serverList = (*it).second;
if (serverList != nullptr && serverList->size() > 0 &&
(*serverList)[0].size() > 0 && (*serverList)[0][0] == '_') {
// This is a temporary situation in which the leader has already
// resigned, let's wait half a second and try again.
--tries;
LOG_TOPIC(INFO, Logger::CLUSTER)
<< "getResponsibleServer: found resigned leader,"
<< "waiting for half a second...";
usleep(500000);
} else {
return (*it).second;
if (it != _shardIds.end()) {
auto serverList = (*it).second;
if (serverList != nullptr && serverList->size() > 0 &&
(*serverList)[0].size() > 0 && (*serverList)[0][0] == '_') {
// This is a temporary situation in which the leader has already
// resigned, let's wait half a second and try again.
--tries;
LOG_TOPIC(INFO, Logger::CLUSTER)
<< "getResponsibleServer: found resigned leader,"
<< "waiting for half a second...";
} else {
return (*it).second;
}
}
}
usleep(500000);
}
if (++tries >= 2) {
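
The restructured loop above moves the usleep out of the READ_LOCKER scope, so the half-second wait for a resigned leader no longer blocks writers that need the lock. A generic sketch of the pattern, standard library only (names and types assumed):

    #include <chrono>
    #include <functional>
    #include <optional>
    #include <shared_mutex>
    #include <thread>

    // Hedged sketch: take the read lock only for the lookup and release it
    // before sleeping, so a waiting reader never stalls writers.
    std::optional<int> lookupWithRetry(std::shared_mutex& lock,
                                       std::function<std::optional<int>()> const& find) {
      for (int tries = 0; tries < 2; ++tries) {
        {
          std::shared_lock<std::shared_mutex> guard(lock);
          if (auto value = find()) {
            return value;  // found a usable entry: return while still locked
          }
        }  // lock released here
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
      }
      return std::nullopt;  // give up after the retries are exhausted
    }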


@ -241,7 +241,7 @@ void HeartbeatThread::runDBServer() {
bool registered = false;
while (!registered) {
registered =
_agencyCallbackRegistry->registerCallback(planAgencyCallback).successful();
_agencyCallbackRegistry->registerCallback(planAgencyCallback);
if (!registered) {
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Couldn't register plan change in agency!";


@ -213,13 +213,17 @@ static bool SetRequestContext(GeneralRequest* request, void* data) {
if (vocbase == nullptr) {
return false;
}
TRI_ASSERT(!vocbase->isDangling());
// database needs upgrade
if (vocbase->state() == TRI_vocbase_t::State::FAILED_VERSION) {
request->setRequestPath("/_msg/please-upgrade");
vocbase->release();
return false;
}
// the vocbase context is now responsible for releasing the vocbase
VocbaseContext* ctx = new arangodb::VocbaseContext(request, vocbase);
request->setRequestContext(ctx, true);


@ -1645,6 +1645,9 @@ void MMFilesCollection::open(bool ignoreErrors) {
arangodb::SingleCollectionTransaction trx(
arangodb::StandaloneTransactionContext::Create(vocbase), cid,
AccessMode::Type::WRITE);
// the underlying collections must not be locked here because the "load"
// routine can be invoked from any other place, e.g. from an AQL query
trx.addHint(transaction::Hints::Hint::LOCK_NEVER);
// build the primary index
double startIterate = TRI_microtime();
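
The LOCK_NEVER hint added above tells the transaction not to acquire collection locks itself, since open() may be reached from code that already holds them (e.g. an AQL query). Hints of this kind are typically a small bitset; a hedged sketch of such a type (not ArangoDB's actual definition):

    #include <cstdint>

    // Hypothetical hint set, sketched after the transaction::Hints usage above.
    class Hints {
     public:
      enum class Hint : uint32_t {
        NONE = 0,
        LOCK_NEVER = 1,        // caller guarantees required locks are already held
        SINGLE_OPERATION = 2,  // other hints omitted
      };

      void set(Hint h) { _value |= static_cast<uint32_t>(h); }
      bool has(Hint h) const {
        return (_value & static_cast<uint32_t>(h)) != 0;
      }

     private:
      uint32_t _value = 0;
    };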
@ -1930,7 +1933,6 @@ bool MMFilesCollection::dropIndex(TRI_idx_iid_t iid) {
}
auto vocbase = _logicalCollection->vocbase();
arangodb::aql::QueryCache::instance()->invalidate(vocbase, _logicalCollection->name());
if (!_logicalCollection->removeIndex(iid)) {
// We tried to remove an index that does not exist
events::DropIndex("", std::to_string(iid),


@ -535,7 +535,7 @@ void MMFilesLogfileManager::unprepare() {
}
// registers a transaction
int MMFilesLogfileManager::registerTransaction(TRI_voc_tid_t transactionId) {
int MMFilesLogfileManager::registerTransaction(TRI_voc_tid_t transactionId, bool isReadOnlyTransaction) {
auto lastCollectedId = _lastCollectedId.load();
auto lastSealedId = _lastSealedId.load();
@ -546,6 +546,16 @@ int MMFilesLogfileManager::registerTransaction(TRI_voc_tid_t transactionId) {
TRI_ASSERT(lastCollectedId <= lastSealedId);
if (isReadOnlyTransaction) {
// in case this is a read-only transaction, we know that it can only see
// committed data (it will not write anything itself, and write transactions
// run exclusively). we can therefore allow the WAL collector to seal and
// collect logfiles already. the only thing that must be ensured for read-only
// transactions is that a logfile does not get thrown away while the read-only
// transaction is still ongoing
lastSealedId = 0;
}
try {
auto data = std::make_unique<MMFilesTransactionData>(lastCollectedId, lastSealedId);
TransactionManagerFeature::MANAGER->registerTransaction(transactionId, std::move(data));
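
The comment above carries the core reasoning of this change: a read-only transaction only needs its current logfile kept alive; it does not need to stall sealing or collection. A sketch of the registered snapshot under that assumption (simplified types, not the real MMFilesTransactionData):

    #include <cstdint>

    // Hedged sketch of the per-transaction snapshot. Forcing lastSealedId to 0
    // for read-only transactions means the collector's minimum scan (which
    // skips zeros) is not pinned by readers.
    struct TransactionSnapshot {
      uint64_t lastCollectedId;  // still protects the logfile from removal
      uint64_t lastSealedId;     // 0 == does not block sealing/collection
    };

    TransactionSnapshot makeSnapshot(uint64_t lastCollected, uint64_t lastSealed,
                                     bool isReadOnly) {
      if (isReadOnly) {
        lastSealed = 0;  // readers only see committed data; don't stall the collector
      }
      return {lastCollected, lastSealed};
    }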
@ -1336,8 +1346,8 @@ MMFilesWalLogfile* MMFilesLogfileManager::getCollectableLogfile() {
// iterate over all active readers and find their minimum used logfile id
MMFilesWalLogfile::IdType minId = UINT64_MAX;
auto cb = [&minId](TRI_voc_tid_t, TransactionData* data) {
MMFilesWalLogfile::IdType lastWrittenId = static_cast<MMFilesTransactionData*>(data)->lastSealedId;
auto cb = [&minId](TRI_voc_tid_t, TransactionData const* data) {
MMFilesWalLogfile::IdType lastWrittenId = static_cast<MMFilesTransactionData const*>(data)->lastSealedId;
if (lastWrittenId < minId && lastWrittenId != 0) {
minId = lastWrittenId;
@ -1383,8 +1393,8 @@ MMFilesWalLogfile* MMFilesLogfileManager::getRemovableLogfile() {
MMFilesWalLogfile::IdType minId = UINT64_MAX;
// iterate over all active transactions and find their minimum used logfile id
auto cb = [&minId](TRI_voc_tid_t, TransactionData* data) {
MMFilesWalLogfile::IdType lastCollectedId = static_cast<MMFilesTransactionData*>(data)->lastCollectedId;
auto cb = [&minId](TRI_voc_tid_t, TransactionData const* data) {
MMFilesWalLogfile::IdType lastCollectedId = static_cast<MMFilesTransactionData const*>(data)->lastCollectedId;
if (lastCollectedId < minId && lastCollectedId != 0) {
minId = lastCollectedId;
@ -1553,15 +1563,15 @@ MMFilesLogfileManager::runningTransactions() {
MMFilesWalLogfile::IdType lastCollectedId = UINT64_MAX;
MMFilesWalLogfile::IdType lastSealedId = UINT64_MAX;
auto cb = [&count, &lastCollectedId, &lastSealedId](TRI_voc_tid_t, TransactionData* data) {
auto cb = [&count, &lastCollectedId, &lastSealedId](TRI_voc_tid_t, TransactionData const* data) {
++count;
MMFilesWalLogfile::IdType value = static_cast<MMFilesTransactionData*>(data)->lastCollectedId;
MMFilesWalLogfile::IdType value = static_cast<MMFilesTransactionData const*>(data)->lastCollectedId;
if (value < lastCollectedId && value != 0) {
lastCollectedId = value;
}
value = static_cast<MMFilesTransactionData*>(data)->lastSealedId;
value = static_cast<MMFilesTransactionData const*>(data)->lastSealedId;
if (value < lastSealedId && value != 0) {
lastSealedId = value;
}
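
On the collector side, the callbacks above take the minimum logfile id still referenced by any running transaction, skipping the 0 sentinel that read-only transactions now report for lastSealedId. Condensed into a standalone function:

    #include <cstdint>
    #include <vector>

    // Hedged sketch: minimum over the non-zero ids; 0 means "pins nothing".
    uint64_t minReferencedLogfileId(std::vector<uint64_t> const& ids) {
      uint64_t minId = UINT64_MAX;
      for (uint64_t id : ids) {
        if (id != 0 && id < minId) {
          minId = id;
        }
      }
      return minId;  // UINT64_MAX if no transaction pins a logfile
    }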


@ -68,8 +68,8 @@ struct MMFilesTransactionData final : public TransactionData {
MMFilesTransactionData() = delete;
MMFilesTransactionData(MMFilesWalLogfile::IdType lastCollectedId, MMFilesWalLogfile::IdType lastSealedId) :
lastCollectedId(lastCollectedId), lastSealedId(lastSealedId) {}
MMFilesWalLogfile::IdType lastCollectedId;
MMFilesWalLogfile::IdType lastSealedId;
MMFilesWalLogfile::IdType const lastCollectedId;
MMFilesWalLogfile::IdType const lastSealedId;
};
struct MMFilesLogfileManagerState {
@ -218,7 +218,7 @@ class MMFilesLogfileManager final : public application_features::ApplicationFeat
}
// registers a transaction
int registerTransaction(TRI_voc_tid_t);
int registerTransaction(TRI_voc_tid_t id, bool isReadOnlyTransaction);
// return the set of dropped collections
/// this is used during recovery and not used afterwards
@ -459,8 +459,8 @@ class MMFilesLogfileManager final : public application_features::ApplicationFeat
bool _allowOversizeEntries = true;
bool _useMLock = false;
std::string _directory = "";
uint32_t _historicLogfiles = 10;
std::string _directory;
uint32_t _historicLogfiles = 10;
bool _ignoreLogfileErrors = false;
bool _ignoreRecoveryErrors = false;
uint64_t _flushTimeout = 15000;


@ -104,8 +104,8 @@ int MMFilesTransactionState::beginTransaction(transaction::Hints hints) {
_id = TRI_NewTickServer();
// register a protector
int res = logfileManager->registerTransaction(_id);
int res = logfileManager->registerTransaction(_id, isReadOnlyTransaction());
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
@ -195,6 +195,12 @@ int MMFilesTransactionState::abortTransaction(transaction::Methods* activeTrx) {
updateStatus(transaction::Status::ABORTED);
if (_hasOperations) {
// must clean up the query cache because the transaction
// may have queried something via AQL that is now rolled back
clearQueryCache();
}
freeOperations(activeTrx);
}
@ -336,6 +342,9 @@ int MMFilesTransactionState::addOperation(TRI_voc_rid_t revisionId,
copy.release();
operation.swapped();
_hasOperations = true;
arangodb::aql::QueryCache::instance()->invalidate(
_vocbase, collection->name());
}
physical->setRevision(revisionId, false);
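
Both hunks above tie the AQL query cache to the transaction lifecycle: entries for a collection are invalidated when a transaction first modifies it, and again on abort, because cached results may reference data that is being rolled back. A toy sketch of that invalidation contract (hypothetical cache type, not arangodb::aql::QueryCache):

    #include <mutex>
    #include <set>
    #include <string>

    // Hedged sketch: per-collection invalidation guarded by a mutex.
    class QueryResultCache {
     public:
      void invalidate(std::string const& collection) {
        std::lock_guard<std::mutex> guard(_mutex);
        _validCollections.erase(collection);  // drop cached results for it
      }
      // lookup/insert omitted
     private:
      std::mutex _mutex;
      std::set<std::string> _validCollections;
    };

    // Usage mirroring the hunks above:
    //   on the first data modification of a transaction: cache.invalidate(name);
    //   on abort of a transaction with operations:       invalidate again, since
    //   queries may have been answered from data that is now rolled back.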


@ -1199,7 +1199,7 @@ void RestReplicationHandler::handleCommandClusterInventory() {
/// @brief creates a collection, based on the VelocyPack provided TODO: MOVE
////////////////////////////////////////////////////////////////////////////////
int RestReplicationHandler::createCollection(VPackSlice const& slice,
int RestReplicationHandler::createCollection(VPackSlice slice,
arangodb::LogicalCollection** dst,
bool reuseId) {
if (dst != nullptr) {
@ -1242,17 +1242,20 @@ int RestReplicationHandler::createCollection(VPackSlice const& slice,
return TRI_ERROR_NO_ERROR;
}
int res = TRI_ERROR_NO_ERROR;
try {
col = _vocbase->createCollection(slice, cid, true);
} catch (basics::Exception const& ex) {
res = ex.code();
} catch (...) {
res = TRI_ERROR_INTERNAL;
}
// always use current version number when restoring a collection,
// because the collection is effectively NEW
VPackBuilder patch;
patch.openObject();
patch.add("version", VPackValue(LogicalCollection::VERSION_31));
patch.close();
VPackBuilder builder = VPackCollection::merge(slice, patch.slice(), false);
slice = builder.slice();
if (res != TRI_ERROR_NO_ERROR) {
return res;
col = _vocbase->createCollection(slice, cid, true);
if (col == nullptr) {
return TRI_ERROR_INTERNAL;
}
TRI_ASSERT(col != nullptr);
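
The restore path above stamps the incoming collection definition with the current version by merging a one-field patch over it, since a restored collection is effectively new. A minimal sketch of that patch-and-merge step; the Collection::merge call mirrors the one in the hunk, the surrounding function is assumed:

    #include <velocypack/Builder.h>
    #include <velocypack/Collection.h>
    #include <velocypack/Slice.h>

    using namespace arangodb::velocypack;

    // Hedged sketch: overlay {"version": <n>} onto an existing definition.
    Builder stampVersion(Slice original, int64_t version) {
      Builder patch;
      patch.openObject();
      patch.add("version", Value(version));
      patch.close();
      // last argument 'false': do not merge sub-objects recursively
      return Collection::merge(original, patch.slice(), false);
    }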
@ -1660,6 +1663,10 @@ int RestReplicationHandler::processRestoreCollectionCoordinator(
TRI_ASSERT(replicationFactor > 0);
toMerge.add("replicationFactor", VPackValue(replicationFactor));
}
// always use current version number when restoring a collection,
// because the collection is effectively NEW
toMerge.add("version", VPackValue(LogicalCollection::VERSION_31));
toMerge.close(); // TopLevel
VPackSlice const type = parameters.get("type");
@ -1675,6 +1682,7 @@ int RestReplicationHandler::processRestoreCollectionCoordinator(
VPackBuilder mergedBuilder =
VPackCollection::merge(parameters, sliceToMerge, false);
VPackSlice const merged = mergedBuilder.slice();
try {
auto col = ClusterMethods::createCollectionOnCoordinator(collectionType,
_vocbase, merged);


@ -163,7 +163,7 @@ class RestReplicationHandler : public RestVocbaseBaseHandler {
/// @brief creates a collection, based on the VelocyPack provided TODO: MOVE
//////////////////////////////////////////////////////////////////////////////
int createCollection(VPackSlice const&, arangodb::LogicalCollection**, bool);
int createCollection(VPackSlice, arangodb::LogicalCollection**, bool);
//////////////////////////////////////////////////////////////////////////////
/// @brief handle a restore command for a specific collection


@ -25,6 +25,7 @@
#include "Agency/v8-agency.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Aql/PlanCache.h"
#include "Aql/QueryCache.h"
#include "Aql/QueryRegistry.h"
#include "Basics/ArangoGlobalContext.h"
@ -90,7 +91,7 @@ void DatabaseManagerThread::run() {
auto theLists = databaseFeature->_databasesLists.load();
for (TRI_vocbase_t* vocbase : theLists->_droppedDatabases) {
if (!vocbase->canBeDropped()) {
if (!vocbase->isDangling()) {
continue;
}
@ -133,10 +134,12 @@ void DatabaseManagerThread::run() {
// not possible that another thread has seen this very database
// and tries to free it at the same time!
}
if (database->type() != TRI_VOCBASE_TYPE_COORDINATOR) {
// regular database
// ---------------------------
TRI_ASSERT(!database->isSystem());
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "physically removing database directory '"
<< engine->databasePath(database) << "' of database '"
@ -606,7 +609,8 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
}
// increase reference counter
vocbase->use();
bool result = vocbase->use();
TRI_ASSERT(result);
}
{
@ -718,13 +722,6 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker,
// mark as deleted
TRI_ASSERT(vocbase->type() == TRI_VOCBASE_TYPE_NORMAL);
if (!vocbase->markAsDropped()) {
// deleted by someone else?
delete newLists;
events::DropDatabase(name, TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
return TRI_ERROR_ARANGO_DATABASE_NOT_FOUND;
}
newLists->_databases.erase(it);
newLists->_droppedDatabases.insert(vocbase);
}
@ -739,14 +736,21 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker,
_databasesLists = newLists;
_databasesProtector.scan();
delete oldLists;
TRI_ASSERT(!vocbase->isSystem());
bool result = vocbase->markAsDropped();
TRI_ASSERT(result);
vocbase->setIsOwnAppsDirectory(removeAppsDirectory);
// invalidate all entries for the database
arangodb::aql::PlanCache::instance()->invalidate(vocbase);
arangodb::aql::QueryCache::instance()->invalidate(vocbase);
engine->prepareDropDatabase(vocbase, writeMarker, res);
}
// must not use the database from here on, as it may now be
// deleted by the DatabaseManagerThread!
if (res == TRI_ERROR_NO_ERROR && waitForDeletion) {
engine->waitUntilDeletion(id, true, res);
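
The drop path above follows this feature's copy-on-write convention for _databasesLists: build new lists, publish the pointer, scan the protector, and only then free the old lists, so no concurrent reader is left holding freed memory. A schematic sketch with stand-in types (Lists and Protector are assumptions, not the real guardian):

    struct Lists { /* databases + droppedDatabases */ };
    struct Protector { void scan() { /* wait until no reader holds old data */ } };

    // Hedged sketch of the publish-then-scan sequence used above.
    void publish(Lists*& current, Lists* newLists, Protector& protector) {
      Lists* oldLists = current;
      current = newLists;   // new readers see the new lists from now on
      protector.scan();     // wait until no reader can still hold oldLists
      delete oldLists;      // now safe to free
    }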
@ -812,6 +816,9 @@ std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIds(
for (auto& p : theLists->_databases) {
TRI_vocbase_t* vocbase = p.second;
TRI_ASSERT(vocbase != nullptr);
if (vocbase->isDropped()) {
continue;
}
if (includeSystem || vocbase->name() != TRI_VOC_SYSTEM_DATABASE) {
ids.emplace_back(vocbase->id());
}
@ -832,7 +839,9 @@ std::vector<std::string> DatabaseFeature::getDatabaseNames() {
for (auto& p : theLists->_databases) {
TRI_vocbase_t* vocbase = p.second;
TRI_ASSERT(vocbase != nullptr);
if (vocbase->isDropped()) {
continue;
}
names.emplace_back(vocbase->name());
}
}
@ -856,6 +865,9 @@ std::vector<std::string> DatabaseFeature::getDatabaseNamesForUser(
for (auto& p : theLists->_databases) {
TRI_vocbase_t* vocbase = p.second;
TRI_ASSERT(vocbase != nullptr);
if (vocbase->isDropped()) {
continue;
}
auto authentication = application_features::ApplicationServer::getFeature<
AuthenticationFeature>("Authentication");
@ -877,7 +889,8 @@ std::vector<std::string> DatabaseFeature::getDatabaseNamesForUser(
}
void DatabaseFeature::useSystemDatabase() {
useDatabase(TRI_VOC_SYSTEM_DATABASE);
bool result = useDatabase(TRI_VOC_SYSTEM_DATABASE);
TRI_ASSERT(result);
}
/// @brief get a coordinator database by its id
@ -924,8 +937,9 @@ TRI_vocbase_t* DatabaseFeature::useDatabase(std::string const& name) {
if (it != theLists->_databases.end()) {
TRI_vocbase_t* vocbase = it->second;
vocbase->use();
return vocbase;
if (vocbase->use()) {
return vocbase;
}
}
return nullptr;
@ -939,8 +953,10 @@ TRI_vocbase_t* DatabaseFeature::useDatabase(TRI_voc_tick_t id) {
TRI_vocbase_t* vocbase = p.second;
if (vocbase->id() == id) {
vocbase->use();
return vocbase;
if (vocbase->use()) {
return vocbase;
}
break;
}
}
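
useDatabase() now honors the result of vocbase->use(): once a database is being dropped, use() refuses to hand out new references. A hedged sketch of such a guarded refcount; the bit layout is an assumption, not necessarily what TRI_vocbase_t does:

    #include <atomic>
    #include <cstdint>

    // Sketch: a refcount whose acquire can fail after markAsDropped().
    class RefCounted {
     public:
      bool use() {
        uint64_t v = _state.load();
        do {
          if (v & kDroppedBit) {
            return false;  // no new references once dropped
          }
        } while (!_state.compare_exchange_weak(v, v + kRefUnit));
        return true;
      }
      void release() { _state.fetch_sub(kRefUnit); }
      bool markAsDropped() {
        // returns true only for the caller that flips the bit first
        return (_state.fetch_or(kDroppedBit) & kDroppedBit) == 0;
      }
     private:
      static constexpr uint64_t kDroppedBit = 1;
      static constexpr uint64_t kRefUnit = 2;
      std::atomic<uint64_t> _state{0};
    };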


@ -54,9 +54,15 @@ VocbaseContext::VocbaseContext(GeneralRequest* request, TRI_vocbase_t* vocbase)
TRI_ASSERT(_vocbase != nullptr);
_authentication = FeatureCacheFeature::instance()->authenticationFeature();
TRI_ASSERT(_authentication != nullptr);
// _vocbase has already been refcounted for us
TRI_ASSERT(!_vocbase->isDangling());
}
VocbaseContext::~VocbaseContext() { _vocbase->release(); }
VocbaseContext::~VocbaseContext() {
TRI_ASSERT(!_vocbase->isDangling());
_vocbase->release();
}
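
The constructor/destructor pair above makes VocbaseContext the sole owner of the reference acquired during request routing: it asserts the database is still alive and releases exactly once on destruction. The usual RAII shape, reusing the RefCounted sketch from above:

    // Hedged sketch: a context object that owns one reference to 'db'.
    class Context {
     public:
      explicit Context(RefCounted* db) : _db(db) {
        // the caller has already called db->use() successfully on our behalf
      }
      ~Context() { _db->release(); }  // give the reference back exactly once
      Context(Context const&) = delete;
      Context& operator=(Context const&) = delete;
     private:
      RefCounted* _db;
    };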
////////////////////////////////////////////////////////////////////////////////
/// @brief checks the authentication

Some files were not shown because too many files have changed in this diff.