1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

This commit is contained in:
Simon Grätzer 2016-10-07 11:52:20 +02:00
commit d6dc8e776d
126 changed files with 3043 additions and 1428 deletions

View File

@ -1,6 +1,8 @@
devel
-----
* fixed issue #2086
* fixed issue #2079
* fixed issue #2071
@ -138,6 +140,8 @@ devel
v3.0.11 (2016-XX-XX)
--------------------
* fixed issue #2081
* fixed issue #2038
@ -694,9 +698,33 @@ v3.0.0-rc1 (2015-06-10)
using a backwards-compatible "legacy mode"
v2.8.11 (XXXX-XX-XX)
v2.8.12 (XXXX-XX-XX)
--------------------
* issue #2091: decrease connect timeout to 5 seconds on startup
* fixed issue #2072
* slightly better error diagnostics for some replication errors
* fixed issue #1977
* fixed issue in `INTERSECTION` AQL function with duplicate elements
in the source arrays
* fixed issue #1962
* fixed issue #1959
* export aqlQuery template handler as require('org/arangodb').aql for forwards-compatibility
v2.8.11 (2016-07-13)
--------------------
* fixed array index batch insertion issues for hash indexes that caused problems when
no elements remained for insertion
* fixed issue #1937

View File

@ -64,7 +64,7 @@ endif ()
################################################################################
set(ARANGODB_VERSION_MAJOR "3")
set(ARANGODB_VERSION_MINOR "0")
set(ARANGODB_VERSION_MINOR "1")
set(ARANGODB_VERSION_REVISION "devel")
set(ARANGODB_PACKAGE_REVISION "1")
@ -105,6 +105,7 @@ set(BIN_ARANGOVPACK arangovpack)
set(TEST_BASICS_SUITE basics_suite)
set(TEST_GEO_SUITE geo_suite)
set(PACKAGES_LIST)
set(CLEAN_PACKAGES_LIST)
################################################################################
## VERSION FILES
@ -932,4 +933,8 @@ add_custom_target(packages
DEPENDS ${PACKAGES_LIST}
)
add_custom_target(clean_packages
DEPENDS ${CLEAN_PACKAGES_LIST}
)
message(STATUS "building for git revision: ${ARANGODB_BUILD_REPOSITORY}")

View File

@ -0,0 +1,22 @@
!CHAPTER Audit Configuration
!SECTION Output
`--audit.output output`
Specifies the target of the audit log. Possible values are
`file://filename` where *filename* can be relative or absolute.
`syslog://facility` or `syslog://facility/application-name` to log
into a syslog server.
The option can be specified multiple times in order to configure the
output for multiple targets.
!SECTION Hostname
`--audit.hostname name`
The name of the server used in audit log messages. By default the
system hostname is used.

View File

@ -0,0 +1,139 @@
!CHAPTER Audit Events
!SECTION Authentication
!SUBSECTION Unknown authentication methods
```
2016-10-03 15:44:23 | server1 | - | database1 | 127.0.0.1:61525 | - | unknown authentication method | /_api/version
```
!SUBSECTION Missing credentials
```
2016-10-03 15:39:49 | server1 | - | database1 | 127.0.0.1:61498 | - | credentials missing | /_api/version
```
!SUBSECTION Wrong credentials
```
2016-10-03 15:47:26 | server1 | user1 | database1 | 127.0.0.1:61528 | http basic | credentials wrong | /_api/version
```
!SUBSECTION Password change required
```
2016-10-03 16:18:53 | server1 | user1 | database1 | 127.0.0.1:62257 | - | password change required | /_api/version
```
!SUBSECTION JWT login succeeded
```
2016-10-03 17:21:22 | server1 | - | database1 | 127.0.0.1:64214 | http jwt | user 'root' authenticated | /_open/auth
```
Please note that the user given in the third field is the user that
requested the login. In general, it will be empty.
!SUBSECTION JWT login failed
```
2016-10-03 17:21:22 | server1 | - | database1 | 127.0.0.1:64214 | http jwt | user 'root' wrong credentials | /_open/auth
```
Please note that the user given in the third field is the user that
requested the login. In general, it will be empty.
!SECTION Authorization
!SUBSECTION User not authorized to access database
```
2016-10-03 16:20:52 | server1 | user1 | database2 | 127.0.0.1:62262 | http basic | not authorized | /_api/version
```
!SECTION Databases
!SUBSECTION Create a database
```
2016-10-04 15:33:25 | server1 | user1 | database1 | 127.0.0.1:56920 | http basic | create database 'database1' | ok | /_api/database
```
!SUBSECTION Drop a database
```
2016-10-04 15:33:25 | server1 | user1 | database1 | 127.0.0.1:56920 | http basic | delete database 'database1' | ok | /_api/database
```
!SECTION Collections
!SUBSECTION Create a collection
```
2016-10-05 17:35:57 | server1 | user1 | database1 | 127.0.0.1:51294 | http basic | create collection 'collection1' | ok | /_api/collection
```
!SUBSECTION Truncate a collection
```
2016-10-05 17:36:08 | server1 | user1 | database1 | 127.0.0.1:51294 | http basic | truncate collection 'collection1' | ok | /_api/collection/collection1/truncate
```
!SUBSECTION Drop a collection
```
2016-10-05 17:36:30 | server1 | user1 | database1 | 127.0.0.1:51294 | http basic | delete collection 'collection1' | ok | /_api/collection/collection1
```
!SECTION Indexes
!SUBSECTION Create an index
```
2016-10-05 18:19:40 | server1 | user1 | database1 | 127.0.0.1:52467 | http basic | create index in 'collection1' | ok | {"fields":["a"],"sparse":false,"type":"skiplist","unique":false} | /_api/index?collection=collection1
```
!SUBSECTION Drop an index
```
2016-10-05 18:18:28 | server1 | user1 | database1 | 127.0.0.1:52464 | http basic | drop index ':44051' | ok | /_api/index/collection1/44051
```
!SECTION Documents
!SUBSECTION Creating a single document
```
2016-10-04 12:27:55 | server1 | user1 | database1 | 127.0.0.1:53699 | http basic | create document ok | /_api/document/collection1
```
!SUBSECTION Replacing a single document
```
2016-10-04 12:28:08 | server1 | user1 | database1 | 127.0.0.1:53699 | http basic | replace document ok | /_api/document/collection1/21456?ignoreRevs=false
```
!SUBSECTION Modifying a single document
```
2016-10-04 12:28:15 | server1 | user1 | database1 | 127.0.0.1:53699 | http basic | modify document ok | /_api/document/collection1/21456?keepNull=true&ignoreRevs=false
```
!SUBSECTION Deleting a single document
```
2016-10-04 12:28:23 | server1 | user1 | database1 | 127.0.0.1:53699 | http basic | delete document ok | /_api/document/collection1/21456?ignoreRevs=false
```
For example, if someone tries to delete a non-existing document, it will be logged as
```
2016-10-04 12:28:26 | server1 | user1 | database1 | 127.0.0.1:53699 | http basic | delete document failed | /_api/document/collection1/21456?ignoreRevs=false
```
!SECTION Queries
```
2016-10-06 12:12:10 | server1 | user1 | database1 | 127.0.0.1:54232 | http basic | query document | ok | for i in collection1 return i | /_api/cursor
```

View File

@ -0,0 +1,28 @@
!CHAPTER Auditing
*This feature is available in the Enterprise Edition.*
Auditing allows you to monitor access to the database in detail. In general
audit logs are of the form
```
2016-01-01 12:00:00 | server | username | database | client-ip | authentication | text1 | text2 | ...
```
The *time-stamp* is in GMT. This makes it easy to match log entries from servers
in different time zones.
The name of the *server*. You can specify a custom name on startup. Otherwise
the default hostname is used.
The *username* is the (authenticated or unauthenticated) name supplied by the
client. A dash `-` is printed if no name was given by the client.
The *database* describes the database that was accessed. Please note that there
are no database crossing queries. Each access is restricted to one database.
The *client-ip* describes the source of the request.
The *authentication* details the methods used to authenticate the user.
Details about the requests follow in the additional fields.

View File

@ -169,6 +169,10 @@
* [Upgrading to 2.3](Administration/Upgrading/Upgrading23.md)
* [Upgrading to 2.2](Administration/Upgrading/Upgrading22.md)
#
* [Auditing](Auditing/README.md)
* [Configuration](Auditing/AuditConfiguration.md)
* [Events](Auditing/AuditEvents.md)
#
* [Troubleshooting](Troubleshooting/README.md)
* [arangod](Troubleshooting/Arangod.md)
* [Emergency Console](Troubleshooting/EmergencyConsole.md)

View File

@ -1,5 +1,3 @@
@brief the group id to use for the process
`--gid gid`

View File

@ -1,4 +1,4 @@
.TH arangobench 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH arangobench 1 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
arangobench - the ArangoDB benchmark and test tool
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH arangodump 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH arangodump 1 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
arangodump - a tool to create logical dumps of an ArangoDB database
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH arangoimp 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH arangoimp 1 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
arangoimp - a bulk importer for the ArangoDB database
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH arangorestore 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH arangorestore 1 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
arangorestore - a data restore tool for the ArangoDB database
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH arangosh 1 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH arangosh 1 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
arangosh - the ArangoDB shell
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH arango-dfdb 8 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH arango-dfdb 8 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
arango-dfdb - a datafile debugger for ArangoDB
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH arangod 8 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH arangod 8 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
arangod - the ArangoDB database server
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH foxx-manager 8 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH foxx-manager 8 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
foxx-manager - a Foxx application manager for ArangoDB
.SH SYNOPSIS

View File

@ -1,4 +1,4 @@
.TH rcarangod 8 "3.0.x-devel" "ArangoDB" "ArangoDB"
.TH rcarangod 8 "3.1.devel" "ArangoDB" "ArangoDB"
.SH NAME
rcarangod - control script for the ArangoDB database server
.SH SYNOPSIS

View File

@ -509,6 +509,7 @@ if test -n "${TARGET_DIR}"; then
dir="${TARGET_DIR}"
if [ -n "$CPACK" -a -n "${TARGET_DIR}" ]; then
${PACKAGE_MAKE} copy_packages
${PACKAGE_MAKE} clean_packages
else
TARFILE=arangodb-`uname`${TAR_SUFFIX}.tar.gz
TARFILE_TMP=`pwd`/arangodb.tar.$$

View File

@ -1 +1 @@
3.0.devel
3.1.devel

View File

@ -242,26 +242,11 @@ bool CleanOutServer::start() {
}
bool CleanOutServer::scheduleMoveShards() {
std::vector<std::string> availServers;
// Get servers from plan
Node::Children const& dbservers = _snapshot("/Plan/DBServers").children();
for (auto const& srv : dbservers) {
availServers.push_back(srv.first);
}
// Remove cleaned from ist
if (_snapshot.exists("/Target/CleanedServers").size() == 2) {
for (auto const& srv :
VPackArrayIterator(_snapshot("/Target/CleanedServers").slice())) {
availServers.erase(std::remove(availServers.begin(), availServers.end(),
srv.copyString()),
availServers.end());
}
}
std::vector<std::string> servers = availableServers();
// Minimum 1 DB server must remain
if (availServers.size() == 1) {
if (servers.size() == 1) {
LOG_TOPIC(ERR, Logger::AGENCY) << "DB server " << _server
<< " is the last standing db server.";
return false;
@ -271,9 +256,20 @@ bool CleanOutServer::scheduleMoveShards() {
size_t sub = 0;
for (auto const& database : databases) {
// Find shardsLike dependencies
for (auto const& collptr : database.second->children()) {
Node const& collection = *(collptr.second);
auto const& collection = *(collptr.second);
try { // distributeShardsLike entry means we only follow
if (collection("distributeShardsLike").slice().copyString() != "") {
continue;
}
} catch (...) {}
for (auto const& shard : collection("shards").children()) {
bool found = false;
VPackArrayIterator dbsit(shard.second->slice());
@ -289,44 +285,45 @@ bool CleanOutServer::scheduleMoveShards() {
}
// Only destinations, which are not already holding this shard
std::vector<std::string> myServers = availServers;
for (auto const& dbserver : dbsit) {
myServers.erase(std::remove(myServers.begin(), myServers.end(),
dbserver.copyString()),
myServers.end());
servers.erase(
std::remove(servers.begin(), servers.end(), dbserver.copyString()),
servers.end());
}
// Among those a random destination
std::string toServer;
if (myServers.empty()) {
LOG_TOPIC(ERR, Logger::AGENCY) << "No servers remain as target for "
<< "MoveShard";
if (servers.empty()) {
LOG_TOPIC(ERR, Logger::AGENCY)
<< "No servers remain as target for MoveShard";
return false;
}
try {
toServer = myServers.at(rand() % myServers.size());
toServer = servers.at(rand() % servers.size());
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Range error picking destination for shard " + shard.first;
<< "Range error picking destination for shard " + shard.first;
}
// Schedule move
MoveShard(_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
_jobId, _agencyPrefix, database.first, collptr.first,
shard.first, _server, toServer);
}
}
}
return true;
}
bool CleanOutServer::checkFeasibility() {
// Server exists
if (_snapshot.exists("/Plan/DBServers/" + _server).size() != 3) {
LOG_TOPIC(ERR, Logger::AGENCY) << "No db server with id " << _server
<< " in plan.";
LOG_TOPIC(ERR, Logger::AGENCY)
<< "No db server with id " << _server << " in plan.";
return false;
}
@ -334,8 +331,8 @@ bool CleanOutServer::checkFeasibility() {
for (auto const& srv :
VPackArrayIterator(_snapshot("/Target/CleanedServers").slice())) {
if (srv.copyString() == _server) {
LOG_TOPIC(ERR, Logger::AGENCY) << _server
<< " has been cleaned out already!";
LOG_TOPIC(ERR, Logger::AGENCY)
<< _server << " has been cleaned out already!";
return false;
}
}
@ -344,15 +341,14 @@ bool CleanOutServer::checkFeasibility() {
for (auto const& srv :
VPackObjectIterator(_snapshot("/Target/FailedServers").slice())) {
if (srv.key.copyString() == _server) {
LOG_TOPIC(ERR, Logger::AGENCY) << _server
<< " has failed!";
LOG_TOPIC(ERR, Logger::AGENCY) << _server << " has failed!";
return false;
}
}
if (_snapshot.exists(serverStatePrefix + _server + "/cleaning").size() == 4) {
LOG_TOPIC(ERR, Logger::AGENCY) << _server
<< " has been cleaned out already!";
LOG_TOPIC(ERR, Logger::AGENCY)
<< _server << " has been cleaned out already!";
return false;
}
@ -367,17 +363,18 @@ bool CleanOutServer::checkFeasibility() {
// Remove cleaned from ist
if (_snapshot.exists("/Target/CleanedServers").size() == 2) {
for (auto const& srv :
VPackArrayIterator(_snapshot("/Target/CleanedServers").slice())) {
availServers.erase(std::remove(availServers.begin(), availServers.end(),
srv.copyString()),
availServers.end());
VPackArrayIterator(_snapshot("/Target/CleanedServers").slice())) {
availServers.erase(
std::remove(
availServers.begin(), availServers.end(), srv.copyString()),
availServers.end());
}
}
// Minimum 1 DB server must remain
if (availServers.size() == 1) {
LOG_TOPIC(ERR, Logger::AGENCY) << "DB server " << _server
<< " is the last standing db server.";
LOG_TOPIC(ERR, Logger::AGENCY)
<< "DB server " << _server << " is the last standing db server.";
return false;
}

View File

@ -54,7 +54,7 @@ FailedServer::~FailedServer() {}
bool FailedServer::start() {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Trying to start FailedLeader job" + _jobId + " for server " + _server;
<< "Trying to start FailedServer job" + _jobId + " for server " + _server;
// Copy todo to pending
Builder todo, pending;
@ -118,7 +118,7 @@ bool FailedServer::start() {
if (res.accepted && res.indices.size() == 1 && res.indices[0]) {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Pending: DB Server " + _server + " failed.";
<< "Pending job for failed DB Server " << _server;
auto const& databases = _snapshot("/Plan/Collections").children();
auto const& current = _snapshot("/Current/Collections").children();
@ -130,19 +130,19 @@ bool FailedServer::start() {
for (auto const& collptr : database.second->children()) {
Node const& collection = *(collptr.second);
if (!cdatabase.find(collptr.first)->second->children().empty()) {
Node const& collection = *(collptr.second);
Node const& replicationFactor = collection("replicationFactor");
if (replicationFactor.slice().getUInt() > 1) {
for (auto const& shard : collection("shards").children()) {
VPackArrayIterator dbsit(shard.second->slice());
// Only proceed if leader and create job
if ((*dbsit.begin()).copyString() != _server) {
continue;
}
FailedLeader(
_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
_jobId, _agencyPrefix, database.first, collptr.first,

View File

@ -321,6 +321,14 @@ void Inception::run() {
gossip();
}
// 4. If still incomplete bail out :(
config = _agent->config();
if (!config.poolComplete()) {
LOG_TOPIC(FATAL, Logger::AGENCY)
<< "Failed to build environment for RAFT algorithm. Bailing out!";
FATAL_ERROR_EXIT();
}
_agent->ready(true);
}

179
arangod/Agency/Job.cpp Normal file
View File

@ -0,0 +1,179 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Kaveh Vahedipour
////////////////////////////////////////////////////////////////////////////////
#include "Job.h"
using namespace arangodb::consensus;
// Base class for agency supervision jobs (e.g. FailedServer, CleanOutServer,
// MoveShard). Stores a snapshot of the agency state plus the identifiers and
// key prefixes needed to write job bookkeeping entries back to the agency.
Job::Job(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& agencyPrefix) :
_snapshot(snapshot),
_agent(agent),
_jobId(jobId),
_creator(creator),
_agencyPrefix(agencyPrefix),
_jb(nullptr) {}
// Out-of-line virtual destructor (declared virtual in Job.h).
Job::~Job() {}
// Report where this job's bookkeeping entry currently lives under /Target.
// Checks /ToDo, /Pending, /Finished and /Failed in that order and returns the
// matching status; NOTFOUND if the job id appears in none of them.
JOB_STATUS Job::exists() const {
Node const& target = _snapshot("/Target");
// NOTE(review): exists() appears to return the resolved path components, so
// size() == 2 means the full two-component path (e.g. "ToDo/<jobId>") was
// found -- confirm against Node::exists() semantics.
if (target.exists(std::string("/ToDo/") + _jobId).size() == 2) {
return TODO;
} else if (target.exists(std::string("/Pending/") + _jobId).size() == 2) {
return PENDING;
} else if (target.exists(std::string("/Finished/") + _jobId).size() == 2) {
return FINISHED;
} else if (target.exists(std::string("/Failed/") + _jobId).size() == 2) {
return FAILED;
}
return NOTFOUND;
}
// Finalize this job: move its entry from ToDo/Pending into Finished (on
// success) or Failed, delete the old entries, and remove any server/shard
// blocks the job had installed. The whole update is submitted as a single
// agency write transaction; returns true iff the transaction was accepted.
bool Job::finish(std::string const& type, bool success,
std::string const& reason) const {
Builder pending, finished;
// Get todo entry (prefer the pending copy if the job already started)
pending.openArray();
if (_snapshot.exists(pendingPrefix + _jobId).size() == 3) {
_snapshot(pendingPrefix + _jobId).toBuilder(pending);
} else if (_snapshot.exists(toDoPrefix + _jobId).size() == 3) {
_snapshot(toDoPrefix + _jobId).toBuilder(pending);
} else {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Nothing in pending to finish up for job " << _jobId;
return false;
}
pending.close();
// Read the job's "type" attribute; needed below to decide which
// Supervision blocks to clear. Missing type is tolerated (jobType stays
// empty) but logged.
std::string jobType;
try {
jobType = pending.slice()[0].get("type").copyString();
} catch (std::exception const& e) {
LOG_TOPIC(WARN, Logger::AGENCY)
<< "Failed to obtain type of job " << _jobId;
}
// Prepare pending entry, block to-server
finished.openArray();
// --- Add finished: copy the old job document into Finished/ or Failed/,
//     stamped with the completion time and an optional failure reason
finished.openObject();
finished.add(
_agencyPrefix + (success ? finishedPrefix : failedPrefix) + _jobId,
VPackValue(VPackValueType::Object));
finished.add(
"timeFinished",
VPackValue(timepointToString(std::chrono::system_clock::now())));
for (auto const& obj : VPackObjectIterator(pending.slice()[0])) {
finished.add(obj.key.copyString(), obj.value);
}
if (!reason.empty()) {
finished.add("reason", VPackValue(reason));
}
finished.close();
// --- Delete pending
finished.add(_agencyPrefix + pendingPrefix + _jobId,
VPackValue(VPackValueType::Object));
finished.add("op", VPackValue("delete"));
finished.close();
// --- Delete todo
finished.add(_agencyPrefix + toDoPrefix + _jobId,
VPackValue(VPackValueType::Object));
finished.add("op", VPackValue("delete"));
finished.close();
// --- Remove block if specified: moveShard jobs block every shard listed in
//     their "shards" attribute; other job types block a single
//     /Supervision/<type> entry passed in by the caller.
if (jobType == "moveShard") {
for (auto const& shard :
VPackArrayIterator(pending.slice()[0].get("shards"))) {
finished.add(_agencyPrefix + "/Supervision/Shards/" + shard.copyString(),
VPackValue(VPackValueType::Object));
finished.add("op", VPackValue("delete"));
finished.close();
}
} else if (type != "") {
finished.add(_agencyPrefix + "/Supervision/" + type,
VPackValue(VPackValueType::Object));
finished.add("op", VPackValue("delete"));
finished.close();
}
// --- Need precond?
finished.close();
finished.close();
// Submit the transaction; success requires it to be accepted and applied
// at exactly one log index.
write_ret_t res = transact(_agent, finished);
if (res.accepted && res.indices.size() == 1 && res.indices[0]) {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Successfully finished job " << type << "(" << _jobId << ")";
return true;
}
return false;
}
// Collect the ids of all DB servers from /Plan/DBServers, then drop any that
// are already cleaned out (/Target/CleanedServers) or failed
// (/Target/FailedServers). The catch-all handlers make both removals
// best-effort: absence of either list simply leaves the vector unchanged.
std::vector<std::string> Job::availableServers() const {
std::vector<std::string> ret;
// Get servers from plan
Node::Children const& dbservers = _snapshot(plannedServers).children();
for (auto const& srv : dbservers) {
ret.push_back(srv.first);
}
// Remove cleaned servers from list
try {
for (auto const& srv :
VPackArrayIterator(_snapshot(cleanedPrefix).slice())) {
ret.erase(
std::remove(ret.begin(), ret.end(), srv.copyString()),
ret.end());
}
} catch (...) {}
// Remove failed servers from list
try {
for (auto const& srv :
VPackArrayIterator(_snapshot(failedServersPrefix).slice())) {
ret.erase(
std::remove(ret.begin(), ret.end(), srv.copyString()),
ret.end());
}
} catch (...) {}
return ret;
}

View File

@ -53,6 +53,7 @@ static std::string const blockedServersPrefix = "/Supervision/DBServers/";
static std::string const blockedShardsPrefix = "/Supervision/Shards/";
static std::string const serverStatePrefix = "/Sync/ServerStates/";
static std::string const planVersion = "/Plan/Version";
static std::string const plannedServers = "/Plan/DBServers";
inline arangodb::consensus::write_ret_t transact(Agent* _agent,
Builder const& transaction,
@ -91,108 +92,24 @@ struct JobCallback {
};
struct Job {
Job(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& agencyPrefix)
: _snapshot(snapshot),
_agent(agent),
_jobId(jobId),
_creator(creator),
_agencyPrefix(agencyPrefix),
_jb(nullptr) {}
std::string const& creator, std::string const& agencyPrefix);
virtual ~Job() {}
virtual ~Job();
virtual JOB_STATUS exists() const {
Node const& target = _snapshot("/Target");
if (target.exists(std::string("/ToDo/") + _jobId).size() == 2) {
return TODO;
} else if (target.exists(std::string("/Pending/") + _jobId).size() == 2) {
return PENDING;
} else if (target.exists(std::string("/Finished/") + _jobId).size() == 2) {
return FINISHED;
} else if (target.exists(std::string("/Failed/") + _jobId).size() == 2) {
return FAILED;
}
return NOTFOUND;
}
virtual JOB_STATUS exists() const;
virtual bool finish(std::string const& type, bool success = true,
std::string const& reason = std::string()) const {
Builder pending, finished;
// Get todo entry
pending.openArray();
if (_snapshot.exists(pendingPrefix + _jobId).size() == 3) {
_snapshot(pendingPrefix + _jobId).toBuilder(pending);
} else if (_snapshot.exists(toDoPrefix + _jobId).size() == 3) {
_snapshot(toDoPrefix + _jobId).toBuilder(pending);
} else {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Nothing in pending to finish up for job " << _jobId;
return false;
}
pending.close();
// Prepare peding entry, block toserver
finished.openArray();
// --- Add finished
finished.openObject();
finished.add(
_agencyPrefix + (success ? finishedPrefix : failedPrefix) + _jobId,
VPackValue(VPackValueType::Object));
finished.add(
"timeFinished",
VPackValue(timepointToString(std::chrono::system_clock::now())));
for (auto const& obj : VPackObjectIterator(pending.slice()[0])) {
finished.add(obj.key.copyString(), obj.value);
}
if (!reason.empty()) {
finished.add("reason", VPackValue(reason));
}
finished.close();
// --- Delete pending
finished.add(_agencyPrefix + pendingPrefix + _jobId,
VPackValue(VPackValueType::Object));
finished.add("op", VPackValue("delete"));
finished.close();
// --- Delete todo
finished.add(_agencyPrefix + toDoPrefix + _jobId,
VPackValue(VPackValueType::Object));
finished.add("op", VPackValue("delete"));
finished.close();
// --- Remove block if specified
if (type != "") {
finished.add(_agencyPrefix + "/Supervision/" + type,
VPackValue(VPackValueType::Object));
finished.add("op", VPackValue("delete"));
finished.close();
}
// --- Need precond?
finished.close();
finished.close();
write_ret_t res = transact(_agent, finished);
if (res.accepted && res.indices.size() == 1 && res.indices[0]) {
LOG_TOPIC(INFO, Logger::AGENCY) << "Successfully finished job " << type << "(" << _jobId << ")";
return true;
}
return false;
}
std::string const& reason = std::string()) const;
virtual JOB_STATUS status() = 0;
virtual bool create() = 0;
virtual bool start() = 0;
virtual std::vector<std::string> availableServers() const;
Node const _snapshot;
Agent* _agent;
std::string _jobId;
@ -200,7 +117,9 @@ struct Job {
std::string _agencyPrefix;
std::shared_ptr<Builder> _jb;
};
}
}

View File

@ -59,10 +59,10 @@ MoveShard::~MoveShard() {}
bool MoveShard::create() {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Todo: Move shard " + _shard + " from " + _from + " to " << _to;
<< "Todo: Move shard " + _shard + " from " + _from + " to " << _to;
std::string path, now(timepointToString(std::chrono::system_clock::now()));
// DBservers
std::string curPath =
curColPrefix + _database + "/" + _collection + "/" + _shard + "/servers";
@ -89,15 +89,23 @@ bool MoveShard::create() {
_jb->add("creator", VPackValue(_creator));
_jb->add("type", VPackValue("moveShard"));
_jb->add("database", VPackValue(_database));
_jb->add("collection", VPackValue(_collection));
_jb->add("shard", VPackValue(_shard));
_jb->add(VPackValue("collections"));
{
VPackArrayBuilder b(_jb.get());
_jb->add(VPackValue(_collection));
}
_jb->add(VPackValue("shards"));
{
VPackArrayBuilder b(_jb.get());
_jb->add(VPackValue(_shard));
}
_jb->add("fromServer", VPackValue(_from));
_jb->add("toServer", VPackValue(_to));
_jb->add("isLeader", VPackValue(current[0].copyString() == _from));
_jb->add("jobId", VPackValue(_jobId));
_jb->add("timeCreated", VPackValue(now));
_jb->close();
_jb->close();
_jb->close();
@ -112,18 +120,66 @@ bool MoveShard::create() {
}
bool MoveShard::start() {
// Are we distributeShardsLiking other shard?
// Invoke moveShard there
auto collection = _snapshot(planColPrefix + _database + "/" + _collection);
auto myshards = _snapshot(
planColPrefix + _database + "/" + _collection + "/shards").children();
auto mpos = std::distance(myshards.begin(),
myshards.find(_shard));
std::string distributeShardsLike;
while(true) {
try {
distributeShardsLike = collection("distributeShardsLike").getString();
if (!distributeShardsLike.empty()) {
_collection = distributeShardsLike;
collection = _snapshot(planColPrefix + _database + "/" + _collection);
auto othershards =
_snapshot(planColPrefix + _database + "/" + _collection + "/shards")
.children();
auto opos = othershards.begin();
std::advance(opos, mpos);
_shard = opos->first;
}
} catch(...) {
break;
}
}
// Are we ditributeShardsLiked by others?
// Invoke moveShard here with others
auto collections = _snapshot(planColPrefix + _database).children();
std::vector<std::string> colsLikeMe;
std::vector<std::string> shardsLikeMe;
colsLikeMe.push_back(_collection);
shardsLikeMe.push_back(_shard);
for (auto const& collptr : collections) {
auto const& node = *(collptr.second);
try {
if (node("distributeShardsLike").getString() == _collection) {
auto opos = node("shards").children().begin();
if (!node("shards").children().empty()) {
std::advance(opos, mpos);
colsLikeMe.push_back(collptr.first);
shardsLikeMe.push_back(opos->first);
}
}
} catch (...) {}
}
// DBservers
std::string planPath =
planColPrefix + _database + "/" + _collection + "/shards/" + _shard;
planColPrefix + _database + "/" + _collection + "/shards/" + _shard;
std::string curPath =
curColPrefix + _database + "/" + _collection + "/" + _shard + "/servers";
curColPrefix + _database + "/" + _collection + "/" + _shard + "/servers";
Slice current = _snapshot(curPath).slice();
Slice planned = _snapshot(planPath).slice();
TRI_ASSERT(current.isArray());
TRI_ASSERT(planned.isArray());
for (auto const& srv : VPackArrayIterator(current)) {
TRI_ASSERT(srv.isString());
if (srv.copyString() == _to) {
@ -155,10 +211,14 @@ bool MoveShard::start() {
return false;
}
} else {
todo.add(_jb->slice()[0].valueAt(0));
try {
todo.add(_jb->slice()[0].get(_agencyPrefix + toDoPrefix + _jobId));
} catch (std::exception const& e) {
LOG_TOPIC(WARN, Logger::AGENCY) << e.what() << __FILE__ << __LINE__;
}
}
todo.close();
// Enter pending, remove todo, block toserver
pending.openArray();
@ -169,7 +229,22 @@ bool MoveShard::start() {
pending.add("timeStarted",
VPackValue(timepointToString(std::chrono::system_clock::now())));
for (auto const& obj : VPackObjectIterator(todo.slice()[0])) {
pending.add(obj.key.copyString(), obj.value);
std::string key = obj.key.copyString();
if (key == "collections") {
pending.add(VPackValue(key));
VPackArrayBuilder b(&pending);
for (auto const& col : colsLikeMe) {
pending.add(VPackValue(col));
}
} else if (key == "shards") {
pending.add(VPackValue(key));
VPackArrayBuilder b(&pending);
for (auto const& shard : shardsLikeMe) {
pending.add(VPackValue(shard));
}
} else {
pending.add(obj.key.copyString(), obj.value);
}
}
pending.close();
@ -179,27 +254,39 @@ bool MoveShard::start() {
pending.add("op", VPackValue("delete"));
pending.close();
// --- Block shard
pending.add(_agencyPrefix + blockedShardsPrefix + _shard,
VPackValue(VPackValueType::Object));
pending.add("jobId", VPackValue(_jobId));
pending.close();
// --- Plan changes
pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
if (planned[0].copyString() == _from) { // Leader
pending.add(planned[0]);
pending.add(VPackValue(_to));
for (size_t i = 1; i < planned.length(); ++i) {
pending.add(planned[i]);
}
} else { // Follower
for (auto const& srv : VPackArrayIterator(planned)) {
pending.add(srv);
}
pending.add(VPackValue(_to));
// --- Block shards
for (auto const& shard : shardsLikeMe) {
pending.add(_agencyPrefix + blockedShardsPrefix + shard,
VPackValue(VPackValueType::Object));
pending.add("jobId", VPackValue(_jobId));
pending.close();
}
// --- Plan changes
size_t j = 0;
for (auto const& c : colsLikeMe) {
planPath = planColPrefix + _database + "/" + c + "/shards/"
+ shardsLikeMe[j++];
planned = _snapshot(planPath).slice();
pending.add(VPackValue(_agencyPrefix + planPath));
{
VPackArrayBuilder b(&pending);
if (planned[0].copyString() == _from) { // Leader
pending.add(planned[0]);
pending.add(VPackValue(_to));
for (size_t i = 1; i < planned.length(); ++i) {
pending.add(planned[i]);
}
} else { // Follower
for (auto const& srv : VPackArrayIterator(planned)) {
pending.add(srv);
}
pending.add(VPackValue(_to));
}
}
}
pending.close();
// --- Increment Plan/Version
pending.add(_agencyPrefix + planVersion, VPackValue(VPackValueType::Object));
@ -244,131 +331,144 @@ JOB_STATUS MoveShard::status() {
try {
_database = _snapshot(pos[status] + _jobId + "/database").getString();
_collection = _snapshot(pos[status] + _jobId + "/collection").getString();
_collection =
_snapshot(pos[status] + _jobId + "/collections").slice()[0].copyString();
_from = _snapshot(pos[status] + _jobId + "/fromServer").getString();
_to = _snapshot(pos[status] + _jobId + "/toServer").getString();
_shard = _snapshot(pos[status] + _jobId + "/shard").getString();
_shard =
_snapshot(pos[status] + _jobId + "/shards").slice()[0].copyString();
} catch (std::exception const& e) {
std::stringstream err;
err << "Failed to find job " << _jobId << " in agency: " << e.what();
LOG_TOPIC(ERR, Logger::AGENCY) << err.str();
finish("Shards/" + _shard, false, err.str());
std::string err =
std::string("Failed to find job ") + _jobId + " in agency: " + e.what();
LOG_TOPIC(ERR, Logger::AGENCY) << err;
finish("Shards/" + _shard, false, err);
return FAILED;
}
}
if (status == PENDING) {
std::string planPath =
planColPrefix + _database + "/" + _collection + "/shards/" + _shard;
std::string curPath = curColPrefix + _database + "/" + _collection + "/" +
_shard + "/servers";
Slice current = _snapshot(curPath).slice();
Slice plan = _snapshot(planPath).slice();
Slice collections = _snapshot(pos[status] + _jobId + "/collections").slice();
Slice shards = _snapshot(pos[status] + _jobId + "/shards").slice();
std::vector<std::string> planv, currv;
for (auto const& srv : VPackArrayIterator(plan)) {
planv.push_back(srv.copyString());
}
std::sort(planv.begin(), planv.end());
for (auto const& srv : VPackArrayIterator(current)) {
currv.push_back(srv.copyString());
}
std::sort(currv.begin(), currv.end());
size_t i = 0;
size_t done = 0;
for (auto const& collslice : VPackArrayIterator(collections)) {
if (currv == planv) {
if (current[0].copyString() ==
std::string("_") + _from) { // Retired leader
std::string shard = shards[i++].copyString();
std::string collection = collslice.copyString();
Builder remove; // remove
remove.openArray();
remove.openObject();
// --- Plan changes
remove.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
for (size_t i = 1; i < plan.length(); ++i) {
remove.add(plan[i]);
}
remove.close();
// --- Plan version
remove.add(_agencyPrefix + planVersion,
VPackValue(VPackValueType::Object));
remove.add("op", VPackValue("increment"));
remove.close();
remove.close();
remove.close();
transact(_agent, remove);
std::string planPath =
planColPrefix + _database + "/" + collection + "/shards/" + shard;
std::string curPath = curColPrefix + _database + "/" + collection + "/" +
shard + "/servers";
return PENDING;
Slice current = _snapshot(curPath).slice();
Slice plan = _snapshot(planPath).slice();
} else {
bool foundFrom = false, foundTo = false;
for (auto const& srv : VPackArrayIterator(current)) {
std::string srv_str = srv.copyString();
if (srv_str == _from) {
foundFrom = true;
std::vector<std::string> planv, currv;
for (auto const& srv : VPackArrayIterator(plan)) {
planv.push_back(srv.copyString());
}
std::sort(planv.begin(), planv.end());
for (auto const& srv : VPackArrayIterator(current)) {
currv.push_back(srv.copyString());
}
std::sort(currv.begin(), currv.end());
if (currv == planv) {
if (current[0].copyString() ==
std::string("_") + _from) { // Retired leader
Builder remove; // remove
remove.openArray();
remove.openObject();
// --- Plan changes
remove.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
for (size_t i = 1; i < plan.length(); ++i) {
remove.add(plan[i]);
}
if (srv_str == _to) {
foundTo = true;
}
}
if (foundFrom && foundTo) {
if (plan[0].copyString() == _from) { // Leader
Builder underscore; // serverId -> _serverId
underscore.openArray();
underscore.openObject();
// --- Plan changes
underscore.add(_agencyPrefix + planPath,
VPackValue(VPackValueType::Array));
underscore.add(VPackValue(std::string("_") + plan[0].copyString()));
for (size_t i = 1; i < plan.length(); ++i) {
underscore.add(plan[i]);
remove.close();
// --- Plan version
remove.add(_agencyPrefix + planVersion,
VPackValue(VPackValueType::Object));
remove.add("op", VPackValue("increment"));
remove.close();
remove.close();
remove.close();
transact(_agent, remove);
} else {
bool foundFrom = false, foundTo = false;
for (auto const& srv : VPackArrayIterator(current)) {
std::string srv_str = srv.copyString();
if (srv_str == _from) {
foundFrom = true;
}
underscore.close();
// --- Plan version
underscore.add(_agencyPrefix + planVersion,
VPackValue(VPackValueType::Object));
underscore.add("op", VPackValue("increment"));
underscore.close();
underscore.close();
underscore.close();
transact(_agent, underscore);
} else {
Builder remove;
remove.openArray();
remove.openObject();
// --- Plan changes
remove.add(_agencyPrefix + planPath,
VPackValue(VPackValueType::Array));
for (auto const& srv : VPackArrayIterator(plan)) {
if (srv.copyString() != _from) {
remove.add(srv);
if (srv_str == _to) {
foundTo = true;
}
}
if (foundFrom && foundTo) {
if (plan[0].copyString() == _from) { // Leader
Builder underscore; // serverId -> _serverId
underscore.openArray();
underscore.openObject();
// --- Plan changes
underscore.add(_agencyPrefix + planPath,
VPackValue(VPackValueType::Array));
underscore.add(VPackValue(std::string("_") + plan[0].copyString()));
for (size_t i = 1; i < plan.length(); ++i) {
underscore.add(plan[i]);
}
underscore.close();
// --- Plan version
underscore.add(_agencyPrefix + planVersion,
VPackValue(VPackValueType::Object));
underscore.add("op", VPackValue("increment"));
underscore.close();
underscore.close();
underscore.close();
transact(_agent, underscore);
} else {
Builder remove;
remove.openArray();
remove.openObject();
// --- Plan changes
remove.add(_agencyPrefix + planPath,
VPackValue(VPackValueType::Array));
for (auto const& srv : VPackArrayIterator(plan)) {
if (srv.copyString() != _from) {
remove.add(srv);
}
}
remove.close();
// --- Plan version
remove.add(_agencyPrefix + planVersion,
VPackValue(VPackValueType::Object));
remove.add("op", VPackValue("increment"));
remove.close();
remove.close();
remove.close();
transact(_agent, remove);
}
remove.close();
// --- Plan version
remove.add(_agencyPrefix + planVersion,
VPackValue(VPackValueType::Object));
remove.add("op", VPackValue("increment"));
remove.close();
remove.close();
remove.close();
transact(_agent, remove);
}
return PENDING;
} else if (foundTo && !foundFrom) {
if (finish("Shards/" + _shard)) {
return FINISHED;
} else if (foundTo && !foundFrom) {
done++;
}
}
}
}
}
if (done == collections.length()) {
if (finish("Shards/" + _shard)) {
return FINISHED;
}
}
}
return status;
}

View File

@ -208,12 +208,12 @@ bool RemoveServer::start() {
try {
_snapshot(toDoPrefix + _jobId).toBuilder(todo);
} catch (std::exception const&) {
LOG_TOPIC(INFO, Logger::AGENCY) << "Failed to get key " + toDoPrefix +
_jobId + " from agency snapshot";
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Failed to get key " + toDoPrefix + _jobId + " from agency snapshot";
return false;
}
} else {
todo.add(_jb->slice()[0].valueAt(0));
todo.add(_jb->slice()[0].get(_agencyPrefix + toDoPrefix + _jobId));
}
todo.close();
@ -285,32 +285,11 @@ bool RemoveServer::start() {
}
bool RemoveServer::scheduleAddFollowers() {
std::vector<std::string> availServers;
// Get servers from plan
Node::Children const& dbservers = _snapshot("/Plan/DBServers").children();
for (auto const& srv : dbservers) {
availServers.push_back(srv.first);
}
// Remove cleaned from ist
for (auto const& srv :
VPackArrayIterator(_snapshot("/Target/CleanedServers").slice())) {
availServers.erase(std::remove(availServers.begin(), availServers.end(),
srv.copyString()),
availServers.end());
}
// Remove failed from list
for (auto const& srv :
VPackObjectIterator(_snapshot("/Target/FailedServers").slice())) {
availServers.erase(std::remove(availServers.begin(), availServers.end(),
srv.key.copyString()),
availServers.end());
}
std::vector<std::string> servers = availableServers();
// Minimum 1 DB server must remain
if (availServers.size() == 1) {
if (servers.size() == 1) {
LOG_TOPIC(ERR, Logger::AGENCY) << "DB server " << _server
<< " is the last standing db server.";
return false;
@ -320,10 +299,20 @@ bool RemoveServer::scheduleAddFollowers() {
size_t sub = 0;
for (auto const& database : databases) {
for (auto const& collptr : database.second->children()) {
Node const& collection = *(collptr.second);
try { // distributeShardsLike entry means we only follow
if (collection("distributeShardsLike").slice().copyString() != "") {
continue;
}
} catch (...) {}
uint64_t replFactor = collection("replicationFactor").getUInt();
Node::Children const& shards = collection("shards").children();
// mop: special case..we already have at least one more follower than we
// should have...
// we could simply kill the server now...
@ -331,6 +320,7 @@ bool RemoveServer::scheduleAddFollowers() {
if (shards.size() > replFactor) {
continue;
}
for (auto const& shard : shards) {
bool found = false;
VPackArrayIterator dbsit(shard.second->slice());
@ -347,22 +337,21 @@ bool RemoveServer::scheduleAddFollowers() {
}
// Only destinations, which are not already holding this shard
std::vector<std::string> myServers = availServers;
for (auto const& dbserver : dbsit) {
myServers.erase(std::remove(myServers.begin(), myServers.end(),
dbserver.copyString()),
myServers.end());
servers.erase(
std::remove(servers.begin(), servers.end(), dbserver.copyString()),
servers.end());
}
// Among those a random destination
std::string newServer;
if (myServers.empty()) {
LOG_TOPIC(ERR, Logger::AGENCY) << "No servers remain as target for "
<< "RemoveServer";
if (servers.empty()) {
LOG_TOPIC(ERR, Logger::AGENCY)
<< "No servers remain as target for RemoveServer";
return false;
}
newServer = myServers.at(rand() % myServers.size());
newServer = servers.at(rand() % servers.size());
AddFollower(_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
_jobId, _agencyPrefix, database.first, collptr.first,

View File

@ -50,14 +50,18 @@ Supervision::Supervision()
_agent(nullptr),
_snapshot("Supervision"),
_frequency(5),
_gracePeriod(120),
_gracePeriod(15),
_jobId(0),
_jobIdMax(0),
_selfShutdown(false) {}
Supervision::~Supervision() { shutdown(); };
void Supervision::wakeUp() { _cv.signal(); }
void Supervision::wakeUp() {
updateSnapshot();
upgradeAgency();
_cv.signal();
}
static std::string const syncPrefix = "/Sync/ServerStates/";
static std::string const healthPrefix = "/Supervision/Health/";
@ -67,6 +71,37 @@ static std::string const currentServersRegisteredPrefix =
"/Current/ServersRegistered";
static std::string const foxxmaster = "/Current/Foxxmaster";
void Supervision::upgradeAgency() {
try {
if (_snapshot(failedServersPrefix).slice().isArray()) {
Builder builder;
builder.openArray();
builder.openObject();
builder.add(
_agencyPrefix + failedServersPrefix, VPackValue(VPackValueType::Object));
for (auto const& failed :
VPackArrayIterator(_snapshot(failedServersPrefix).slice())) {
builder.add(failed.copyString(), VPackValue(VPackValueType::Object));
builder.close();
}
builder.close();
builder.close();
builder.close();
transact(_agent, builder);
}
} catch (std::exception const& e) {
Builder builder;
builder.openArray();
builder.openObject();
builder.add(
_agencyPrefix + failedServersPrefix, VPackValue(VPackValueType::Object));
builder.close();
builder.close();
builder.close();
transact(_agent, builder);
}
}
std::vector<check_t> Supervision::checkDBServers() {
std::vector<check_t> ret;
Node::Children const& machinesPlanned =
@ -334,14 +369,17 @@ std::vector<check_t> Supervision::checkCoordinators() {
}
bool Supervision::updateSnapshot() {
if (_agent == nullptr || this->isStopping()) {
return false;
}
try {
_snapshot = _agent->readDB().get(_agencyPrefix);
} catch (...) {
}
} catch (...) {}
return true;
}
bool Supervision::doChecks() {

View File

@ -107,6 +107,9 @@ class Supervision : public arangodb::Thread {
/// @brief Wake up to task
void wakeUp();
/// @brief Upgrade agency
void upgradeAgency();
private:
static constexpr const char* HEALTH_STATUS_GOOD = "GOOD";
static constexpr const char* HEALTH_STATUS_BAD = "BAD";

View File

@ -565,6 +565,21 @@ AstNode* Ast::createNodeCollection(char const* name,
_query->collections()->add(name, accessType);
if (ServerState::instance()->isRunningInCluster()) {
auto ci = ClusterInfo::instance();
// We want to tolerate that a collection name is given here
// which does not exist, if only for some unit tests:
try {
auto coll = ci->getCollection(_query->vocbase()->name(), name);
auto names = coll->realNames();
for (auto const& n : names) {
_query->collections()->add(n, accessType);
}
}
catch (...) {
}
}
return node;
}
@ -988,6 +1003,7 @@ AstNode* Ast::createNodeWithCollections (AstNode const* collections) {
if (c->isStringValue()) {
std::string name = c->getString();
_query->collections()->add(name, TRI_TRANSACTION_READ);
if (ServerState::instance()->isRunningInCluster()) {
auto ci = ClusterInfo::instance();
// We want to tolerate that a collection name is given here
@ -1000,10 +1016,7 @@ AstNode* Ast::createNodeWithCollections (AstNode const* collections) {
}
}
catch (...) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
}
} else { // single server
_query->collections()->add(name, TRI_TRANSACTION_READ);
}
}// else bindParameter use default for collection bindVar
// We do not need to propagate these members
@ -1026,6 +1039,7 @@ AstNode* Ast::createNodeCollectionList(AstNode const* edgeCollections) {
auto ss = ServerState::instance();
auto doTheAdd = [&](std::string name) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
if (ss->isRunningInCluster()) {
try {
auto c = ci->getCollection(_query->vocbase()->name(), name);
@ -1035,10 +1049,7 @@ AstNode* Ast::createNodeCollectionList(AstNode const* edgeCollections) {
}
}
catch (...) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
}
} else {
_query->collections()->add(name, TRI_TRANSACTION_READ);
}
};
@ -1476,19 +1487,21 @@ void Ast::injectBindParameters(BindParameters& parameters) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
auto eColls = graph->edgeCollections();
for (const auto& n : eColls) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
if (ServerState::instance()->isRunningInCluster()) {
auto ci = ClusterInfo::instance();
for (const auto& n : eColls) {
auto c = ci->getCollection(_query->vocbase()->name(), n);
auto names = c->realNames();
for (auto const& name : names) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
try {
auto c = ci->getCollection(_query->vocbase()->name(), n);
auto names = c->realNames();
for (auto const& name : names) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
}
} catch (...) {
}
}
} else {
for (const auto& n : eColls) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
}
}
} else if (node->type == NODE_TYPE_SHORTEST_PATH) {
@ -1503,19 +1516,21 @@ void Ast::injectBindParameters(BindParameters& parameters) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
auto eColls = graph->edgeCollections();
for (const auto& n : eColls) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
if (ServerState::instance()->isRunningInCluster()) {
auto ci = ClusterInfo::instance();
for (const auto& n : eColls) {
auto c = ci->getCollection(_query->vocbase()->name(), n);
auto names = c->realNames();
for (auto const& name : names) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
try {
auto c = ci->getCollection(_query->vocbase()->name(), n);
auto names = c->realNames();
for (auto const& name : names) {
_query->collections()->add(name, TRI_TRANSACTION_READ);
}
} catch (...) {
}
}
} else {
for (const auto& n : eColls) {
_query->collections()->add(n, TRI_TRANSACTION_READ);
}
}
}
}
@ -1528,7 +1543,21 @@ void Ast::injectBindParameters(BindParameters& parameters) {
// add all collections used in data-modification statements
for (auto& it : _writeCollections) {
if (it->type == NODE_TYPE_COLLECTION) {
_query->collections()->add(it->getString(), TRI_TRANSACTION_WRITE);
std::string name = it->getString();
_query->collections()->add(name, TRI_TRANSACTION_WRITE);
if (ServerState::instance()->isRunningInCluster()) {
auto ci = ClusterInfo::instance();
// We want to tolerate that a collection name is given here
// which does not exist, if only for some unit tests:
try {
auto coll = ci->getCollection(_query->vocbase()->name(), name);
auto names = coll->realNames();
for (auto const& n : names) {
_query->collections()->add(n, TRI_TRANSACTION_WRITE);
}
} catch (...) {
}
}
}
}

View File

@ -248,6 +248,9 @@ class DistributeNode : public ExecutionNode {
/// @brief return the collection
Collection const* collection() const { return _collection; }
/// @brief set collection
void setCollection(Collection* coll) { _collection = coll; }
private:
/// @brief the underlying database
TRI_vocbase_t* _vocbase;

View File

@ -414,13 +414,13 @@ std::vector<std::vector<arangodb::basics::AttributeName>> Condition::getConstAtt
if (lhs->isAttributeAccessForVariable(parts) &&
parts.first == reference) {
if (includeNull || (rhs->isConstant() && !rhs->isNullValue())) {
if (includeNull || ((rhs->isConstant() || rhs->type == NODE_TYPE_REFERENCE) && !rhs->isNullValue())) {
result.emplace_back(std::move(parts.second));
}
}
else if (rhs->isAttributeAccessForVariable(parts) &&
parts.first == reference) {
if (includeNull || (lhs->isConstant() && !lhs->isNullValue())) {
if (includeNull || ((lhs->isConstant() || lhs->type == NODE_TYPE_REFERENCE) && !lhs->isNullValue())) {
result.emplace_back(std::move(parts.second));
}
}
@ -657,6 +657,30 @@ void Condition::optimize(ExecutionPlan* plan) {
}
TRI_ASSERT(andNumMembers > 1);
// sort AND parts of each sub-condition so > and >= come before < and <=
// we use this to some advantage when we check the conditions for a sparse index
// later.
// if a sparse index is asked whether it can supported a condition such as `attr < value1`,
// this range would include `null`, which the sparse index cannot provide. however, if we
// first check other conditions we may find a condition on the same attribute, e.g. `attr > value2`.
// this other condition may exclude `null` so we then use the full range `value2 < attr < value1`
// and do not have to discard sub-conditions anymore
andNode->sortMembers([](AstNode const* lhs, AstNode const* rhs) {
if ((lhs->type != NODE_TYPE_OPERATOR_BINARY_LT && lhs->type != NODE_TYPE_OPERATOR_BINARY_LE) &&
(rhs->type == NODE_TYPE_OPERATOR_BINARY_LT || rhs->type == NODE_TYPE_OPERATOR_BINARY_LE)) {
// sort < and <= after other comparison operators
return true;
}
if ((lhs->type == NODE_TYPE_OPERATOR_BINARY_LT || lhs->type == NODE_TYPE_OPERATOR_BINARY_LE) &&
(rhs->type != NODE_TYPE_OPERATOR_BINARY_LT && rhs->type != NODE_TYPE_OPERATOR_BINARY_LE)) {
// sort < and <= after other comparison operators
return false;
}
// compare pointers as last resort
return (lhs->type < rhs->type);
});
if (inComparisons > 0) {
// move IN operations to the front to make comparison code below simpler
@ -686,7 +710,7 @@ void Condition::optimize(ExecutionPlan* plan) {
stack.pop_back();
}
}
// optimization is only necessary if an AND node has multiple members
VariableUsageType variableUsage;

View File

@ -114,6 +114,10 @@ class ExecutionNode {
/// @brief return the node's id
inline size_t id() const { return _id; }
/// @brief set the id, use with care! The purpose is to use a cloned node
/// together with the original in the same plan.
void setId(size_t id) { _id = id; }
/// @brief return the type of the node
virtual NodeType getType() const = 0;
@ -538,7 +542,7 @@ class ExecutionNode {
protected:
/// @brief node id
size_t const _id;
size_t _id;
/// @brief our dependent nodes
std::vector<ExecutionNode*> _dependencies;

View File

@ -367,6 +367,8 @@ struct FunctionDefiner {
false, true, &Functions::Near, NotInCoordinator});
add({"WITHIN", "AQL_WITHIN", "hs,n,n,n|s", true, false, true,
false, true, &Functions::Within, NotInCoordinator});
add({"DISTANCE", "AQL_DISTANCE", "n,n,n,n", true, true, false, true, true,
&Functions::Distance});
add({"WITHIN_RECTANGLE", "AQL_WITHIN_RECTANGLE", "hs,d,d,d,d", true,
false, true, false, true});
add({"IS_IN_POLYGON", "AQL_IS_IN_POLYGON", "l,ln|nb",

View File

@ -2294,6 +2294,60 @@ AqlValue Functions::Within(arangodb::aql::Query* query,
return buildGeoResult(trx, query, cors, cid, attributeName);
}
/// @brief function DISTANCE
AqlValue Functions::Distance(arangodb::aql::Query* query,
arangodb::Transaction* trx,
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "DISTANCE", 4, 4);
AqlValue lat1 = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValue lon1 = ExtractFunctionParameterValue(trx, parameters, 1);
AqlValue lat2 = ExtractFunctionParameterValue(trx, parameters, 2);
AqlValue lon2 = ExtractFunctionParameterValue(trx, parameters, 3);
// non-numeric input...
if (!lat1.isNumber() || !lon1.isNumber() || !lat2.isNumber() || !lon2.isNumber()) {
RegisterWarning(query, "DISTANCE",
TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
bool failed;
bool error = false;
double lat1Value = lat1.toDouble(trx, failed);
error |= failed;
double lon1Value = lon1.toDouble(trx, failed);
error |= failed;
double lat2Value = lat2.toDouble(trx, failed);
error |= failed;
double lon2Value = lon2.toDouble(trx, failed);
error |= failed;
if (error) {
RegisterWarning(query, "DISTANCE",
TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
auto toRadians = [](double degrees) -> double {
return degrees * (std::acos(-1.0) / 180.0);
};
double p1 = toRadians(lat1Value);
double p2 = toRadians(lat2Value);
double d1 = toRadians(lat2Value - lat1Value);
double d2 = toRadians(lon2Value - lon1Value);
double a = std::sin(d1 / 2.0) * std::sin(d1 / 2.0) +
std::cos(p1) * std::cos(p2) *
std::sin(d2 / 2.0) * std::sin(d2 / 2.0);
double c = 2.0 * std::atan2(std::sqrt(a), std::sqrt(1.0 - a));
double const EARTHRADIAN = 6371000.0; // metres
return NumberValue(trx, EARTHRADIAN * c, true);
}
/// @brief function FLATTEN
AqlValue Functions::Flatten(arangodb::aql::Query* query,
arangodb::Transaction* trx,

View File

@ -155,6 +155,8 @@ struct Functions {
VPackFunctionParameters const&);
static AqlValue Within(arangodb::aql::Query*, arangodb::Transaction*,
VPackFunctionParameters const&);
static AqlValue Distance(arangodb::aql::Query*, arangodb::Transaction*,
VPackFunctionParameters const&);
static AqlValue Flatten(arangodb::aql::Query*, arangodb::Transaction*,
VPackFunctionParameters const&);
static AqlValue Zip(arangodb::aql::Query*, arangodb::Transaction*,

View File

@ -71,7 +71,10 @@ class ModificationNode : public ExecutionNode {
TRI_vocbase_t* vocbase() const { return _vocbase; }
/// @brief return the collection
Collection const* collection() const { return _collection; }
Collection* collection() const { return _collection; }
/// @brief modify collection afterwards
void setCollection(Collection* coll) { _collection = coll; }
/// @brief estimateCost
/// Note that all the modifying nodes use this estimateCost method which is
@ -111,6 +114,16 @@ class ModificationNode : public ExecutionNode {
/// @brief clear the "$NEW" out variable
void clearOutVariableNew() { _outVariableNew = nullptr; }
/// @brief set the "$OLD" out variable
void setOutVariableOld(Variable const* oldVar) {
_outVariableOld = oldVar;
}
/// @brief set the "$NEW" out variable
void setOutVariableNew(Variable const* newVar) {
_outVariableNew = newVar;
}
/// @brief whether or not the node is a data modification node
bool isModificationNode() const override { return true; }
@ -173,6 +186,10 @@ class RemoveNode : public ModificationNode {
vars.emplace(_inVariable);
}
void setInVariable(Variable const* var) {
_inVariable = var;
}
private:
/// @brief input variable
Variable const* _inVariable;
@ -221,6 +238,10 @@ class InsertNode : public ModificationNode {
vars.emplace(_inVariable);
}
void setInVariable(Variable const* var) {
_inVariable = var;
}
private:
/// @brief input variable
Variable const* _inVariable;
@ -283,6 +304,11 @@ class UpdateNode : public ModificationNode {
}
}
/// @brief set the input document variable
void setInDocVariable(Variable const* var) {
_inDocVariable = var;
}
private:
/// @brief input variable for documents
Variable const* _inDocVariable;
@ -348,6 +374,11 @@ class ReplaceNode : public ModificationNode {
}
}
/// @brief set the input document variable
void setInDocVariable(Variable const* var) {
_inDocVariable = var;
}
private:
/// @brief input variable for documents
Variable const* _inDocVariable;
@ -413,6 +444,18 @@ class UpsertNode : public ModificationNode {
vars.emplace(_updateVariable);
}
void setInDocVariable(Variable const* var) {
_inDocVariable = var;
}
void setInsertVariable(Variable const* var) {
_insertVariable = var;
}
void setUpdateVariable(Variable const* var) {
_updateVariable = var;
}
private:
/// @brief input variable for the search document
Variable const* _inDocVariable;

View File

@ -43,6 +43,7 @@
#include "Basics/SmallVector.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringBuffer.h"
#include "Cluster/ClusterInfo.h"
#include "Utils/Transaction.h"
#include "VocBase/TraverserOptions.h"
@ -1863,6 +1864,14 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
// attributes
bool handled = false;
if (indexes.size() == 1 && isSorted) {
// if we have just a single index and we can use it for the filtering condition,
// then we can use the index for sorting, too. regardless of it the index is sparse or not.
// because the index would only return non-null attributes anyway, so we do not need
// to care about null values when sorting here
isSparse = false;
}
SortCondition sortCondition(_sorts, cond->getConstAttributes(outVariable, !isSparse), _variableDefinitions);
bool const isOnlyAttributeAccess =
@ -2471,6 +2480,16 @@ void arangodb::aql::distributeInClusterRule(Optimizer* opt, ExecutionPlan* plan,
Collection const* collection =
static_cast<ModificationNode*>(node)->collection();
#ifdef USE_ENTERPRISE
auto ci = ClusterInfo::instance();
auto collInfo = ci->getCollection(collection->vocbase->name(),
collection->name);
// Throws if collection is not found!
if (collInfo->isSmart() && collInfo->type() == TRI_COL_TYPE_EDGE) {
distributeInClusterRuleSmartEdgeCollection(opt, plan, rule);
return;
}
#endif
bool const defaultSharding = collection->usesDefaultSharding();
if (nodeType == ExecutionNode::REMOVE ||
@ -2541,7 +2560,7 @@ void arangodb::aql::distributeInClusterRule(Optimizer* opt, ExecutionPlan* plan,
distNode = new DistributeNode(plan, plan->nextId(), vocbase, collection,
inputVariable->id, false, v.size() > 1);
} else if (nodeType == ExecutionNode::UPSERT) {
// an UPSERT nodes has two input variables!
// an UPSERT node has two input variables!
std::vector<Variable const*> v(node->getVariablesUsedHere());
TRI_ASSERT(v.size() >= 2);

View File

@ -122,6 +122,11 @@ void scatterInClusterRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void distributeInClusterRule(Optimizer*, ExecutionPlan*,
Optimizer::Rule const*);
#ifdef USE_ENTERPRISE
void distributeInClusterRuleSmartEdgeCollection(Optimizer*, ExecutionPlan*,
Optimizer::Rule const*);
#endif
void distributeFilternCalcToClusterRule(Optimizer*, ExecutionPlan*,
Optimizer::Rule const*);

View File

@ -184,12 +184,73 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
std::unordered_map<std::string, TRI_edge_direction_e> seenCollections;
auto addEdgeColl = [&](std::string const& n, TRI_edge_direction_e dir) -> void {
if (_isSmart) {
if (n.compare(0, 6, "_from_") == 0) {
if (dir != TRI_EDGE_IN) {
_directions.emplace_back(TRI_EDGE_OUT);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
}
return;
} else if (n.compare(0, 4, "_to_") == 0) {
if (dir != TRI_EDGE_OUT) {
_directions.emplace_back(TRI_EDGE_IN);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
}
return;
}
}
if (dir == TRI_EDGE_ANY) {
_directions.emplace_back(TRI_EDGE_OUT);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
_directions.emplace_back(TRI_EDGE_IN);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
} else {
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
_directions.emplace_back(dir);
}
};
if (graph->type == NODE_TYPE_COLLECTION_LIST) {
size_t edgeCollectionCount = graph->numMembers();
_graphInfo.openArray();
_edgeColls.reserve(edgeCollectionCount);
_directions.reserve(edgeCollectionCount);
// First determine whether all edge collections are smart and sharded
// like a common collection:
auto ci = ClusterInfo::instance();
if (ServerState::instance()->isRunningInCluster()) {
_isSmart = true;
std::string distributeShardsLike;
for (size_t i = 0; i < edgeCollectionCount; ++i) {
auto col = graph->getMember(i);
if (col->type == NODE_TYPE_DIRECTION) {
col = col->getMember(1); // The first member always is the collection
}
std::string n = col->getString();
auto c = ci->getCollection(_vocbase->name(), n);
if (!c->isSmart() || c->distributeShardsLike().empty()) {
_isSmart = false;
break;
}
if (distributeShardsLike.empty()) {
distributeShardsLike = c->distributeShardsLike();
} else if (distributeShardsLike != c->distributeShardsLike()) {
_isSmart = false;
break;
}
}
}
// List of edge collection names
for (size_t i = 0; i < edgeCollectionCount; ++i) {
auto col = graph->getMember(i);
@ -219,8 +280,7 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
}
seenCollections.emplace(eColName, dir);
auto eColType = resolver->getCollectionTypeCluster(eColName);
if (eColType != TRI_COL_TYPE_EDGE) {
if (resolver->getCollectionTypeCluster(eColName) != TRI_COL_TYPE_EDGE) {
std::string msg("collection type invalid for collection '" +
std::string(eColName) +
": expecting collection type 'edge'");
@ -229,19 +289,23 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
}
_graphInfo.add(VPackValue(eColName));
if (dir == TRI_EDGE_ANY) {
// If we have any direction we simply add it twice, once IN once OUT.
_directions.emplace_back(TRI_EDGE_OUT);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
eColName, _vocbase, TRI_TRANSACTION_READ));
_directions.emplace_back(TRI_EDGE_IN);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
eColName, _vocbase, TRI_TRANSACTION_READ));
if (ServerState::instance()->isRunningInCluster()) {
auto c = ci->getCollection(_vocbase->name(), eColName);
if (!c->isSmart()) {
addEdgeColl(eColName, dir);
} else {
std::vector<std::string> names;
if (_isSmart) {
names = c->realNames();
} else {
names = c->realNamesForRead();
}
for (auto const& name : names) {
addEdgeColl(name, baseDirection);
}
}
} else {
_directions.emplace_back(dir);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
eColName, _vocbase, TRI_TRANSACTION_READ));
addEdgeColl(eColName, dir);
}
}
_graphInfo.close();
@ -275,7 +339,7 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
break;
}
if (distributeShardsLike.empty()) {
distributeShardsLike = c->distributeShardsLike().empty();
distributeShardsLike = c->distributeShardsLike();
} else if (distributeShardsLike != c->distributeShardsLike()) {
_isSmart = false;
break;
@ -283,44 +347,11 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
}
}
auto addEdgeColl = [&](std::string const& n) -> void {
if (_isSmart) {
if (n.compare(0, 6, "_from_") == 0) {
if (baseDirection == TRI_EDGE_ANY || baseDirection == TRI_EDGE_OUT) {
_directions.emplace_back(TRI_EDGE_OUT);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
}
return;
} else if (n.compare(0, 4, "_to_") == 0) {
if (baseDirection == TRI_EDGE_ANY || baseDirection == TRI_EDGE_IN) {
_directions.emplace_back(TRI_EDGE_IN);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
}
return;
}
}
if (baseDirection == TRI_EDGE_ANY) {
_directions.emplace_back(TRI_EDGE_OUT);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
_directions.emplace_back(TRI_EDGE_IN);
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
} else {
_edgeColls.emplace_back(std::make_unique<aql::Collection>(
n, _vocbase, TRI_TRANSACTION_READ));
_directions.emplace_back(baseDirection);
}
};
for (const auto& n : eColls) {
if (ServerState::instance()->isRunningInCluster()) {
auto c = ci->getCollection(_vocbase->name(), n);
if (!c->isSmart()) {
addEdgeColl(n);
addEdgeColl(n, baseDirection);
} else {
std::vector<std::string> names;
if (_isSmart) {
@ -329,11 +360,11 @@ TraversalNode::TraversalNode(ExecutionPlan* plan, size_t id,
names = c->realNamesForRead();
}
for (auto const& name : names) {
addEdgeColl(name);
addEdgeColl(name, baseDirection);
}
}
} else {
addEdgeColl(n);
addEdgeColl(n, baseDirection);
}
}
@ -1154,6 +1185,7 @@ void TraversalNode::getConditionVariables(
}
}
#ifndef USE_ENTERPRISE
void TraversalNode::enhanceEngineInfo(VPackBuilder& builder) const {
if (_graphObj != nullptr) {
_graphObj->enhanceEngineInfo(builder);
@ -1161,6 +1193,7 @@ void TraversalNode::enhanceEngineInfo(VPackBuilder& builder) const {
// TODO enhance the Info based on EdgeCollections.
}
}
#endif
#ifdef TRI_ENABLE_MAINTAINER_MODE
void TraversalNode::checkConditionsDefined() const {

View File

@ -70,9 +70,18 @@ else ()
set(ROCKSDB_FILES "")
endif ()
if (USE_ENTERPRISE)
set(ENTERPRISE_FILES "")
else ()
set(ENTERPRISE_FILES
Utils/Events.cpp
)
endif ()
add_executable(${BIN_ARANGOD}
${ProductVersionFiles}
${ROCKSDB_FILES}
${ENTERPRISE_FILES}
Actions/ActionFeature.cpp
Actions/RestActionHandler.cpp
Actions/actions.cpp
@ -89,6 +98,7 @@ add_executable(${BIN_ARANGOD}
Agency/FailedServer.cpp
Agency/GossipCallback.cpp
Agency/Inception.cpp
Agency/Job.cpp
Agency/MoveShard.cpp
Agency/NotifyCallback.cpp
Agency/Node.cpp

File diff suppressed because it is too large Load Diff

View File

@ -357,7 +357,7 @@ static int distributeBabyOnShards(
// We have invalid input at this point.
// However we can work with the other babies.
// This is for compatibility with single server
// We just asign it to any shard and pretend the user has given a key
// We just assign it to any shard and pretend the user has given a key
std::shared_ptr<std::vector<ShardID>> shards = ci->getShardList(collid);
shardID = shards->at(0);
userSpecifiedKey = true;
@ -1789,7 +1789,20 @@ int getFilteredEdgesOnCoordinator(
std::shared_ptr<LogicalCollection> collinfo =
ci->getCollection(dbname, collname);
auto shards = collinfo->shardIds();
std::shared_ptr<std::unordered_map<std::string, std::vector<std::string>>> shards;
if (collinfo->isSmart() && collinfo->type() == TRI_COL_TYPE_EDGE) {
auto names = collinfo->realNamesForRead();
shards = std::make_shared<std::unordered_map<std::string, std::vector<std::string>>>();
for (auto const& n : names) {
collinfo = ci->getCollection(dbname, n);
auto smap = collinfo->shardIds();
for (auto const& x : *smap) {
shards->insert(x);
}
}
} else {
shards = collinfo->shardIds();
}
std::string queryParameters = "?vertex=" + StringUtils::urlEncode(vertex);
if (direction == TRI_EDGE_IN) {
queryParameters += "&direction=in";

View File

@ -121,8 +121,8 @@ void HttpCommTask::addResponse(HttpResponse* response) {
// set "connection" header, keep-alive is the default
response->setConnectionType(
_closeRequested ? rest::ConnectionType::CONNECTION_CLOSE
: rest::ConnectionType::CONNECTION_KEEP_ALIVE);
_closeRequested ? rest::ConnectionType::C_CLOSE
: rest::ConnectionType::C_KEEP_ALIVE);
size_t const responseBodyLength = response->bodySize();

View File

@ -39,6 +39,8 @@ std::atomic_uint_fast64_t NEXT_HANDLER_ID(
static_cast<uint64_t>(TRI_microtime() * 100000.0));
}
thread_local RestHandler const* RestHandler::CURRENT_HANDLER = nullptr;
RestHandler::RestHandler(GeneralRequest* request, GeneralResponse* response)
: _handlerId(NEXT_HANDLER_ID.fetch_add(1, std::memory_order_seq_cst)),
_request(request),

View File

@ -45,6 +45,9 @@ class RestHandler : public RequestStatisticsAgent, public arangodb::WorkItem {
RestHandler(RestHandler const&) = delete;
RestHandler& operator=(RestHandler const&) = delete;
public:
static thread_local RestHandler const* CURRENT_HANDLER;
public:
RestHandler(GeneralRequest*, GeneralResponse*);

View File

@ -35,6 +35,7 @@
#include "Meta/conversion.h"
#include "Scheduler/Scheduler.h"
#include "Scheduler/SchedulerFeature.h"
#include "Utils/Events.h"
#include "VocBase/ticks.h"
#include <velocypack/Validator.h>
@ -274,6 +275,7 @@ bool VppCommTask::processRead() {
}
if (level != AuthLevel::RW) {
events::NotAuthorized(request.get());
handleSimpleError(rest::ResponseCode::UNAUTHORIZED, TRI_ERROR_FORBIDDEN,
"not authorized to execute this request",
chunkHeader._messageID);

View File

@ -598,6 +598,7 @@ bool Index::canUseConditionPart(arangodb::aql::AstNode const* access,
arangodb::aql::AstNode const* other,
arangodb::aql::AstNode const* op,
arangodb::aql::Variable const* reference,
std::unordered_set<std::string>& nonNullAttributes,
bool isExecution) const {
if (_sparse) {
if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_NIN) {
@ -634,12 +635,33 @@ bool Index::canUseConditionPart(arangodb::aql::AstNode const* access,
if (!other->isConstant()) {
return false;
}
if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_NE &&
other->isNullValue()) {
// != null. now note that a certain attribute cannot become null
try { nonNullAttributes.emplace(access->toString()); } catch (...) {}
} else if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GT) {
// > null. now note that a certain attribute cannot become null
try { nonNullAttributes.emplace(access->toString()); } catch (...) {}
} else if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE &&
!other->isNullValue()) {
// >= non-null. now note that a certain attribute cannot become null
try { nonNullAttributes.emplace(access->toString()); } catch (...) {}
}
if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LT ||
op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LE) {
// < and <= are not supported with sparse indexes as this may include
// null values
return false;
try {
// check if we've marked this attribute as being non-null already
if (nonNullAttributes.find(access->toString()) == nonNullAttributes.end()) {
return false;
}
} catch (...) {
return false;
}
}
if (other->isNullValue() &&
@ -647,7 +669,14 @@ bool Index::canUseConditionPart(arangodb::aql::AstNode const* access,
op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE)) {
// == and >= null are not supported with sparse indexes for the same
// reason
return false;
try {
// check if we've marked this attribute as being non-null already
if (nonNullAttributes.find(access->toString()) == nonNullAttributes.end()) {
return false;
}
} catch (...) {
return false;
}
}
if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN &&

View File

@ -361,6 +361,7 @@ class Index {
arangodb::aql::AstNode const* other,
arangodb::aql::AstNode const* op,
arangodb::aql::Variable const* reference,
std::unordered_set<std::string>& nonNullAttributes,
bool) const;
//////////////////////////////////////////////////////////////////////////////

View File

@ -612,8 +612,9 @@ bool RocksDBIndex::accessFitsIndex(
arangodb::aql::AstNode const* op, arangodb::aql::Variable const* reference,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&
found,
std::unordered_set<std::string>& nonNullAttributes,
bool isExecution) const {
if (!this->canUseConditionPart(access, other, op, reference, isExecution)) {
if (!this->canUseConditionPart(access, other, op, reference, nonNullAttributes, isExecution)) {
return false;
}
@ -709,7 +710,9 @@ void RocksDBIndex::matchAttributes(
arangodb::aql::Variable const* reference,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&
found,
size_t& values, bool isExecution) const {
size_t& values,
std::unordered_set<std::string>& nonNullAttributes,
bool isExecution) const {
for (size_t i = 0; i < node->numMembers(); ++i) {
auto op = node->getMember(i);
@ -721,14 +724,14 @@ void RocksDBIndex::matchAttributes(
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE:
TRI_ASSERT(op->numMembers() == 2);
accessFitsIndex(op->getMember(0), op->getMember(1), op, reference,
found, isExecution);
found, nonNullAttributes, isExecution);
accessFitsIndex(op->getMember(1), op->getMember(0), op, reference,
found, isExecution);
found, nonNullAttributes, isExecution);
break;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN:
if (accessFitsIndex(op->getMember(0), op->getMember(1), op, reference,
found, isExecution)) {
found, nonNullAttributes, isExecution)) {
auto m = op->getMember(1);
if (m->isArray() && m->numMembers() > 1) {
// attr IN [ a, b, c ] => this will produce multiple items, so
@ -749,8 +752,9 @@ bool RocksDBIndex::supportsFilterCondition(
arangodb::aql::Variable const* reference, size_t itemsInIndex,
size_t& estimatedItems, double& estimatedCost) const {
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
std::unordered_set<std::string> nonNullAttributes;
size_t values = 0;
matchAttributes(node, reference, found, values, false);
matchAttributes(node, reference, found, values, nonNullAttributes, false);
bool lastContainsEquality = true;
size_t attributesCovered = 0;
@ -908,8 +912,9 @@ IndexIterator* RocksDBIndex::iteratorForCondition(
VPackArrayBuilder guard(&searchValues);
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
std::unordered_set<std::string> nonNullAttributes;
size_t unused = 0;
matchAttributes(node, reference, found, unused, true);
matchAttributes(node, reference, found, unused, nonNullAttributes, true);
// found contains all attributes that are relevant for this node.
// It might be less than fields().
@ -1086,8 +1091,9 @@ arangodb::aql::AstNode* RocksDBIndex::specializeCondition(
arangodb::aql::AstNode* node,
arangodb::aql::Variable const* reference) const {
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
std::unordered_set<std::string> nonNullAttributes;
size_t values = 0;
matchAttributes(node, reference, found, values, false);
matchAttributes(node, reference, found, values, nonNullAttributes, false);
std::vector<arangodb::aql::AstNode const*> children;
bool lastContainsEquality = true;

View File

@ -208,12 +208,15 @@ class RocksDBIndex final : public PathBasedIndex {
arangodb::aql::AstNode const*, arangodb::aql::AstNode const*,
arangodb::aql::AstNode const*, arangodb::aql::Variable const*,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&,
std::unordered_set<std::string>& nonNullAttributes,
bool) const;
void matchAttributes(
arangodb::aql::AstNode const*, arangodb::aql::Variable const*,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&,
size_t&, bool) const;
size_t& values,
std::unordered_set<std::string>& nonNullAttributes,
bool) const;
private:

View File

@ -43,6 +43,8 @@ bool SimpleAttributeEqualityMatcher::matchOne(
arangodb::Index const* index, arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference, size_t itemsInIndex,
size_t& estimatedItems, double& estimatedCost) {
std::unordered_set<std::string> nonNullAttributes;
_found.clear();
for (size_t i = 0; i < node->numMembers(); ++i) {
@ -52,9 +54,9 @@ bool SimpleAttributeEqualityMatcher::matchOne(
TRI_ASSERT(op->numMembers() == 2);
// EQ is symmetric
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false) ||
reference, nonNullAttributes, false) ||
accessFitsIndex(index, op->getMember(1), op->getMember(0), op,
reference, false)) {
reference, nonNullAttributes, false)) {
// we can use the index
calculateIndexCosts(index, itemsInIndex, estimatedItems, estimatedCost);
return true;
@ -62,7 +64,7 @@ bool SimpleAttributeEqualityMatcher::matchOne(
} else if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN) {
TRI_ASSERT(op->numMembers() == 2);
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false)) {
reference, nonNullAttributes, false)) {
// we can use the index
// use slightly different cost calculation for IN that for EQ
calculateIndexCosts(index, itemsInIndex, estimatedItems, estimatedCost);
@ -88,8 +90,10 @@ bool SimpleAttributeEqualityMatcher::matchAll(
arangodb::Index const* index, arangodb::aql::AstNode const* node,
arangodb::aql::Variable const* reference, size_t itemsInIndex,
size_t& estimatedItems, double& estimatedCost) {
_found.clear();
std::unordered_set<std::string> nonNullAttributes;
size_t values = 0;
_found.clear();
for (size_t i = 0; i < node->numMembers(); ++i) {
auto op = node->getMember(i);
@ -98,15 +102,15 @@ bool SimpleAttributeEqualityMatcher::matchAll(
TRI_ASSERT(op->numMembers() == 2);
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false) ||
reference, nonNullAttributes, false) ||
accessFitsIndex(index, op->getMember(1), op->getMember(0), op,
reference, false)) {
reference, nonNullAttributes, false)) {
}
} else if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN) {
TRI_ASSERT(op->numMembers() == 2);
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false)) {
reference, nonNullAttributes, false)) {
auto m = op->getMember(1);
if (m->isArray() && m->numMembers() > 1) {
@ -151,6 +155,8 @@ bool SimpleAttributeEqualityMatcher::matchAll(
arangodb::aql::AstNode* SimpleAttributeEqualityMatcher::specializeOne(
arangodb::Index const* index, arangodb::aql::AstNode* node,
arangodb::aql::Variable const* reference) {
std::unordered_set<std::string> nonNullAttributes;
_found.clear();
size_t const n = node->numMembers();
@ -162,9 +168,9 @@ arangodb::aql::AstNode* SimpleAttributeEqualityMatcher::specializeOne(
TRI_ASSERT(op->numMembers() == 2);
// EQ is symmetric
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false) ||
reference, nonNullAttributes, false) ||
accessFitsIndex(index, op->getMember(1), op->getMember(0), op,
reference, false)) {
reference, nonNullAttributes, false)) {
// we can use the index
// now return only the child node we need
while (node->numMembers() > 0) {
@ -178,7 +184,7 @@ arangodb::aql::AstNode* SimpleAttributeEqualityMatcher::specializeOne(
TRI_ASSERT(op->numMembers() == 2);
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false)) {
reference, nonNullAttributes, false)) {
// we can use the index
// now return only the child node we need
while (node->numMembers() > 0) {
@ -204,6 +210,8 @@ arangodb::aql::AstNode* SimpleAttributeEqualityMatcher::specializeOne(
arangodb::aql::AstNode* SimpleAttributeEqualityMatcher::specializeAll(
arangodb::Index const* index, arangodb::aql::AstNode* node,
arangodb::aql::Variable const* reference) {
std::unordered_set<std::string> nonNullAttributes;
_found.clear();
size_t const n = node->numMembers();
@ -214,9 +222,9 @@ arangodb::aql::AstNode* SimpleAttributeEqualityMatcher::specializeAll(
if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_EQ) {
TRI_ASSERT(op->numMembers() == 2);
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false) ||
reference, nonNullAttributes, false) ||
accessFitsIndex(index, op->getMember(1), op->getMember(0), op,
reference, false)) {
reference, nonNullAttributes, false)) {
TRI_IF_FAILURE("SimpleAttributeMatcher::specializeAllChildrenEQ") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
@ -228,7 +236,7 @@ arangodb::aql::AstNode* SimpleAttributeEqualityMatcher::specializeAll(
} else if (op->type == arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN) {
TRI_ASSERT(op->numMembers() == 2);
if (accessFitsIndex(index, op->getMember(0), op->getMember(1), op,
reference, false)) {
reference, nonNullAttributes, false)) {
TRI_IF_FAILURE("SimpleAttributeMatcher::specializeAllChildrenIN") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
@ -319,8 +327,10 @@ void SimpleAttributeEqualityMatcher::calculateIndexCosts(
bool SimpleAttributeEqualityMatcher::accessFitsIndex(
arangodb::Index const* index, arangodb::aql::AstNode const* access,
arangodb::aql::AstNode const* other, arangodb::aql::AstNode const* op,
arangodb::aql::Variable const* reference, bool isExecution) {
if (!index->canUseConditionPart(access, other, op, reference, isExecution)) {
arangodb::aql::Variable const* reference,
std::unordered_set<std::string>& nonNullAttributes,
bool isExecution) {
if (!index->canUseConditionPart(access, other, op, reference, nonNullAttributes, isExecution)) {
return false;
}

View File

@ -107,7 +107,9 @@ class SimpleAttributeEqualityMatcher {
bool accessFitsIndex(arangodb::Index const*, arangodb::aql::AstNode const*,
arangodb::aql::AstNode const*,
arangodb::aql::AstNode const*,
arangodb::aql::Variable const*, bool);
arangodb::aql::Variable const*,
std::unordered_set<std::string>& nonNullAttributes,
bool);
private:
//////////////////////////////////////////////////////////////////////////////

View File

@ -41,16 +41,16 @@ static size_t sortWeight(arangodb::aql::AstNode const* node) {
return 1;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN:
return 2;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LT:
return 3;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GT:
return 4;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LE:
return 5;
return 3;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE:
return 4;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LT:
return 5;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LE:
return 6;
default:
return 42;
return 42; /* OPST_CIRCUS */
}
}
@ -1124,8 +1124,9 @@ bool SkiplistIndex::accessFitsIndex(
arangodb::aql::AstNode const* op, arangodb::aql::Variable const* reference,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&
found,
std::unordered_set<std::string>& nonNullAttributes,
bool isExecution) const {
if (!this->canUseConditionPart(access, other, op, reference, isExecution)) {
if (!this->canUseConditionPart(access, other, op, reference, nonNullAttributes, isExecution)) {
return false;
}
@ -1221,7 +1222,9 @@ void SkiplistIndex::matchAttributes(
arangodb::aql::Variable const* reference,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&
found,
size_t& values, bool isExecution) const {
size_t& values,
std::unordered_set<std::string>& nonNullAttributes,
bool isExecution) const {
for (size_t i = 0; i < node->numMembers(); ++i) {
auto op = node->getMember(i);
@ -1233,14 +1236,14 @@ void SkiplistIndex::matchAttributes(
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE:
TRI_ASSERT(op->numMembers() == 2);
accessFitsIndex(op->getMember(0), op->getMember(1), op, reference,
found, isExecution);
found, nonNullAttributes, isExecution);
accessFitsIndex(op->getMember(1), op->getMember(0), op, reference,
found, isExecution);
found, nonNullAttributes, isExecution);
break;
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN:
if (accessFitsIndex(op->getMember(0), op->getMember(1), op, reference,
found, isExecution)) {
found, nonNullAttributes, isExecution)) {
auto m = op->getMember(1);
if (m->isArray() && m->numMembers() > 1) {
// attr IN [ a, b, c ] => this will produce multiple items, so
@ -1259,8 +1262,9 @@ void SkiplistIndex::matchAttributes(
bool SkiplistIndex::accessFitsIndex(
arangodb::aql::AstNode const* access, arangodb::aql::AstNode const* other,
arangodb::aql::AstNode const* op, arangodb::aql::Variable const* reference,
std::vector<std::vector<arangodb::aql::AstNode const*>>& found) const {
if (!this->canUseConditionPart(access, other, op, reference, true)) {
std::vector<std::vector<arangodb::aql::AstNode const*>>& found,
std::unordered_set<std::string>& nonNullAttributes) const {
if (!this->canUseConditionPart(access, other, op, reference, nonNullAttributes, true)) {
return false;
}
@ -1351,6 +1355,7 @@ bool SkiplistIndex::findMatchingConditions(
arangodb::aql::Variable const* reference,
std::vector<std::vector<arangodb::aql::AstNode const*>>& mapping,
bool& usesIn) const {
std::unordered_set<std::string> nonNullAttributes;
usesIn = false;
for (size_t i = 0; i < node->numMembers(); ++i) {
@ -1364,14 +1369,14 @@ bool SkiplistIndex::findMatchingConditions(
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE: {
TRI_ASSERT(op->numMembers() == 2);
accessFitsIndex(op->getMember(0), op->getMember(1), op, reference,
mapping);
mapping, nonNullAttributes);
accessFitsIndex(op->getMember(1), op->getMember(0), op, reference,
mapping);
mapping, nonNullAttributes);
break;
}
case arangodb::aql::NODE_TYPE_OPERATOR_BINARY_IN: {
auto m = op->getMember(1);
if (accessFitsIndex(op->getMember(0), m, op, reference, mapping)) {
if (accessFitsIndex(op->getMember(0), m, op, reference, mapping, nonNullAttributes)) {
if (m->numMembers() == 0) {
// We want to do an IN [].
// No results
@ -1468,8 +1473,9 @@ bool SkiplistIndex::supportsFilterCondition(
arangodb::aql::Variable const* reference, size_t itemsInIndex,
size_t& estimatedItems, double& estimatedCost) const {
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
std::unordered_set<std::string> nonNullAttributes;
size_t values = 0;
matchAttributes(node, reference, found, values, false);
matchAttributes(node, reference, found, values, nonNullAttributes, false);
bool lastContainsEquality = true;
size_t attributesCovered = 0;
@ -1604,8 +1610,9 @@ arangodb::aql::AstNode* SkiplistIndex::specializeCondition(
arangodb::aql::AstNode* node,
arangodb::aql::Variable const* reference) const {
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
std::unordered_set<std::string> nonNullAttributes;
size_t values = 0;
matchAttributes(node, reference, found, values, false);
matchAttributes(node, reference, found, values, nonNullAttributes, false);
std::vector<arangodb::aql::AstNode const*> children;
bool lastContainsEquality = true;

View File

@ -422,18 +422,21 @@ class SkiplistIndex final : public PathBasedIndex {
arangodb::aql::AstNode const*, arangodb::aql::AstNode const*,
arangodb::aql::AstNode const*, arangodb::aql::Variable const*,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&,
std::unordered_set<std::string>& nonNullAttributes,
bool) const;
bool accessFitsIndex(
arangodb::aql::AstNode const*, arangodb::aql::AstNode const*,
arangodb::aql::AstNode const*, arangodb::aql::Variable const*,
std::vector<std::vector<arangodb::aql::AstNode const*>>&) const;
std::vector<std::vector<arangodb::aql::AstNode const*>>&,
std::unordered_set<std::string>& nonNullAttributes) const;
void matchAttributes(
arangodb::aql::AstNode const*, arangodb::aql::Variable const*,
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>>&,
size_t&, bool) const;
size_t& values,
std::unordered_set<std::string>& nonNullAttributes,
bool) const;
bool findMatchingConditions(
arangodb::aql::AstNode const*, arangodb::aql::Variable const*,

View File

@ -107,21 +107,22 @@ RestHandler::status RestAuthHandler::execute() {
return badRequest();
}
std::string const username = usernameSlice.copyString();
_username = usernameSlice.copyString();
std::string const password = passwordSlice.copyString();
AuthResult auth =
GeneralServerFeature::AUTH_INFO.checkPassword(username, password);
GeneralServerFeature::AUTH_INFO.checkPassword(_username, password);
if (auth._authorized) {
VPackBuilder resultBuilder;
{
VPackObjectBuilder b(&resultBuilder);
std::string jwt = generateJwt(username, password);
std::string jwt = generateJwt(_username, password);
resultBuilder.add("jwt", VPackValue(jwt));
resultBuilder.add("must_change_password", VPackValue(auth._mustChange));
}
_isValid = true;
generateDocument(resultBuilder.slice(), true, &VPackOptions::Defaults);
return status::DONE;
} else {

View File

@ -46,10 +46,18 @@ class RestAuthHandler : public RestVocbaseBaseHandler {
bool isDirect() const override;
status execute() override;
#ifdef USE_ENTERPRISE
void finalizeExecute() override;
#endif
private:
status badRequest();
private:
std::string _jwtSecret;
std::string _username;
bool _isValid = false;
std::chrono::seconds _validFor;
status badRequest();
};
}

View File

@ -253,7 +253,7 @@ RestHandler::status RestBatchHandler::executeHttp() {
httpResponse->body().appendText(TRI_CHAR_LENGTH_PAIR("\r\n\r\n"));
// remove some headers we don't need
partResponse->setConnectionType(rest::ConnectionType::CONNECTION_NONE);
partResponse->setConnectionType(rest::ConnectionType::C_NONE);
partResponse->setHeaderNC(StaticStrings::Server, "");
// append the part response header

View File

@ -57,6 +57,10 @@ class RestCursorHandler : public RestVocbaseBaseHandler {
public:
virtual status execute() override;
#ifdef USE_ENTERPRISE
void finalizeExecute() override;
#endif
bool cancel() override;
protected:

View File

@ -35,6 +35,10 @@ class RestDocumentHandler : public RestVocbaseBaseHandler {
public:
status execute() override final;
#ifdef USE_ENTERPRISE
void finalizeExecute() override;
#endif
protected:
virtual TRI_col_type_e getCollectionType() const {
return TRI_COL_TYPE_DOCUMENT;
@ -66,7 +70,6 @@ class RestDocumentHandler : public RestVocbaseBaseHandler {
// deletes a document
bool deleteDocument();
};
}

View File

@ -117,7 +117,7 @@ class RestImportHandler : public RestVocbaseBaseHandler {
bool createFromKeyValueList();
bool createFromKeyValueListVPack() {
LOG(ERR) << " not implemened";
LOG(ERR) << " not implemented";
return false;
}

View File

@ -23,8 +23,8 @@
#include "DatabaseFeature.h"
#include "Agency/v8-agency.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Agency/v8-agency.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Aql/QueryCache.h"
#include "Aql/QueryRegistry.h"
#include "Basics/ArangoGlobalContext.h"
@ -44,6 +44,7 @@
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Utils/CursorRepository.h"
#include "Utils/Events.h"
#include "V8Server/V8DealerFeature.h"
#include "V8Server/v8-query.h"
#include "V8Server/v8-vocbase.h"
@ -69,13 +70,13 @@ DatabaseFeature* DatabaseFeature::DATABASE = nullptr;
/// @brief database manager thread main loop
/// the purpose of this thread is to physically remove directories of databases
/// that have been dropped
DatabaseManagerThread::DatabaseManagerThread()
: Thread("DatabaseManager") {}
DatabaseManagerThread::DatabaseManagerThread() : Thread("DatabaseManager") {}
DatabaseManagerThread::~DatabaseManagerThread() { shutdown(); }
void DatabaseManagerThread::run() {
auto databaseFeature = ApplicationServer::getFeature<DatabaseFeature>("Database");
auto databaseFeature =
ApplicationServer::getFeature<DatabaseFeature>("Database");
auto dealer = ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
int cleanupCycles = 0;
@ -134,8 +135,8 @@ void DatabaseManagerThread::run() {
}
if (database->type() != TRI_VOCBASE_TYPE_COORDINATOR) {
// regular database
// ---------------------------
// regular database
// ---------------------------
#ifdef ARANGODB_ENABLE_ROCKSDB
// delete persistent indexes for this database
@ -143,8 +144,8 @@ void DatabaseManagerThread::run() {
#endif
LOG(TRACE) << "physically removing database directory '"
<< engine->databasePath(database) << "' of database '" << database->name()
<< "'";
<< engine->databasePath(database) << "' of database '"
<< database->name() << "'";
std::string path;
@ -158,7 +159,7 @@ void DatabaseManagerThread::run() {
if (TRI_IsDirectory(path.c_str())) {
LOG(TRACE) << "removing app directory '" << path
<< "' of database '" << database->name() << "'";
<< "' of database '" << database->name() << "'";
TRI_RemoveDirectory(path.c_str());
}
@ -296,8 +297,7 @@ void DatabaseFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
}
}
void DatabaseFeature::prepare() {
}
void DatabaseFeature::prepare() {}
void DatabaseFeature::start() {
// set singleton
@ -318,12 +318,14 @@ void DatabaseFeature::start() {
int res = iterateDatabases(builder.slice());
if (res != TRI_ERROR_NO_ERROR) {
LOG(FATAL) << "could not iterate over all databases: " << TRI_errno_string(res);
LOG(FATAL) << "could not iterate over all databases: "
<< TRI_errno_string(res);
FATAL_ERROR_EXIT();
}
if (systemDatabase() == nullptr) {
LOG(FATAL) << "No _system database found in database directory. Cannot start!";
LOG(FATAL)
<< "No _system database found in database directory. Cannot start!";
FATAL_ERROR_EXIT();
}
@ -422,10 +424,13 @@ int DatabaseFeature::recoveryDone() {
}
/// @brief create a new database
int DatabaseFeature::createDatabaseCoordinator(TRI_voc_tick_t id, std::string const& name, TRI_vocbase_t*& result) {
int DatabaseFeature::createDatabaseCoordinator(TRI_voc_tick_t id,
std::string const& name,
TRI_vocbase_t*& result) {
result = nullptr;
if (!TRI_vocbase_t::IsAllowedName(true, name)) {
events::CreateDatabase(name, TRI_ERROR_ARANGO_DATABASE_NAME_INVALID);
return TRI_ERROR_ARANGO_DATABASE_NAME_INVALID;
}
@ -438,12 +443,14 @@ int DatabaseFeature::createDatabaseCoordinator(TRI_voc_tick_t id, std::string co
auto it = theLists->_coordinatorDatabases.find(name);
if (it != theLists->_coordinatorDatabases.end()) {
// name already in use
events::CreateDatabase(name, TRI_ERROR_ARANGO_DUPLICATE_NAME);
return TRI_ERROR_ARANGO_DUPLICATE_NAME;
}
}
// name not yet in use, release the read lock
auto vocbase = std::make_unique<TRI_vocbase_t>(TRI_VOCBASE_TYPE_COORDINATOR, id, name);
auto vocbase =
std::make_unique<TRI_vocbase_t>(TRI_VOCBASE_TYPE_COORDINATOR, id, name);
try {
vocbase->addReplicationApplier(TRI_CreateReplicationApplier(vocbase.get()));
@ -472,6 +479,7 @@ int DatabaseFeature::createDatabaseCoordinator(TRI_voc_tick_t id, std::string co
result = vocbase.get();
vocbase.release();
events::CreateDatabase(name, TRI_ERROR_NO_ERROR);
return TRI_ERROR_NO_ERROR;
}
@ -481,6 +489,7 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
result = nullptr;
if (!TRI_vocbase_t::IsAllowedName(false, name)) {
events::CreateDatabase(name, TRI_ERROR_ARANGO_DATABASE_NAME_INVALID);
return TRI_ERROR_ARANGO_DATABASE_NAME_INVALID;
}
@ -488,7 +497,6 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
id = TRI_NewTickServer();
}
std::unique_ptr<TRI_vocbase_t> vocbase;
VPackBuilder builder;
@ -504,6 +512,7 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
auto it = theLists->_databases.find(name);
if (it != theLists->_databases.end()) {
// name already in use
events::CreateDatabase(name, TRI_ERROR_ARANGO_DUPLICATE_NAME);
return TRI_ERROR_ARANGO_DUPLICATE_NAME;
}
}
@ -521,7 +530,8 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
TRI_ASSERT(vocbase != nullptr);
try {
vocbase->addReplicationApplier(TRI_CreateReplicationApplier(vocbase.get()));
vocbase->addReplicationApplier(
TRI_CreateReplicationApplier(vocbase.get()));
} catch (std::exception const& ex) {
LOG(FATAL) << "initializing replication applier for database '"
<< vocbase->name() << "' failed: " << ex.what();
@ -529,7 +539,8 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
}
// enable deadlock detection
vocbase->_deadlockDetector.enabled(!arangodb::ServerState::instance()->isRunningInCluster());
vocbase->_deadlockDetector.enabled(
!arangodb::ServerState::instance()->isRunningInCluster());
// create application directories
V8DealerFeature* dealer =
@ -585,6 +596,7 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
}
result = vocbase.release();
events::CreateDatabase(name, res);
return res;
}
@ -628,11 +640,15 @@ int DatabaseFeature::dropDatabaseCoordinator(TRI_voc_tick_t id, bool force) {
} else {
delete newLists;
}
events::DropDatabase(vocbase == nullptr ? "" : vocbase->name(), res);
return res;
}
/// @brief drop database
int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, bool waitForDeletion, bool removeAppsDirectory) {
int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker,
bool waitForDeletion,
bool removeAppsDirectory) {
if (name == TRI_VOC_SYSTEM_DATABASE) {
// prevent deletion of system database
return TRI_ERROR_FORBIDDEN;
@ -654,6 +670,7 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, boo
if (it == newLists->_databases.end()) {
// not found
delete newLists;
events::DropDatabase(name, TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
return TRI_ERROR_ARANGO_DATABASE_NOT_FOUND;
} else {
vocbase = it->second;
@ -663,6 +680,7 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, boo
if (!vocbase->markAsDropped()) {
// deleted by someone else?
events::DropDatabase(name, TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
return TRI_ERROR_ARANGO_DATABASE_NOT_FOUND;
}
@ -700,11 +718,14 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, boo
engine->waitUntilDeletion(id, true);
}
events::DropDatabase(name, res);
return res;
}
/// @brief drops an existing database
int DatabaseFeature::dropDatabase(TRI_voc_tick_t id, bool writeMarker, bool waitForDeletion, bool removeAppsDirectory) {
int DatabaseFeature::dropDatabase(TRI_voc_tick_t id, bool writeMarker,
bool waitForDeletion,
bool removeAppsDirectory) {
std::string name;
// find database by name
@ -726,7 +747,8 @@ int DatabaseFeature::dropDatabase(TRI_voc_tick_t id, bool writeMarker, bool wait
return dropDatabase(name, writeMarker, waitForDeletion, removeAppsDirectory);
}
std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIdsCoordinator(bool includeSystem) {
std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIdsCoordinator(
bool includeSystem) {
std::vector<TRI_voc_tick_t> ids;
{
auto unuser(_databasesProtector.use());
@ -745,7 +767,8 @@ std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIdsCoordinator(bool incl
return ids;
}
std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIds(bool includeSystem) {
std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIds(
bool includeSystem) {
std::vector<TRI_voc_tick_t> ids;
{
@ -788,7 +811,8 @@ std::vector<std::string> DatabaseFeature::getDatabaseNames() {
}
/// @brief return the list of all database names for a user
std::vector<std::string> DatabaseFeature::getDatabaseNamesForUser(std::string const& username) {
std::vector<std::string> DatabaseFeature::getDatabaseNamesForUser(
std::string const& username) {
std::vector<std::string> names;
{
@ -799,8 +823,8 @@ std::vector<std::string> DatabaseFeature::getDatabaseNamesForUser(std::string co
TRI_vocbase_t* vocbase = p.second;
TRI_ASSERT(vocbase != nullptr);
auto level =
GeneralServerFeature::AUTH_INFO.canUseDatabase(username, vocbase->name());
auto level = GeneralServerFeature::AUTH_INFO.canUseDatabase(
username, vocbase->name());
if (level == AuthLevel::NONE) {
continue;
@ -841,7 +865,8 @@ TRI_vocbase_t* DatabaseFeature::useDatabaseCoordinator(TRI_voc_tick_t id) {
return nullptr;
}
TRI_vocbase_t* DatabaseFeature::useDatabaseCoordinator(std::string const& name) {
TRI_vocbase_t* DatabaseFeature::useDatabaseCoordinator(
std::string const& name) {
auto unuser(_databasesProtector.use());
auto theLists = _databasesLists.load();
@ -903,8 +928,6 @@ TRI_vocbase_t* DatabaseFeature::lookupDatabaseCoordinator(
return nullptr;
}
/// @brief lookup a database by its name, not increasing its reference count
TRI_vocbase_t* DatabaseFeature::lookupDatabase(std::string const& name) {
auto unuser(_databasesProtector.use());
@ -935,8 +958,8 @@ void DatabaseFeature::updateContexts() {
ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
dealer->defineContextUpdate(
[queryRegistry, vocbase](
v8::Isolate* isolate, v8::Handle<v8::Context> context, size_t i) {
[queryRegistry, vocbase](v8::Isolate* isolate,
v8::Handle<v8::Context> context, size_t i) {
TRI_InitV8VocBridge(isolate, context, queryRegistry, vocbase, i);
TRI_InitV8Queries(isolate, context);
TRI_InitV8Cluster(isolate, context);
@ -948,7 +971,8 @@ void DatabaseFeature::updateContexts() {
void DatabaseFeature::closeDatabases() {
// stop the replication appliers so all replication transactions can end
if (_replicationApplier) {
MUTEX_LOCKER(mutexLocker, _databasesMutex); // Only one should do this at a time
MUTEX_LOCKER(mutexLocker,
_databasesMutex); // Only one should do this at a time
// No need for the thread protector here, because we have the mutex
for (auto& p : _databasesLists.load()->_databases) {
@ -964,7 +988,8 @@ void DatabaseFeature::closeDatabases() {
/// @brief close all opened databases
void DatabaseFeature::closeOpenDatabases() {
MUTEX_LOCKER(mutexLocker, _databasesMutex); // Only one should do this at a time
MUTEX_LOCKER(mutexLocker,
_databasesMutex); // Only one should do this at a time
// No need for the thread protector here, because we have the mutex
// Note however, that somebody could still read the lists concurrently,
// therefore we first install a new value, call scan() on the protector
@ -1035,12 +1060,14 @@ int DatabaseFeature::createBaseApplicationDirectory(std::string const& appPath,
}
/// @brief create app subdirectory for a database
int DatabaseFeature::createApplicationDirectory(std::string const& name, std::string const& basePath) {
int DatabaseFeature::createApplicationDirectory(std::string const& name,
std::string const& basePath) {
if (basePath.empty()) {
return TRI_ERROR_NO_ERROR;
}
std::string const path = basics::FileUtils::buildFilename(basics::FileUtils::buildFilename(basePath, "_db"), name);
std::string const path = basics::FileUtils::buildFilename(
basics::FileUtils::buildFilename(basePath, "_db"), name);
int res = TRI_ERROR_NO_ERROR;
if (!TRI_IsDirectory(path.c_str())) {
@ -1051,7 +1078,7 @@ int DatabaseFeature::createApplicationDirectory(std::string const& name, std::st
if (res == TRI_ERROR_NO_ERROR) {
if (arangodb::wal::LogfileManager::instance()->isInRecovery()) {
LOG(TRACE) << "created application directory '" << path
<< "' for database '" << name << "'";
<< "' for database '" << name << "'";
} else {
LOG(INFO) << "created application directory '" << path
<< "' for database '" << name << "'";
@ -1062,7 +1089,7 @@ int DatabaseFeature::createApplicationDirectory(std::string const& name, std::st
res = TRI_ERROR_NO_ERROR;
} else {
LOG(ERR) << "unable to create application directory '" << path
<< "' for database '" << name << "': " << errorMessage;
<< "' for database '" << name << "': " << errorMessage;
}
}
@ -1071,7 +1098,8 @@ int DatabaseFeature::createApplicationDirectory(std::string const& name, std::st
/// @brief iterate over all databases in the databases directory and open them
int DatabaseFeature::iterateDatabases(VPackSlice const& databases) {
V8DealerFeature* dealer = ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
V8DealerFeature* dealer =
ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
std::string const appPath = dealer->appPath();
StorageEngine* engine = EngineSelectorFeature::ENGINE;
@ -1087,7 +1115,7 @@ int DatabaseFeature::iterateDatabases(VPackSlice const& databases) {
try {
for (auto const& it : VPackArrayIterator(databases)) {
TRI_ASSERT(it.isObject());
VPackSlice deleted = it.get("deleted");
if (deleted.isBoolean() && deleted.getBoolean()) {
// ignore deleted databases here
@ -1109,12 +1137,12 @@ int DatabaseFeature::iterateDatabases(VPackSlice const& databases) {
TRI_vocbase_t* vocbase = engine->openDatabase(it, _upgrade);
// we found a valid database
TRI_ASSERT(vocbase != nullptr);
try {
vocbase->addReplicationApplier(TRI_CreateReplicationApplier(vocbase));
} catch (std::exception const& ex) {
LOG(FATAL) << "initializing replication applier for database '"
<< vocbase->name() << "' failed: " << ex.what();
<< vocbase->name() << "' failed: " << ex.what();
FATAL_ERROR_EXIT();
}
@ -1192,7 +1220,8 @@ void DatabaseFeature::closeDroppedDatabases() {
void DatabaseFeature::verifyAppPaths() {
// create shared application directory js/apps
V8DealerFeature* dealer = ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
V8DealerFeature* dealer =
ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
auto appPath = dealer->appPath();
if (!appPath.empty() && !TRI_IsDirectory(appPath.c_str())) {
@ -1202,7 +1231,8 @@ void DatabaseFeature::verifyAppPaths() {
errorMessage);
if (res == TRI_ERROR_NO_ERROR) {
LOG(INFO) << "created --javascript.app-path directory '" << appPath << "'";
LOG(INFO) << "created --javascript.app-path directory '" << appPath
<< "'";
} else {
LOG(ERR) << "unable to create --javascript.app-path directory '"
<< appPath << "': " << errorMessage;
@ -1233,7 +1263,8 @@ void DatabaseFeature::enableDeadlockDetection() {
}
/// @brief writes a create-database marker into the log
int DatabaseFeature::writeCreateMarker(TRI_voc_tick_t id, VPackSlice const& slice) {
int DatabaseFeature::writeCreateMarker(TRI_voc_tick_t id,
VPackSlice const& slice) {
int res = TRI_ERROR_NO_ERROR;
try {

View File

@ -35,6 +35,7 @@
#include "GeneralServer/GeneralServerFeature.h"
#include "Logger/Logger.h"
#include "Ssl/SslInterface.h"
#include "Utils/Events.h"
#include "VocBase/AuthInfo.h"
#include "VocBase/vocbase.h"
@ -82,7 +83,9 @@ bool VocbaseContext::useClusterAuthentication() const {
rest::ResponseCode VocbaseContext::authenticate() {
TRI_ASSERT(_vocbase != nullptr);
auto restServer = application_features::ApplicationServer::getFeature<GeneralServerFeature>("GeneralServer");
auto restServer =
application_features::ApplicationServer::getFeature<GeneralServerFeature>(
"GeneralServer");
if (!restServer->authentication()) {
// no authentication required at all
@ -117,6 +120,7 @@ rest::ResponseCode VocbaseContext::authenticate() {
GeneralServerFeature::AUTH_INFO.canUseDatabase(username, dbname);
if (level != AuthLevel::RW) {
events::NotAuthorized(_request);
result = rest::ResponseCode::UNAUTHORIZED;
}
}
@ -126,10 +130,10 @@ rest::ResponseCode VocbaseContext::authenticate() {
return result;
}
rest::ResponseCode VocbaseContext::authenticateRequest(
bool* forceOpen) {
auto restServer = application_features::ApplicationServer::getFeature<GeneralServerFeature>("GeneralServer");
rest::ResponseCode VocbaseContext::authenticateRequest(bool* forceOpen) {
auto restServer =
application_features::ApplicationServer::getFeature<GeneralServerFeature>(
"GeneralServer");
#ifdef ARANGODB_HAVE_DOMAIN_SOCKETS
// check if we need to run authentication for this type of
// endpoint
@ -170,16 +174,20 @@ rest::ResponseCode VocbaseContext::authenticateRequest(
_request->header(StaticStrings::Authorization, found);
if (!found) {
events::CredentialsMissing(_request);
return rest::ResponseCode::UNAUTHORIZED;
}
size_t methodPos = authStr.find_first_of(' ');
if (methodPos == std::string::npos) {
events::UnknownAuthenticationMethod(_request);
return rest::ResponseCode::UNAUTHORIZED;
}
// skip over authentication method
char const* auth = authStr.c_str() + methodPos;
while (*auth == ' ') {
++auth;
}
@ -191,7 +199,7 @@ rest::ResponseCode VocbaseContext::authenticateRequest(
} else if (TRI_CaseEqualString(authStr.c_str(), "bearer ", 7)) {
return jwtAuthentication(std::string(auth));
} else {
// mop: hmmm is 403 the correct status code? or 401? or 400? :S
events::UnknownAuthenticationMethod(_request);
return rest::ResponseCode::UNAUTHORIZED;
}
}
@ -200,12 +208,12 @@ rest::ResponseCode VocbaseContext::authenticateRequest(
/// @brief checks the authentication via basic
////////////////////////////////////////////////////////////////////////////////
rest::ResponseCode VocbaseContext::basicAuthentication(
const char* auth) {
rest::ResponseCode VocbaseContext::basicAuthentication(const char* auth) {
if (useClusterAuthentication()) {
std::string const expected = ServerState::instance()->getAuthentication();
if (expected.substr(6) != std::string(auth)) {
events::UnknownAuthenticationMethod(_request);
return rest::ResponseCode::UNAUTHORIZED;
}
@ -216,24 +224,27 @@ rest::ResponseCode VocbaseContext::basicAuthentication(
LOG(TRACE) << "invalid authentication data found, cannot extract "
"username/password";
events::UnknownAuthenticationMethod(_request);
return rest::ResponseCode::BAD;
}
_request->setUser(up.substr(0, n));
events::Authenticated(_request, rest::AuthenticationMethod::BASIC);
return rest::ResponseCode::OK;
}
AuthResult result = GeneralServerFeature::AUTH_INFO.checkAuthentication(
AuthInfo::AuthType::BASIC, auth);
_request->setUser(std::move(result._username));
if (!result._authorized) {
events::CredentialsBad(_request, rest::AuthenticationMethod::BASIC);
return rest::ResponseCode::UNAUTHORIZED;
}
// we have a user name, verify 'mustChange'
_request->setUser(std::move(result._username));
if (result._mustChange) {
if ((_request->requestType() == rest::RequestType::PUT ||
_request->requestType() == rest::RequestType::PATCH) &&
@ -241,9 +252,11 @@ rest::ResponseCode VocbaseContext::basicAuthentication(
return rest::ResponseCode::OK;
}
events::PasswordChangeRequired(_request);
return rest::ResponseCode::FORBIDDEN;
}
events::Authenticated(_request, rest::AuthenticationMethod::BASIC);
return rest::ResponseCode::OK;
}
@ -251,15 +264,18 @@ rest::ResponseCode VocbaseContext::basicAuthentication(
/// @brief checks the authentication via jwt
////////////////////////////////////////////////////////////////////////////////
rest::ResponseCode VocbaseContext::jwtAuthentication(
std::string const& auth) {
rest::ResponseCode VocbaseContext::jwtAuthentication(std::string const& auth) {
AuthResult result = GeneralServerFeature::AUTH_INFO.checkAuthentication(
AuthInfo::AuthType::JWT, auth);
if (!result._authorized) {
events::CredentialsBad(_request, rest::AuthenticationMethod::JWT);
return rest::ResponseCode::UNAUTHORIZED;
}
// we have a user name, verify 'mustChange'
_request->setUser(std::move(result._username));
events::Authenticated(_request, rest::AuthenticationMethod::JWT);
return rest::ResponseCode::OK;
}

View File

@ -22,14 +22,12 @@
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#include "Basics/directories.h"
#include "Basics/tri-strings.h"
#include "Actions/ActionFeature.h"
#include "Agency/AgencyFeature.h"
#ifdef _WIN32
#include "ApplicationFeatures/WindowsServiceFeature.h"
#endif
#include "ApplicationFeatures/ConfigFeature.h"
#include "ApplicationFeatures/DaemonFeature.h"
#include "ApplicationFeatures/GreetingsFeature.h"
@ -86,6 +84,14 @@
#include "Indexes/RocksDBFeature.h"
#endif
#ifdef USE_ENTERPRISE
#include "Enterprise/Audit/AuditFeature.h"
#endif
#ifdef _WIN32
#include "ApplicationFeatures/WindowsServiceFeature.h"
#endif
using namespace arangodb;
using namespace arangodb::wal;
@ -102,17 +108,17 @@ static int runServer(int argc, char** argv) {
application_features::ApplicationServer server(options);
std::vector<std::string> nonServerFeatures = {
"Action", "Affinity", "Agency", "Cluster",
"Daemon", "Dispatcher", "Endpoint", "FoxxQueues",
"GeneralServer", "LoggerBufferFeature", "Server", "Scheduler",
"SslServer", "Statistics", "Supervisor"};
"Action", "Affinity",
"Agency", "Cluster",
"Daemon", "Dispatcher",
"Endpoint", "FoxxQueues",
"GeneralServer", "LoggerBufferFeature",
"Server", "Scheduler",
"SslServer", "Statistics",
"Supervisor"};
int ret = EXIT_FAILURE;
#ifdef _WIN32
server.addFeature(new WindowsServiceFeature(&server));
#endif
server.addFeature(new ActionFeature(&server));
server.addFeature(new AffinityFeature(&server));
server.addFeature(new AgencyFeature(&server));
@ -174,6 +180,14 @@ static int runServer(int argc, char** argv) {
server.addFeature(supervisor.release());
#endif
#ifdef USE_ENTERPRISE
server.addFeature(new AuditFeature(&server));
#endif
#ifdef _WIN32
server.addFeature(new WindowsServiceFeature(&server));
#endif
// storage engines
server.addFeature(new MMFilesEngine(&server));
server.addFeature(

41
arangod/Utils/Events.cpp Normal file
View File

@ -0,0 +1,41 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2016 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////
#include "Events.h"

namespace arangodb {
namespace events {

// Default implementations of the event hooks declared in Events.h.
//
// All hooks are deliberate no-ops in this translation unit: callers
// (authentication code in VocbaseContext, database/collection management
// in DatabaseFeature, Transaction::truncate, ...) invoke them
// unconditionally, and this file provides the "do nothing" behavior.
// NOTE(review): the surrounding commit adds an enterprise AuditFeature,
// so these presumably get real audit-logging implementations in the
// enterprise build -- TODO confirm against Enterprise/Audit sources.
//
// Parameters are intentionally left unnamed: naming them (as the original
// did with `name`, `result`, `col`, `idx`) triggers -Wunused-parameter
// warnings while adding no information to an empty body.

// --- authentication events -------------------------------------------------
void UnknownAuthenticationMethod(GeneralRequest const*) {}
void CredentialsMissing(GeneralRequest const*) {}
void CredentialsBad(GeneralRequest*, rest::AuthenticationMethod) {}
void PasswordChangeRequired(GeneralRequest const*) {}
void Authenticated(GeneralRequest*, rest::AuthenticationMethod) {}
void NotAuthorized(GeneralRequest const*) {}

// --- collection lifecycle events -------------------------------------------
void CreateCollection(std::string const&, int) {}
void DropCollection(std::string const&, int) {}
void TruncateCollection(std::string const&, int) {}

// --- database lifecycle events ---------------------------------------------
void CreateDatabase(std::string const&, int) {}
void DropDatabase(std::string const&, int) {}

// --- index lifecycle events ------------------------------------------------
void CreateIndex(std::string const&, VPackSlice const&) {}
void DropIndex(std::string const&, std::string const&, int) {}

}  // namespace events
}  // namespace arangodb

53
arangod/Utils/Events.h Normal file
View File

@ -0,0 +1,53 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2016 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_UTILS_EVENTS_H
#define ARANGOD_UTILS_EVENTS_H 1
#include "Basics/Common.h"
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include "Rest/CommonDefines.h"
namespace arangodb {
class GeneralRequest;
// Event hooks fired at security- and lifecycle-relevant points of the
// server (authentication outcomes, database/collection/index create/drop,
// collection truncation). The community-edition implementations in
// Events.cpp are all no-ops; callers invoke them unconditionally.
// NOTE(review): this commit also adds an enterprise AuditFeature, so these
// hooks presumably feed the audit log in the enterprise build -- confirm.
namespace events {
// Authentication events. The GeneralRequest* identifies the request being
// authenticated; the AuthenticationMethod (BASIC/JWT) says how.
void UnknownAuthenticationMethod(GeneralRequest const*);
void CredentialsMissing(GeneralRequest const*);
void CredentialsBad(GeneralRequest*, rest::AuthenticationMethod);
void PasswordChangeRequired(GeneralRequest const*);
void Authenticated(GeneralRequest*, rest::AuthenticationMethod);
void NotAuthorized(GeneralRequest const*);
// Lifecycle events. `name` is the collection/database name and `result`
// is the TRI_ERROR_* code of the attempted operation (TRI_ERROR_NO_ERROR
// on success), as passed by the callers visible in this commit.
void CreateCollection(std::string const& name, int result);
void DropCollection(std::string const& name, int result);
void TruncateCollection(std::string const& name, int result);
void CreateDatabase(std::string const& name, int result);
void DropDatabase(std::string const& name, int result);
// Index events: `col` is the collection name; the VPackSlice carries the
// index definition, `idx` the index identifier.
void CreateIndex(std::string const& col, VPackSlice const&);
void DropIndex(std::string const& col, std::string const& idx, int result);
}
}
#endif

View File

@ -41,6 +41,7 @@
#include "Indexes/SkiplistIndex.h"
#include "Logger/Logger.h"
#include "Utils/CollectionNameResolver.h"
#include "Utils/Events.h"
#include "Utils/OperationCursor.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Utils/TransactionContext.h"
@ -2618,12 +2619,16 @@ OperationResult Transaction::truncate(std::string const& collectionName,
TRI_ASSERT(getStatus() == TRI_TRANSACTION_RUNNING);
OperationOptions optionsCopy = options;
OperationResult result;
if (ServerState::isCoordinator(_serverRole)) {
return truncateCoordinator(collectionName, optionsCopy);
result = truncateCoordinator(collectionName, optionsCopy);
} else {
result = truncateLocal(collectionName, optionsCopy);
}
return truncateLocal(collectionName, optionsCopy);
events::TruncateCollection(collectionName, result.code);
return result;
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -142,6 +142,7 @@ void WorkMonitor::pushHandler(RestHandler* handler) {
TRI_ASSERT(desc->_type == WorkType::HANDLER);
activateWorkDescription(desc);
RestHandler::CURRENT_HANDLER = handler;
}
WorkDescription* WorkMonitor::popHandler(RestHandler* handler, bool free) {
@ -161,6 +162,8 @@ WorkDescription* WorkMonitor::popHandler(RestHandler* handler, bool free) {
}
}
// TODO(fc) we might have a stack of handlers
RestHandler::CURRENT_HANDLER = nullptr;
return desc;
}

View File

@ -109,8 +109,11 @@ static void WeakCollectionCallback(const v8::WeakCallbackData<
}
}
/////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief wraps a LogicalCollection
/// Note that if collection is a local collection, then the object will never
/// be freed. If it is not a local collection (coordinator case), then delete
/// will be called when the V8 object is garbage collected.
////////////////////////////////////////////////////////////////////////////////
v8::Handle<v8::Object> WrapCollection(v8::Isolate* isolate,

View File

@ -180,7 +180,8 @@ static int ParseDocumentOrDocumentHandle(v8::Isolate* isolate,
try {
std::shared_ptr<LogicalCollection> col =
ci->getCollection(vocbase->name(), collectionName);
collection = col->clone();
auto colCopy = col->clone();
collection = colCopy.release();
} catch (...) {
return TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND;
}
@ -795,11 +796,24 @@ static void JS_DocumentVocbaseCol(
}
#ifndef USE_ENTERPRISE
////////////////////////////////////////////////////////////////////////////////
/// @brief unloads a collection, case of a coordinator in a cluster
////////////////////////////////////////////////////////////////////////////////
static int ULVocbaseColCoordinator(std::string const& databaseName,
std::string const& collectionCID,
TRI_vocbase_col_status_e status) {
return ClusterInfo::instance()->setCollectionStatusCoordinator(
databaseName, collectionCID, status);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief drops a collection, case of a coordinator in a cluster
////////////////////////////////////////////////////////////////////////////////
#ifndef USE_ENTERPRISE
static void DropVocbaseColCoordinator(
v8::FunctionCallbackInfo<v8::Value> const& args,
arangodb::LogicalCollection* collection) {
@ -989,37 +1003,41 @@ static void JS_LoadVocbaseCol(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
if (ServerState::instance()->isCoordinator()) {
std::string const databaseName(collection->dbName());
std::string const cid = collection->cid_as_string();
int res = ClusterInfo::instance()->setCollectionStatusCoordinator(
databaseName, cid, TRI_VOC_COL_STATUS_LOADED);
int res =
#ifdef USE_ENTERPRISE
ULVocbaseColCoordinatorEnterprise(
collection->dbName(), collection->cid_as_string(),
TRI_VOC_COL_STATUS_LOADED);
#else
ULVocbaseColCoordinator(
collection->dbName(), collection->cid_as_string(),
TRI_VOC_COL_STATUS_LOADED);
#endif
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_UNDEFINED();
}
SingleCollectionTransaction trx(
V8TransactionContext::Create(collection->vocbase(), true),
collection->cid(), TRI_TRANSACTION_READ);
V8TransactionContext::Create(collection->vocbase(), true),
collection->cid(), TRI_TRANSACTION_READ);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
res = trx.finish(res);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
TRI_V8_RETURN_UNDEFINED();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
@ -1160,6 +1178,7 @@ static void JS_PropertiesVocbaseCol(
TRI_V8_THROW_EXCEPTION_PARAMETER(
"indexBuckets must be a two-power between 1 and 1024");
}
int res = info->update(slice, false);
if (res != TRI_ERROR_NO_ERROR) {
@ -2469,11 +2488,18 @@ static void JS_UnloadVocbaseCol(
int res;
if (ServerState::instance()->isCoordinator()) {
std::string const databaseName(collection->dbName());
res = ClusterInfo::instance()->setCollectionStatusCoordinator(
databaseName, collection->cid_as_string(),
res =
#ifdef USE_ENTERPRISE
ULVocbaseColCoordinatorEnterprise(
collection->dbName(), collection->cid_as_string(),
TRI_VOC_COL_STATUS_UNLOADED);
#else
ULVocbaseColCoordinator(
collection->dbName(), collection->cid_as_string(),
TRI_VOC_COL_STATUS_UNLOADED);
#endif
} else {
res = collection->vocbase()->unloadCollection(collection, false);
}
@ -2589,7 +2615,8 @@ static void JS_CollectionVocbase(
try {
std::shared_ptr<LogicalCollection> const ci =
ClusterInfo::instance()->getCollection(vocbase->name(), name);
collection = ci->clone();
auto colCopy = ci->clone();
collection = colCopy.release();
} catch (...) {
// not found
TRI_V8_RETURN_NULL();

View File

@ -27,6 +27,7 @@
#include "Basics/Common.h"
#include "Utils/CollectionNameResolver.h"
#include "V8Server/v8-vocbase.h"
#include "VocBase/vocbase.h"
namespace arangodb {
class LogicalCollection;
@ -46,7 +47,12 @@ bool EqualCollection(arangodb::CollectionNameResolver const* resolver,
std::string const& collectionName,
arangodb::LogicalCollection const* collection);
////////////////////////////////////////////////////////////////////////////////
/// @brief wraps a LogicalCollection
/// Note that if collection is a local collection, then the object will never
/// be freed. If it is not a local collection (coordinator case), then delete
/// will be called when the V8 object is garbage collected.
////////////////////////////////////////////////////////////////////////////////
v8::Handle<v8::Object> WrapCollection(
v8::Isolate* isolate, arangodb::LogicalCollection const* collection);
@ -60,5 +66,11 @@ void TRI_InitV8Collection(v8::Handle<v8::Context> context,
void DropVocbaseColCoordinatorEnterprise(
v8::FunctionCallbackInfo<v8::Value> const& args,
arangodb::LogicalCollection* collection);
int ULVocbaseColCoordinatorEnterprise(std::string const& databaseName,
std::string const& collectionCID,
TRI_vocbase_col_status_e status);
#endif
#endif

View File

@ -74,8 +74,8 @@
#include "V8Server/v8-statistics.h"
#include "V8Server/v8-voccursor.h"
#include "V8Server/v8-vocindex.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/KeyGenerator.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/modes.h"
#include "Wal/LogfileManager.h"
@ -1652,12 +1652,17 @@ static void JS_ThrowCollectionNotLoaded(
v8::HandleScope scope(isolate);
if (args.Length() == 0) {
auto databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
auto databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
bool const value = databaseFeature->throwCollectionNotLoadedError();
TRI_V8_RETURN(v8::Boolean::New(isolate, value));
} else if (args.Length() == 1) {
auto databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
databaseFeature->throwCollectionNotLoadedError(TRI_ObjectToBoolean(args[0]));
auto databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
databaseFeature->throwCollectionNotLoadedError(
TRI_ObjectToBoolean(args[0]));
} else {
TRI_V8_THROW_EXCEPTION_USAGE("THROW_COLLECTION_NOT_LOADED(<value>)");
}
@ -1824,8 +1829,8 @@ static void MapGetVocBase(v8::Local<v8::String> const name,
v8::Handle<v8::Object> value =
cacheObject->GetRealNamedProperty(cacheName)->ToObject();
collection =
TRI_UnwrapClass<arangodb::LogicalCollection>(value, WRP_VOCBASE_COL_TYPE);
collection = TRI_UnwrapClass<arangodb::LogicalCollection>(
value, WRP_VOCBASE_COL_TYPE);
// check if the collection is from the same database
if (collection != nullptr && collection->vocbase() == vocbase) {
@ -1869,7 +1874,8 @@ static void MapGetVocBase(v8::Local<v8::String> const name,
if (ServerState::instance()->isCoordinator()) {
auto ci = ClusterInfo::instance()->getCollection(vocbase->name(),
std::string(key));
collection = ci->clone();
auto colCopy = ci->clone();
collection = colCopy.release(); // will be delete on garbage collection
} else {
collection = vocbase->lookupCollection(std::string(key));
}
@ -1944,7 +1950,7 @@ static void JS_PathDatabase(v8::FunctionCallbackInfo<v8::Value> const& args) {
if (vocbase == nullptr) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
StorageEngine* engine = EngineSelectorFeature::ENGINE;
TRI_V8_RETURN_STD_STRING(engine->databasePath(vocbase));
@ -2025,7 +2031,9 @@ static void JS_UseDatabase(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_FORBIDDEN);
}
auto databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
auto databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
std::string const name = TRI_ObjectToString(args[0]);
TRI_vocbase_t* vocbase = GetContextVocBase(isolate);
@ -2158,7 +2166,9 @@ static void JS_Databases(v8::FunctionCallbackInfo<v8::Value> const& args) {
return;
}
auto databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
auto databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
std::vector<std::string> names;
if (argc == 0) {
@ -2166,7 +2176,8 @@ static void JS_Databases(v8::FunctionCallbackInfo<v8::Value> const& args) {
names = databaseFeature->getDatabaseNames();
} else {
// return all databases for a specific user
names = databaseFeature->getDatabaseNamesForUser(TRI_ObjectToString(args[0]));
names =
databaseFeature->getDatabaseNamesForUser(TRI_ObjectToString(args[0]));
}
v8::Handle<v8::Array> result = v8::Array::New(isolate, (int)names.size());
@ -2237,8 +2248,10 @@ static void CreateDatabaseCoordinator(
// now wait for heartbeat thread to create the database object
TRI_vocbase_t* vocbase = nullptr;
int tries = 0;
auto databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
auto databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
while (++tries <= 6000) {
vocbase = databaseFeature->useDatabaseCoordinator(id);
@ -2326,7 +2339,8 @@ static void JS_CreateDatabase(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_GET_GLOBALS();
TRI_voc_tick_t id = 0;
// options for database (currently only allows setting "id" for testing purposes)
// options for database (currently only allows setting "id" for testing
// purposes)
if (args.Length() > 1 && args[1]->IsObject()) {
v8::Handle<v8::Object> options = args[1]->ToObject();
@ -2339,7 +2353,9 @@ static void JS_CreateDatabase(v8::FunctionCallbackInfo<v8::Value> const& args) {
std::string const name = TRI_ObjectToString(args[0]);
DatabaseFeature* databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
DatabaseFeature* databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
TRI_vocbase_t* database = nullptr;
int res = databaseFeature->createDatabase(id, name, true, database);
@ -2392,7 +2408,9 @@ static void DropDatabaseCoordinator(
v8::HandleScope scope(isolate);
// Arguments are already checked, there is exactly one argument
auto databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
auto databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
std::string const name = TRI_ObjectToString(args[0]);
TRI_vocbase_t* vocbase = databaseFeature->useDatabaseCoordinator(name);
@ -2465,7 +2483,9 @@ static void JS_DropDatabase(v8::FunctionCallbackInfo<v8::Value> const& args) {
std::string const name = TRI_ObjectToString(args[0]);
DatabaseFeature* databaseFeature = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database");
DatabaseFeature* databaseFeature =
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database");
int res = databaseFeature->dropDatabase(name, true, false, true);
if (res != TRI_ERROR_NO_ERROR) {
@ -2527,35 +2547,36 @@ static void JS_Endpoints(v8::FunctionCallbackInfo<v8::Value> const& args) {
static void JS_ClearTimers(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
auto cb = [](uint64_t, VPackSlice const& slice) {
//LOG(ERR) << "GCING OBJECT " << slice.toJson();
// LOG(ERR) << "GCING OBJECT " << slice.toJson();
};
GlobalRevisionCache cache(2 * 1024 * 1024, 32 * 1024 * 1024, cb);
GlobalRevisionCache cache(2 * 1024 * 1024, 32 * 1024 * 1024, cb);
auto f = [&cache]() {
VPackBuilder builder;
for (size_t i = 0; i < 10 * 1000 * 1000; ++i) {
builder.clear();
builder.add(VPackValue("der hans, der geht ins Klavier"));
VPackSlice slice = builder.slice();
RevisionReader reader = cache.storeAndLease(0, slice.begin(), slice.byteSize());
auto f = [&cache]() {
VPackBuilder builder;
for (size_t i = 0; i < 10 * 1000 * 1000; ++i) {
builder.clear();
builder.add(VPackValue("der hans, der geht ins Klavier"));
VPackSlice slice = builder.slice();
if (i % 1000 == 0) {
LOG(ERR) << "CACHE STATS: " << cache.totalAllocated();
RevisionReader reader =
cache.storeAndLease(0, slice.begin(), slice.byteSize());
if (i % 1000 == 0) {
LOG(ERR) << "CACHE STATS: " << cache.totalAllocated();
}
reader.revision();
}
reader.revision();
}
};
};
auto gc = [&cache, &cb]() {
for (size_t i = 0; i < 10 * 1000 * 1000; ++i) {
//LOG(ERR) << "GC TRY " << i;
cache.garbageCollect();
}
};
auto gc = [&cache, &cb]() {
for (size_t i = 0; i < 10 * 1000 * 1000; ++i) {
// LOG(ERR) << "GC TRY " << i;
cache.garbageCollect();
}
};
std::vector<std::thread> threads;
threads.emplace_back(f);
@ -2707,8 +2728,7 @@ void TRI_V8ReloadRouting(v8::Isolate* isolate) {
void TRI_InitV8VocBridge(v8::Isolate* isolate, v8::Handle<v8::Context> context,
arangodb::aql::QueryRegistry* queryRegistry,
TRI_vocbase_t* vocbase,
size_t threadNumber) {
TRI_vocbase_t* vocbase, size_t threadNumber) {
v8::HandleScope scope(isolate);
// check the isolate

View File

@ -33,6 +33,7 @@
#include "Indexes/EdgeIndex.h"
#include "Indexes/Index.h"
#include "Indexes/PrimaryIndex.h"
#include "Utils/Events.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Utils/V8TransactionContext.h"
#include "V8/v8-conv.h"
@ -448,40 +449,14 @@ static int EnhanceIndexJson(v8::FunctionCallbackInfo<v8::Value> const& args,
/// @brief ensures an index, coordinator case
////////////////////////////////////////////////////////////////////////////////
static void EnsureIndexCoordinator(
v8::FunctionCallbackInfo<v8::Value> const& args,
LogicalCollection const* collection, VPackSlice const slice, bool create) {
v8::Isolate* isolate = args.GetIsolate();
v8::HandleScope scope(isolate);
TRI_ASSERT(collection != nullptr);
int EnsureIndexCoordinator(std::string const& databaseName,
std::string const& cid,
VPackSlice const slice, bool create,
VPackBuilder& resultBuilder, std::string& errorMsg) {
TRI_ASSERT(!slice.isNone());
std::string const databaseName(collection->dbName());
std::string const cid = collection->cid_as_string();
std::string const collectionName(collection->name());
VPackBuilder resultBuilder;
std::string errorMsg;
int res = ClusterInfo::instance()->ensureIndexCoordinator(
return ClusterInfo::instance()->ensureIndexCoordinator(
databaseName, cid, slice, create, &arangodb::Index::Compare,
resultBuilder, errorMsg, 360.0);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION_MESSAGE(res, errorMsg);
}
if (resultBuilder.slice().isNone()) {
if (!create) {
// did not find a suitable index
TRI_V8_RETURN_NULL();
}
TRI_V8_THROW_EXCEPTION_MEMORY();
}
v8::Handle<v8::Value> ret = IndexRep(isolate, collectionName, resultBuilder.slice());
TRI_V8_RETURN(ret);
}
////////////////////////////////////////////////////////////////////////////////
@ -621,9 +596,34 @@ static void EnsureIndex(v8::FunctionCallbackInfo<v8::Value> const& args,
}
TRI_ASSERT(!slice.isNone());
events::CreateIndex(collection->name(), slice);
// ensure an index, coordinator case
if (ServerState::instance()->isCoordinator()) {
EnsureIndexCoordinator(args, collection, slice, create);
VPackBuilder resultBuilder;
std::string errorMsg;
#ifdef USE_ENTERPRISE
int res = EnsureIndexCoordinatorEnterprise(collection, slice, create,
resultBuilder, errorMsg);
#else
std::string const databaseName(collection->dbName());
std::string const cid = collection->cid_as_string();
int res = EnsureIndexCoordinator(databaseName, cid, slice, create,
resultBuilder, errorMsg);
#endif
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION_MESSAGE(res, errorMsg);
}
if (resultBuilder.slice().isNone()) {
if (!create) {
// did not find a suitable index
TRI_V8_RETURN_NULL();
}
TRI_V8_THROW_EXCEPTION_MEMORY();
}
v8::Handle<v8::Value> ret = IndexRep(isolate, collection->name(), resultBuilder.slice());
TRI_V8_RETURN(ret);
} else {
EnsureIndexLocal(args, collection, slice, create);
}
@ -633,7 +633,7 @@ static void EnsureIndex(v8::FunctionCallbackInfo<v8::Value> const& args,
/// @brief create a collection on the coordinator
////////////////////////////////////////////////////////////////////////////////
LogicalCollection* CreateCollectionCoordinator(LogicalCollection* parameters) {
std::unique_ptr<LogicalCollection> CreateCollectionCoordinator(LogicalCollection* parameters) {
std::string distributeShardsLike = parameters->distributeShardsLike();
std::vector<std::string> dbServers;
@ -664,8 +664,6 @@ LogicalCollection* CreateCollectionCoordinator(LogicalCollection* parameters) {
parameters->distributeShardsLike(otherCidString);
}
}
// If the list dbServers is still empty, it will be filled in
// distributeShards below.
@ -699,8 +697,7 @@ LogicalCollection* CreateCollectionCoordinator(LogicalCollection* parameters) {
// collection does not exist. Also, the create collection should have
// failed before.
TRI_ASSERT(c != nullptr);
std::unique_ptr<LogicalCollection> newCol(c->clone());
return newCol.release();
return c->clone();
}
////////////////////////////////////////////////////////////////////////////////
@ -734,54 +731,15 @@ static void JS_LookupIndexVocbaseCol(
/// @brief drops an index, coordinator case
////////////////////////////////////////////////////////////////////////////////
static void DropIndexCoordinator(
v8::FunctionCallbackInfo<v8::Value> const& args,
arangodb::LogicalCollection const* collection, v8::Handle<v8::Value> const val) {
v8::Isolate* isolate = args.GetIsolate();
v8::HandleScope scope(isolate);
std::string collectionName;
TRI_idx_iid_t iid = 0;
// extract the index identifier from a string
if (val->IsString() || val->IsStringObject() || val->IsNumber()) {
if (!IsIndexHandle(val, collectionName, iid)) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_INDEX_HANDLE_BAD);
}
}
// extract the index identifier from an object
else if (val->IsObject()) {
TRI_GET_GLOBALS();
v8::Handle<v8::Object> obj = val->ToObject();
TRI_GET_GLOBAL_STRING(IdKey);
v8::Handle<v8::Value> iidVal = obj->Get(IdKey);
if (!IsIndexHandle(iidVal, collectionName, iid)) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_INDEX_HANDLE_BAD);
}
}
if (!collectionName.empty()) {
CollectionNameResolver resolver(collection->vocbase());
if (!EqualCollection(&resolver, collectionName, collection)) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_CROSS_COLLECTION_REQUEST);
}
}
std::string const databaseName(collection->dbName());
std::string const cid = collection->cid_as_string();
int DropIndexCoordinator(
std::string const& databaseName,
std::string const& cid,
TRI_idx_iid_t const iid) {
std::string errorMsg;
int res = ClusterInfo::instance()->dropIndexCoordinator(databaseName, cid,
iid, errorMsg, 0.0);
return ClusterInfo::instance()->dropIndexCoordinator(databaseName, cid,
iid, errorMsg, 0.0);
if (res == TRI_ERROR_NO_ERROR) {
TRI_V8_RETURN_TRUE();
}
TRI_V8_RETURN_FALSE();
}
////////////////////////////////////////////////////////////////////////////////
@ -807,8 +765,49 @@ static void JS_DropIndexVocbaseCol(
}
if (ServerState::instance()->isCoordinator()) {
DropIndexCoordinator(args, collection, args[0]);
return;
std::string collectionName;
TRI_idx_iid_t iid = 0;
v8::Handle<v8::Value> const val = args[0];
// extract the index identifier from a string
if (val->IsString() || val->IsStringObject() || val->IsNumber()) {
if (!IsIndexHandle(val, collectionName, iid)) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_INDEX_HANDLE_BAD);
}
}
// extract the index identifier from an object
else if (val->IsObject()) {
TRI_GET_GLOBALS();
v8::Handle<v8::Object> obj = val->ToObject();
TRI_GET_GLOBAL_STRING(IdKey);
v8::Handle<v8::Value> iidVal = obj->Get(IdKey);
if (!IsIndexHandle(iidVal, collectionName, iid)) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_INDEX_HANDLE_BAD);
}
}
if (!collectionName.empty()) {
CollectionNameResolver resolver(collection->vocbase());
if (!EqualCollection(&resolver, collectionName, collection)) {
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_CROSS_COLLECTION_REQUEST);
}
}
#ifdef USE_ENTERPRISE
int res = DropIndexCoordinatorEnterprise(collection, iid);
#else
std::string const databaseName(collection->dbName());
std::string const cid = collection->cid_as_string();
int res = DropIndexCoordinator(databaseName, cid, iid);
#endif
if (res == TRI_ERROR_NO_ERROR) {
TRI_V8_RETURN_TRUE();
}
TRI_V8_RETURN_FALSE();
}
READ_LOCKER(readLocker, collection->vocbase()->_inventoryLock);
@ -1079,11 +1078,11 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
#ifndef USE_ENTERPRISE
auto parameters = std::make_unique<LogicalCollection>(vocbase, infoSlice, false);
TRI_V8_RETURN(
WrapCollection(isolate, CreateCollectionCoordinator(parameters.get())));
WrapCollection(isolate, CreateCollectionCoordinator(parameters.get()).release()));
#else
TRI_V8_RETURN(
WrapCollection(isolate, CreateCollectionCoordinatorEnterprise(
collectionType, vocbase, infoSlice)));
collectionType, vocbase, infoSlice).release()));
#endif
}

View File

@ -50,12 +50,37 @@ void TRI_InitV8indexCollection(v8::Isolate* isolate,
v8::Handle<v8::ObjectTemplate> rt);
// This could be static but is used in enterprise version as well
arangodb::LogicalCollection* CreateCollectionCoordinator(
// Note that this returns a newly allocated object and ownership is transferred
// to the caller, which is expressed by the returned unique_ptr.
std::unique_ptr<arangodb::LogicalCollection> CreateCollectionCoordinator(
arangodb::LogicalCollection* parameters);
#ifdef USE_ENTERPRISE
arangodb::LogicalCollection* CreateCollectionCoordinatorEnterprise(
std::unique_ptr<arangodb::LogicalCollection> CreateCollectionCoordinatorEnterprise(
TRI_col_type_e collectionType, TRI_vocbase_t* vocbase,
arangodb::velocypack::Slice parameters);
#endif
int EnsureIndexCoordinator(std::string const& dbName, std::string const& cid,
arangodb::velocypack::Slice const slice, bool create,
arangodb::velocypack::Builder& resultBuilder,
std::string& errorMessage);
#ifdef USE_ENTERPRISE
int EnsureIndexCoordinatorEnterprise(
arangodb::LogicalCollection const* collection,
arangodb::velocypack::Slice const slice, bool create,
arangodb::velocypack::Builder& resultBuilder, std::string& errorMessage);
#endif
int DropIndexCoordinator(
std::string const& databaseName,
std::string const& cid,
TRI_idx_iid_t const iid);
#ifdef USE_ENTERPRISE
int DropIndexCoordinatorEnterprise(
arangodb::LogicalCollection const* collection, TRI_idx_iid_t const iid);
#endif
#endif

View File

@ -96,7 +96,7 @@ static AuthEntry CreateAuthEntry(VPackSlice const& slice) {
// extract "changePassword" attribute
bool mustChange =
VelocyPackHelper::getBooleanValue(slice, "changePassword", false);
VelocyPackHelper::getBooleanValue(authDataSlice, "changePassword", false);
// extract "databases" attribute
VPackSlice const databasesSlice = slice.get("databases");
@ -267,6 +267,7 @@ AuthResult AuthInfo::checkPassword(std::string const& username,
}
AuthResult result;
result._username = username;
// look up username
READ_LOCKER(readLocker, _authInfoLock);
@ -283,7 +284,6 @@ AuthResult AuthInfo::checkPassword(std::string const& username,
return result;
}
result._username = username;
result._mustChange = auth.mustChange();
std::string salted = auth.passwordSalt() + password;

View File

@ -48,6 +48,7 @@
#include "Utils/CollectionNameResolver.h"
#include "Utils/CollectionReadLocker.h"
#include "Utils/CollectionWriteLocker.h"
#include "Utils/Events.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Utils/StandaloneTransactionContext.h"
#include "VocBase/PhysicalCollection.h"
@ -1555,6 +1556,7 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
TRI_ASSERT(!ServerState::instance()->isCoordinator());
if (iid == 0) {
// invalid index id or primary index
events::DropIndex("", std::to_string(iid), TRI_ERROR_NO_ERROR);
return true;
}
@ -1563,6 +1565,7 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
_vocbase, name());
if (!removeIndex(iid)) {
// We tried to remove an index that does not exist
events::DropIndex("", std::to_string(iid), TRI_ERROR_ARANGO_INDEX_NOT_FOUND);
return false;
}
}
@ -1591,6 +1594,7 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
THROW_ARANGO_EXCEPTION(slotInfo.errorCode);
}
events::DropIndex("", std::to_string(iid), TRI_ERROR_NO_ERROR);
return true;
} catch (basics::Exception const& ex) {
res = ex.code();
@ -1599,6 +1603,7 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
}
LOG(WARN) << "could not save index drop marker in log: " << TRI_errno_string(res);
events::DropIndex("", std::to_string(iid), res);
// TODO: what to do here?
}

View File

@ -68,17 +68,19 @@ class LogicalCollection {
public:
LogicalCollection(TRI_vocbase_t*, arangodb::velocypack::Slice const&, bool isPhysical);
explicit LogicalCollection(LogicalCollection const&);
virtual ~LogicalCollection();
protected: // If you need a copy outside the class, use clone below.
explicit LogicalCollection(LogicalCollection const&);
private:
LogicalCollection& operator=(LogicalCollection const&) = delete;
public:
LogicalCollection() = delete;
virtual LogicalCollection* clone() {
return new LogicalCollection(*this);
virtual std::unique_ptr<LogicalCollection> clone() {
auto p = new LogicalCollection(*this);
return std::unique_ptr<LogicalCollection>(p);
}
/// @brief hard-coded minimum version number for collections
@ -257,7 +259,7 @@ class LogicalCollection {
virtual int update(arangodb::velocypack::Slice const&, bool);
/// @brief return the figures for a collection
std::shared_ptr<arangodb::velocypack::Builder> figures();
virtual std::shared_ptr<arangodb::velocypack::Builder> figures();
/// @brief opens an existing collection
void open(bool ignoreErrors);

View File

@ -52,6 +52,7 @@
#include "StorageEngine/StorageEngine.h"
#include "Utils/CollectionKeysRepository.h"
#include "Utils/CursorRepository.h"
#include "Utils/Events.h"
#include "V8Server/v8-user-structures.h"
#include "VocBase/Ditch.h"
#include "VocBase/LogicalCollection.h"
@ -292,6 +293,7 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
auto it = _collectionsByName.find(name);
if (it != _collectionsByName.end()) {
events::CreateCollection(name, TRI_ERROR_ARANGO_DUPLICATE_NAME);
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DUPLICATE_NAME);
}
@ -311,6 +313,7 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
if (writeMarker) {
collection->toVelocyPack(builder, false);
}
events::CreateCollection(name, TRI_ERROR_NO_ERROR);
return collection;
} catch (...) {
unregisterCollection(collection);
@ -481,6 +484,7 @@ int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection,
if (collection->status() == TRI_VOC_COL_STATUS_DELETED) {
// mark collection as deleted
unregisterCollection(collection);
events::DropCollection(colName, TRI_ERROR_NO_ERROR);
return TRI_ERROR_NO_ERROR;
}
@ -497,6 +501,7 @@ int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection,
engine->changeCollection(this, collection->cid(), collection, doSync);
} catch (arangodb::basics::Exception const& ex) {
collection->setDeleted(false);
events::DropCollection(colName, ex.code());
return ex.code();
}
}
@ -512,6 +517,7 @@ int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection,
DropCollectionCallback(collection);
events::DropCollection(colName, TRI_ERROR_NO_ERROR);
return TRI_ERROR_NO_ERROR;
}
@ -520,6 +526,7 @@ int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection,
// loop until status changes
// try again later
state = DROP_AGAIN;
events::DropCollection(colName, TRI_ERROR_NO_ERROR);
return TRI_ERROR_NO_ERROR;
}
@ -550,10 +557,12 @@ int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection,
}
state = DROP_PERFORM;
events::DropCollection(colName, TRI_ERROR_NO_ERROR);
return TRI_ERROR_NO_ERROR;
}
// unknown status
events::DropCollection(colName, TRI_ERROR_INTERNAL);
return TRI_ERROR_INTERNAL;
}

View File

@ -1564,7 +1564,7 @@ LogfileManagerState LogfileManager::state() {
// now fill the state
_slots->statistics(state.lastAssignedTick, state.lastCommittedTick,
state.lastCommittedDataTick, state.numEvents, state.numEventsSync);
state.timeString = TRI_timeString();
state.timeString = utilities::timeString();
return state;
}
@ -1850,7 +1850,7 @@ int LogfileManager::writeShutdownInfo(bool writeShutdownTime) {
builder.add("lastSealed", VPackValue(val));
if (writeShutdownTime) {
std::string const t(TRI_timeString());
std::string const t(utilities::timeString());
builder.add("shutdownTime", VPackValue(t));
}
builder.close();

View File

@ -69,3 +69,11 @@ list(APPEND PACKAGES_LIST package-arongodb-client)
add_custom_target(copy_packages
COMMAND cp *.deb ${PACKAGE_TARGET_DIR})
add_custom_target(remove_packages
COMMAND rm -f *.deb
COMMAND rm -rf _CPack_Packages
COMMAND rm -rf packages
)
list(APPEND CLEAN_PACKAGES_LIST remove_packages)

View File

@ -105,3 +105,12 @@ list(APPEND PACKAGES_LIST package-arongodb-client-nsis)
add_custom_target(copy_packages
COMMAND cp *.exe ${PACKAGE_TARGET_DIR})
add_custom_target(remove_packages
COMMAND rm -f *.zip
COMMAND rm -f *.exe
COMMAND rm -rf _CPack_Packages
)
list(APPEND CLEAN_PACKAGES_LIST remove_packages)

View File

@ -55,3 +55,10 @@ list(APPEND PACKAGES_LIST package-arongodb-server)
add_custom_target(copy_packages
COMMAND cp *.rpm ${PACKAGE_TARGET_DIR})
add_custom_target(remove_packages
COMMAND rm -f *.rpm
COMMAND rm -rf _CPack_Packages
)
list(APPEND CLEAN_PACKAGES_LIST remove_packages)

View File

@ -231,14 +231,19 @@ function put_api_user (req, res) {
const isActive = users.document(user).active;
if (isActive) {
if (needMyself(req, res, user)) {
if (needSystemUser(req, res, user)) {
actions.resultOk(req, res, actions.HTTP_OK,
users.replace(user, json.passwd, json.active, json.extra,
json.changePassword));
} else if (needMyself(req, res, user)) {
actions.resultOk(req, res, actions.HTTP_OK,
users.replace(user, json.passwd, json.active, json.extra));
}
} else {
if (needSystemUser(req, res, user)) {
actions.resultOk(req, res, actions.HTTP_OK,
users.replace(user, json.passwd, json.active, json.extra));
users.replace(user, json.passwd, json.active, json.extra,
json.changePassword));
}
}
@ -333,14 +338,19 @@ function patch_api_user (req, res) {
const isActive = users.document(user).active;
if (isActive) {
if (needMyself(req, res, user)) {
if (needSystemUser(req, res, user)) {
actions.resultOk(req, res, actions.HTTP_OK,
users.update(user, json.passwd, json.active, json.extra,
json.changePassword));
} else if (needMyself(req, res, user)) {
actions.resultOk(req, res, actions.HTTP_OK,
users.update(user, json.passwd, json.active, json.extra));
}
} else {
if (needSystemUser(req, res, user)) {
actions.resultOk(req, res, actions.HTTP_OK,
users.update(user, json.passwd, json.active, json.extra));
users.update(user, json.passwd, json.active, json.extra,
json.changePassword));
}
}

View File

@ -1076,6 +1076,8 @@ actions.defineHttp({
"body must be an object with string attributes 'database', 'collection', 'shard', 'fromServer' and 'toServer'");
return;
}
body.shards=[body.shard];
body.collections=[body.collection];
var r = require('@arangodb/cluster').moveShard(body);
if (r.error) {
actions.resultError(req, res, actions.HTTP_SERVICE_UNAVAILABLE, r);

File diff suppressed because one or more lines are too long

View File

@ -2731,4 +2731,4 @@ var cutByResolution = function (str) {
</div>
<div id="workMonitorContent" class="innerContent">
</div></script></head><body><nav class="navbar" style="display: none"><div class="primary"><div class="navlogo"><a class="logo big" href="#"><img id="ArangoDBLogo" class="arangodbLogo" src="img/arangodb_logo_big.svg"></a><a class="logo small" href="#"><img class="arangodbLogo" src="img/arangodb_logo_small.png"></a><a class="version"><span>VERSION: </span><span id="currentVersion"></span></a></div><div class="statmenu" id="statisticBar"></div><div class="navmenu" id="navigationBar"></div></div></nav><div id="modalPlaceholder"></div><div class="bodyWrapper" style="display: none"><div class="centralRow"><div id="navbar2" class="navbarWrapper secondary"><div class="subnavmenu" id="subNavigationBar"></div></div><div class="resizecontainer contentWrapper"><div id="loadingScreen" class="loadingScreen" style="display: none"><i class="fa fa-circle-o-notch fa-spin fa-3x fa-fw margin-bottom"></i> <span class="sr-only">Loading...</span></div><div id="content" class="centralContent"></div><footer class="footer"><div id="footerBar"></div></footer></div></div></div><div id="progressPlaceholder" style="display:none"></div><div id="spotlightPlaceholder" style="display:none"></div><div id="graphSettingsContent" style="display: none"></div><div id="offlinePlaceholder" style="display:none"><div class="offline-div"><div class="pure-u"><div class="pure-u-1-4"></div><div class="pure-u-1-2 offline-window"><div class="offline-header"><h3>You have been disconnected from the server</h3></div><div class="offline-body"><p>The connection to the server has been lost. 
The server may be under heavy load.</p><p>Trying to reconnect in <span id="offlineSeconds">10</span> seconds.</p><p class="animation_state"><span><button class="button-success">Reconnect now</button></span></p></div></div><div class="pure-u-1-4"></div></div></div></div><div class="arangoFrame" style=""><div class="outerDiv"><div class="innerDiv"></div></div></div><script src="libs.js?version=1475246001209"></script><script src="app.js?version=1475246001209"></script></body></html>
</div></script></head><body><nav class="navbar" style="display: none"><div class="primary"><div class="navlogo"><a class="logo big" href="#"><img id="ArangoDBLogo" class="arangodbLogo" src="img/arangodb_logo_big.svg"></a><a class="logo small" href="#"><img class="arangodbLogo" src="img/arangodb_logo_small.png"></a><a class="version"><span>VERSION: </span><span id="currentVersion"></span></a></div><div class="statmenu" id="statisticBar"></div><div class="navmenu" id="navigationBar"></div></div></nav><div id="modalPlaceholder"></div><div class="bodyWrapper" style="display: none"><div class="centralRow"><div id="navbar2" class="navbarWrapper secondary"><div class="subnavmenu" id="subNavigationBar"></div></div><div class="resizecontainer contentWrapper"><div id="loadingScreen" class="loadingScreen" style="display: none"><i class="fa fa-circle-o-notch fa-spin fa-3x fa-fw margin-bottom"></i> <span class="sr-only">Loading...</span></div><div id="content" class="centralContent"></div><footer class="footer"><div id="footerBar"></div></footer></div></div></div><div id="progressPlaceholder" style="display:none"></div><div id="spotlightPlaceholder" style="display:none"></div><div id="graphSettingsContent" style="display: none"></div><div id="offlinePlaceholder" style="display:none"><div class="offline-div"><div class="pure-u"><div class="pure-u-1-4"></div><div class="pure-u-1-2 offline-window"><div class="offline-header"><h3>You have been disconnected from the server</h3></div><div class="offline-body"><p>The connection to the server has been lost. 
The server may be under heavy load.</p><p>Trying to reconnect in <span id="offlineSeconds">10</span> seconds.</p><p class="animation_state"><span><button class="button-success">Reconnect now</button></span></p></div></div><div class="pure-u-1-4"></div></div></div></div><div class="arangoFrame" style=""><div class="outerDiv"><div class="innerDiv"></div></div></div><script src="libs.js?version=1475752769169"></script><script src="app.js?version=1475752769169"></script></body></html>

File diff suppressed because one or more lines are too long

View File

@ -72,23 +72,11 @@
var res = '';
var parts = _.map(this.filters, function (f, i) {
if (f.op === 'LIKE') {
res = ' ' + f.op + '(x.`' + f.attr + '`, @param';
res = ' ' + f.op + '(x.@attr, @param';
res += i;
res += ')';
} else {
if (f.op === 'IN' || f.op === 'NOT IN') {
res = ' ';
} else {
res = ' x.`';
}
res += f.attr;
if (f.op === 'IN' || f.op === 'NOT IN') {
res += ' ';
} else {
res += '` ';
}
res += ' x.@attr ';
res += f.op;
@ -101,6 +89,12 @@
}
bindVars['param' + i] = f.val;
if (f.attr.indexOf('.') !== -1) {
bindVars['attr'] = f.attr.split('.');
} else {
bindVars['attr'] = f.attr;
}
return res;
});
return query + parts.join(' &&');

View File

@ -118,7 +118,7 @@
},
'nodeSizeByEdges': {
type: 'select',
name: 'Size By Collections',
name: 'Size By Connections',
yes: {
name: 'Yes',
val: 'true'
@ -267,7 +267,7 @@
checkEnterKey: function (e) {
if (e.keyCode === 13) {
this.saveGraphSettings();
this.saveGraphSettings(e);
}
},
@ -305,6 +305,8 @@
};
if (!this.noDefinedGraph) {
// usual graph view mode
// communication is needed
self.lastSaved = new Date();
var combinedName = frontendConfig.db + '_' + this.name;
@ -334,9 +336,39 @@
var callback = function () {
if (window.App.graphViewer) {
// no complete rerender needed
// LAYOUT
var value;
if (event) {
if (event.currentTarget.id === 'g_layout') {
window.App.graphViewer.switchLayout($('#g_layout').val());
return;
// NODES COLORING
} else if (event.currentTarget.id === 'g_nodeColorByCollection') {
value = $('#g_nodeColorByCollection').val();
if (value === 'true') {
window.App.graphViewer.switchNodeColorByCollection(true);
} else {
window.App.graphViewer.switchNodeColorByCollection(false);
}
return;
// EDGES COLORING
} else if (event.currentTarget.id === 'g_edgeColorByCollection') {
value = $('#g_edgeColorByCollection').val();
if (value === 'true') {
window.App.graphViewer.switchEdgeColorByCollection(true);
} else {
window.App.graphViewer.switchEdgeColorByCollection(false);
}
return;
}
}
if (color !== '' && color !== undefined) {
updateCols();
} else {
// complete render necessary - e.g. data needed
window.App.graphViewer.render(self.lastFocussed);
}
} else {
@ -352,10 +384,38 @@
this.userConfig.setItem('graphs', config, callback);
} else {
// aql mode - only visual
var value;
if (color) {
updateCols();
} else if (event.currentTarget.id === 'g_layout') {
window.App.graphViewer.rerenderAQL($('#g_layout').val(), null);
} else if (event.currentTarget.id === 'g_nodeColorByCollection') {
value = $('#g_nodeColorByCollection').val();
if (value === 'true') {
window.App.graphViewer.switchNodeColorByCollection(true);
} else {
window.App.graphViewer.switchNodeColorByCollection(false);
}
} else if (event.currentTarget.id === 'g_edgeColorByCollection') {
value = $('#g_edgeColorByCollection').val();
if (value === 'true') {
window.App.graphViewer.switchEdgeColorByCollection(true);
} else {
window.App.graphViewer.switchEdgeColorByCollection(false);
}
} else if (event.currentTarget.id === 'g_nodeSizeByEdges') {
value = $('#g_nodeSizeByEdges').val();
if (value === 'true') {
window.App.graphViewer.switchNodeSizeByCollection(true);
} else {
window.App.graphViewer.switchNodeSizeByCollection(false);
}
} else if (event.currentTarget.id === 'g_edgeType') {
window.App.graphViewer.switchEdgeType($('#g_edgeType').val());
}
}
this.handleDependencies();
},
setDefaults: function (saveOnly, silent, callback) {
@ -426,24 +486,38 @@
// node sizing
if ($('#g_nodeSizeByEdges').val() === 'true') {
$('#g_nodeSize').prop('disabled', true);
} else {
$('#g_nodeSize').removeAttr('disabled');
}
// node color
if ($('#g_nodeColorByCollection').val() === 'true') {
$('#g_nodeColorAttribute').prop('disabled', true);
$('#g_nodeColor').prop('disabled', true);
} else {
$('#g_nodeColorAttribute').removeAttr('disabled');
$('#g_nodeColor').removeAttr('disabled');
}
if ($('#g_nodeColorAttribute').val() !== '') {
$('#g_nodeColor').prop('disabled', true);
if (!this.noDefinedGraph) {
if ($('#g_nodeColorAttribute').val() !== '') {
$('#g_nodeColor').prop('disabled', true);
}
}
// edge color
if ($('#g_edgeColorByCollection').val() === 'true') {
$('#g_edgeColorAttribute').prop('disabled', true);
$('#g_edgeColor').prop('disabled', true);
} else {
$('#g_edgeColorAttribute').removeAttr('disabled');
$('#g_edgeColor').removeAttr('disabled');
}
if ($('#g_edgeColorAttribute').val() !== '') {
$('#g_edgeColor').prop('disabled', true);
if (!this.noDefinedGraph) {
if ($('#g_edgeColorAttribute').val() !== '') {
$('#g_edgeColor').prop('disabled', true);
}
}
},
@ -472,13 +546,25 @@
fitSettingsAQLMode: function () {
var toDisable = [
'g_nodeStart', 'g_depth', 'g_limit'
'g_nodeStart', 'g_depth', 'g_limit', 'g_renderer',
'g_nodeLabel', 'g_nodeLabelByCollection', 'g_nodeColorAttribute',
'g_nodeSize', 'g_edgeLabel', 'g_edgeColorAttribute', 'g_edgeLabelByCollection'
];
_.each(toDisable, function (elem) {
$('#' + elem).parent().prev().remove();
$('#' + elem).parent().remove();
});
$('#saveGraphSettings').remove();
$('#restoreGraphSettings').remove();
// overwrite usual defaults
$('#g_nodeColorByCollection').val('false');
$('#g_edgeColorByCollection').val('false');
$('#g_nodeSizeByEdges').val('false');
$('#g_edgeType').val('arrow');
$('#g_layout').val('force');
}
});

View File

@ -49,7 +49,16 @@
colors: {
hotaru: ['#364C4A', '#497C7F', '#92C5C0', '#858168', '#CCBCA5'],
random1: ['#292F36', '#4ECDC4', '#F7FFF7', '#DD6363', '#FFE66D']
random1: ['#292F36', '#4ECDC4', '#F7FFF7', '#DD6363', '#FFE66D'],
gv: [
'#68BDF6',
'#6DCE9E',
'#FF756E',
'#DE9BF9',
'#FB95AF',
'#FFD86E',
'#A5ABB6'
]
},
activeNodes: [],
@ -238,14 +247,197 @@
this.graphSettingsView.render();
},
killCurrentGraph: function () {
for (var i in this.currentGraph.renderers) {
this.currentGraph.renderers[i].clear();
this.currentGraph.kill(i);
}
},
rerenderAQL: function (layout, renderer) {
this.killCurrentGraph();
// TODO add WebGL features
this.renderGraph(this.graphData.modified, null, false, layout, 'canvas');
if ($('#g_nodeColorByCollection').val() === 'true') {
this.switchNodeColorByCollection(true);
} else {
if (this.ncolor) {
this.updateColors(true, true, this.ncolor, this.ecolor);
} else {
this.updateColors(true, true, '#2ecc71', '#2ecc71');
}
}
if ($('#g_edgeColorByCollection').val() === 'true') {
this.switchEdgeColorByCollection(true);
} else {
if (this.ecolor) {
this.updateColors(true, true, this.ncolor, this.ecolor);
} else {
this.updateColors(true, true, '#2ecc71', '#2ecc71');
}
}
},
buildCollectionColors: function () {
var self = this;
if (!self.collectionColors) {
self.collectionColors = {};
var pos = 0;
var tmpNodes = {};
var tmpEdges = {};
_.each(this.currentGraph.graph.nodes(), function (node) {
tmpNodes[node.id] = undefined;
});
_.each(self.currentGraph.graph.edges(), function (edge) {
tmpEdges[edge.id] = undefined;
});
_.each(tmpNodes, function (node, key) {
if (self.collectionColors[key.split('/')[0]] === undefined) {
self.collectionColors[key.split('/')[0]] = {color: self.colors.gv[pos]};
pos++;
}
});
pos = 0;
_.each(tmpEdges, function (edge, key) {
if (self.collectionColors[key.split('/')[0]] === undefined) {
self.collectionColors[key.split('/')[0]] = {color: self.colors.gv[pos]};
pos++;
}
});
}
},
switchNodeColorByCollection: function (boolean) {
var self = this;
self.buildCollectionColors();
if (boolean) {
self.currentGraph.graph.nodes().forEach(function (n) {
n.color = self.collectionColors[n.id.split('/')[0]].color;
});
self.currentGraph.refresh();
} else {
if (this.ncolor) {
this.updateColors(true, null, this.ncolor, this.ecolor);
} else {
this.updateColors(true, null, '#2ecc71', '#2ecc71');
}
}
},
switchEdgeColorByCollection: function (boolean) {
var self = this;
self.buildCollectionColors();
if (boolean) {
self.currentGraph.graph.edges().forEach(function (n) {
n.color = self.collectionColors[n.id.split('/')[0]].color;
});
self.currentGraph.refresh();
} else {
if (this.ecolor) {
this.updateColors(null, true, this.ncolor, this.ecolor);
} else {
this.updateColors(null, true, '#2ecc71', '#2ecc71');
}
}
},
buildCollectionSizes: function () {
var self = this;
if (!self.nodeEdgesCount) {
self.nodeEdgesCount = {};
var handledEdges = {};
_.each(this.currentGraph.graph.edges(), function (edge) {
if (handledEdges[edge.id] === undefined) {
handledEdges[edge.id] = true;
if (self.nodeEdgesCount[edge.source] === undefined) {
self.nodeEdgesCount[edge.source] = 1;
} else {
self.nodeEdgesCount[edge.source] += 1;
}
if (self.nodeEdgesCount[edge.target] === undefined) {
self.nodeEdgesCount[edge.target] = 1;
} else {
self.nodeEdgesCount[edge.target] += 1;
}
}
});
}
},
switchNodeSizeByCollection: function (boolean) {
var self = this;
if (boolean) {
self.buildCollectionSizes();
self.currentGraph.graph.nodes().forEach(function (n) {
n.size = self.nodeEdgesCount[n.id];
});
} else {
self.currentGraph.graph.nodes().forEach(function (n) {
n.size = 15;
});
}
self.currentGraph.refresh();
},
switchEdgeType: function (edgeType) {
var data = {
nodes: this.currentGraph.graph.nodes(),
edges: this.currentGraph.graph.edges(),
settings: {}
};
this.killCurrentGraph();
this.renderGraph(data, null, false, null, null, edgeType);
},
switchLayout: function (layout) {
var data = {
nodes: this.currentGraph.graph.nodes(),
edges: this.currentGraph.graph.edges(),
settings: {}
};
this.killCurrentGraph();
this.renderGraph(data, null, false, layout);
if ($('#g_nodeColorByCollection').val() === 'true') {
this.switchNodeColorByCollection(true);
}
if ($('#g_edgeColorByCollection').val() === 'true') {
this.switchEdgeColorByCollection(true);
} else {
this.switchEdgeColorByCollection(false);
}
},
parseData: function (data, type) {
var vertices = {}; var edges = {};
var color = '#2ecc71';
var returnObj = {
nodes: [],
edges: [],
settings: {}
};
if (this.ncolor) {
color = this.ncolor;
}
if (type === 'object') {
_.each(data, function (obj) {
if (obj.edges && obj.vertices) {
@ -266,7 +458,7 @@
id: node._id,
label: node._key,
// size: 0.3,
color: '#2ecc71',
color: color,
x: Math.random(),
y: Math.random()
};
@ -300,7 +492,7 @@
id: key,
label: key,
size: 0.3,
color: '#2ecc71',
color: color,
x: Math.random(),
y: Math.random()
});
@ -645,7 +837,7 @@
id: id,
label: id.split('/')[1] || '',
size: self.graphConfig.nodeSize || 15,
color: self.graphConfig.nodeColor || '#2ecc71',
color: self.graphConfig.nodeColor || self.ncolor || '#2ecc71',
x: x,
y: y
});
@ -778,14 +970,14 @@
size: 1,
target: to,
id: data._id,
color: self.graphConfig.edgeColor
color: self.graphConfig.edgeColor || self.ecolor
});
} else {
self.currentGraph.graph.addEdge({
source: from,
target: to,
id: data._id,
color: self.graphConfig.edgeColor
color: self.graphConfig.edgeColor || self.ecolor
});
}
@ -882,6 +1074,13 @@
var combinedName = frontendConfig.db + '_' + this.name;
var self = this;
if (ncolor) {
self.ncolor = ncolor;
}
if (ecolor) {
self.ecolor = ecolor;
}
this.userConfig.fetch({
success: function (data) {
if (nodes === true) {
@ -1329,7 +1528,6 @@
// rerender graph
if (newNodeCounter > 0 || newEdgeCounter > 0) {
if (self.algorithm === 'force') {
console.log(origin);
self.startLayout(true, origin);
} else if (self.algorithm === 'fruchtermann') {
sigma.layouts.fruchtermanReingold.start(self.currentGraph);
@ -1432,9 +1630,6 @@
editNode: function (id) {
var callback = function (a, b) {
console.log(1);
console.log(a);
console.log(b);
};
arangoHelper.openDocEditor(id, 'doc', callback);
},
@ -1513,11 +1708,16 @@
*/
},
renderGraph: function (graph, toFocus, aqlMode) {
renderGraph: function (graph, toFocus, aqlMode, layout, renderer, edgeType) {
var self = this;
this.graphSettings = graph.settings;
var color = '#2ecc71';
if (self.ncolor) {
color = self.ncolor;
}
if (graph.edges) {
if (graph.nodes) {
if (graph.nodes.length === 0 && graph.edges.length === 0) {
@ -1525,7 +1725,7 @@
id: graph.settings.startVertex._id,
label: graph.settings.startVertex._key,
size: 10,
color: '#2ecc71',
color: color,
x: Math.random(),
y: Math.random()
});
@ -1545,23 +1745,35 @@
this.Sigma = sigma;
// defaults
self.algorithm = 'force';
self.renderer = 'canvas';
if (!layout) {
self.algorithm = 'force';
} else {
self.algorithm = layout;
}
if (!renderer) {
self.renderer = 'canvas';
} else {
self.renderer = renderer;
}
if (this.graphConfig) {
if (this.graphConfig.layout) {
self.algorithm = this.graphConfig.layout;
if (!layout) {
self.algorithm = this.graphConfig.layout;
}
}
if (this.graphConfig.renderer) {
self.renderer = this.graphConfig.renderer;
if (self.renderer === 'canvas') {
self.isEditable = true;
if (!renderer) {
self.renderer = this.graphConfig.renderer;
}
}
}
if (self.renderer === 'canvas') {
self.isEditable = true;
}
// sigmajs graph settings
var settings = {
scalingMode: 'inside',
@ -1616,13 +1828,16 @@
if (this.graphConfig) {
if (this.graphConfig.edgeType) {
settings.defaultEdgeType = this.graphConfig.edgeType;
if (this.graphConfig.edgeType === 'arrow') {
settings.minArrowSize = 7;
}
}
}
if (edgeType) {
settings.defaultEdgeType = edgeType;
}
if (settings.defaultEdgeType === 'arrow') {
settings.minArrowSize = 7;
}
if (aqlMode) {
// aql editor settings
self.renderer = 'webgl';
@ -1751,7 +1966,7 @@
// validate edgeDefinitions
var foundEdgeDefinitions = self.getEdgeDefinitionCollections(fromCollection, toCollection);
self.addEdgeModal(foundEdgeDefinitions, self.contextState._from, self.contextState._to);
self.clearOldContextMenu(true);
self.clearOldContextMenu(false);
} else {
if (!self.dragging) {
if (self.contextState.createEdge === true) {
@ -1907,7 +2122,7 @@
// allow draggin nodes
} else if (self.algorithm === 'force') {
// add buttons for start/stopping calculation
var style2 = 'color: rgb(64, 74, 83); cursor: pointer; position: absolute; right: 30px; bottom: 40px;';
var style2 = 'color: rgb(64, 74, 83); cursor: pointer; position: absolute; right: 30px; bottom: 40px; z-index: 9999;';
if (self.aqlMode) {
style2 = 'color: rgb(64, 74, 83); cursor: pointer; position: absolute; right: 30px; margin-top: -30px;';
@ -2018,6 +2233,12 @@
self.graphNotInitialized = false;
self.tmpGraphArray = [];
}
if (self.algorithm === 'force') {
$('#toggleForce').fadeIn('fast');
} else {
$('#toggleForce').fadeOut('fast');
}
},
reInitDragListener: function () {
@ -2100,7 +2321,7 @@ $('#deleteNodes').remove();
self.stopLayout();
if (origin) {
this.currentGraph.refresh({ skipIndexation: true });
self.currentGraph.refresh({ skipIndexation: true });
// self.cameraToNode(origin);
}
}, 500);

View File

@ -331,6 +331,7 @@
this.toggleQueries();
}
var lastQueryName = localStorage.getItem('lastOpenQuery');
// backup the last query
this.state.lastQuery.query = this.aqlEditor.getValue();
this.state.lastQuery.bindParam = this.bindParamTableObj;
@ -350,10 +351,12 @@
// render a button to revert back to last query
$('#lastQuery').remove();
$('#queryContent .arangoToolbarTop .pull-left')
.append('<span id="lastQuery" class="clickable">Previous Query</span>');
if (lastQueryName !== name) {
$('#queryContent .arangoToolbarTop .pull-left')
.append('<span id="lastQuery" class="clickable">Previous Query</span>');
this.breadcrumb(name);
this.breadcrumb(name);
}
$('#lastQuery').hide().fadeIn(500)
.on('click', function () {
@ -536,6 +539,9 @@
$.noty.clearQueue();
$.noty.closeAll();
self.handleResult(counter);
// SCROLL TO RESULT BOX
$('.centralRow').animate({ scrollTop: $('#queryContent').height() }, 'fast');
}
afterResult();
},
@ -1023,6 +1029,27 @@
'</tr>'
);
}
// check if existing entry already has a stored value
var queryName = localStorage.getItem('lastOpenQuery');
var query = this.collection.findWhere({name: queryName});
try {
query = query.toJSON();
} catch (ignore) {
}
if (query) {
var attributeName;
_.each($('#arangoBindParamTable input'), function (elem) {
attributeName = $(elem).attr('name');
_.each(query.parameter, function (qVal, qKey) {
if (qKey === attributeName) {
$(elem).val(qVal);
}
});
});
}
},
fillBindParamTable: function (object) {
@ -1206,7 +1233,6 @@
addAQL: function () {
// update queries first, before showing
this.refreshAQL(true);
// render options
this.createCustomQueryModal();
setTimeout(function () {
@ -1218,14 +1244,19 @@
var content = this.aqlEditor.getValue();
var queryName = localStorage.getItem('lastOpenQuery');
var query = this.collection.findWhere({name: queryName});
if (query) {
// SET QUERY STRING
query.set('value', content);
// SET QUERY BIND PARAMS
query.set('parameter', this.bindParamTableObj);
var callback = function (error) {
if (error) {
arangoHelper.arangoError('Query', 'Could not save query');
} else {
var self = this;
arangoHelper.arangoNotification('Queries', 'Saved query ' + queryName);
arangoHelper.arangoNotification('Saved query', '"' + queryName + '"');
this.collection.fetch({
success: function () {
self.updateLocalQueries();
@ -1376,7 +1407,7 @@
window.setTimeout(function () {
if (name) {
$('#subNavigationBar .breadcrumb').html(
'Query: ' + name
'Query: <span id="lastQueryName">' + name + '</span>'
);
} else {
$('#subNavigationBar .breadcrumb').html('');
@ -1480,9 +1511,18 @@
data.batchSize = parseInt(sizeBox.val(), 10);
}
var bindVars = {};
if (Object.keys(this.bindParamTableObj).length > 0) {
_.each(this.bindParamTableObj, function (val, key) {
if (data.query.indexOf(key) > -1) {
bindVars[key] = val;
}
});
data.bindVars = this.bindParamTableObj;
}
if (Object.keys(bindVars).length > 0) {
data.bindVars = bindVars;
}
// add profile flag for query execution
if (forExecute) {
@ -1547,8 +1587,6 @@
window.setTimeout(function () {
self.aqlEditor.focus();
}, 300);
$('.centralRow').animate({ scrollTop: $('#queryContent').height() }, 'fast');
},
setEditorAutoHeight: function (editor) {
@ -1795,6 +1833,9 @@
checkQueryStatus();
}, 500);
}
// SCROLL TO RESULT BOX
$('.centralRow').animate({ scrollTop: $('#queryContent').height() }, 'fast');
},
error: function (resp) {
var error;

View File

@ -168,8 +168,8 @@
var data = {
database: dbName,
collection: collectionName,
shard: shardName,
collections: [collectionName],
shards: [shardName],
fromServer: fromServer,
toServer: toServer
};

View File

@ -145,7 +145,7 @@
right: -1px;
top: 100px;
width: 400px;
z-index: 10;
z-index: 999999;
.pure-g {

View File

@ -57,3 +57,7 @@
margin-top: -10px;
width: 100%;
}
.jsoneditor-contextmenu {
z-index: 999999999999;
}

View File

@ -488,6 +488,7 @@ function analyzeServerCrash (arangod, options, checkStr) {
if (fs.isFile(cpf)) {
var matchApport = /.*apport.*/;
var matchVarTmp = /\/var\/tmp/;
var matchSystemdCoredump = /.*systemd-coredump*/;
var corePattern = fs.readBuffer(cpf);
var cp = corePattern.asciiSlice(0, corePattern.length);
@ -495,7 +496,11 @@ function analyzeServerCrash (arangod, options, checkStr) {
print(RED + "apport handles corefiles on your system. Uninstall it if you want us to get corefiles for analysis.");
return;
}
if (matchVarTmp.exec(cp) == null) {
if (matchSystemdCoredump.exec(cp) == null) {
options.coreDirectory = "/var/lib/systemd/coredump";
}
else if (matchVarTmp.exec(cp) == null) {
print(RED + "Don't know howto locate corefiles in your system. '" + cpf + "' contains: '" + cp + "'");
return;
}
@ -548,7 +553,7 @@ function checkArangoAlive (arangod, options) {
)
) {
arangod.exitStatus = res;
analyzeServerCrash(arangod, options, 'health Check');
analyzeServerCrash(arangod, options, 'health Check - ' + res.signal);
}
}
@ -1263,7 +1268,7 @@ function shutdownInstance (instanceInfo, options) {
}
} else if (arangod.exitStatus.status !== 'TERMINATED') {
if (arangod.exitStatus.hasOwnProperty('signal')) {
analyzeServerCrash(arangod, options, 'instance Shutdown');
analyzeServerCrash(arangod, options, 'instance Shutdown - ' + arangod.exitStatus.signal);
}
} else {
print('Server shutdown: Success.');

View File

@ -258,6 +258,7 @@
"ERROR_SERVICE_MOUNTPOINT_CONFLICT" : { "code" : 3011, "message" : "mountpoint already in use" },
"ERROR_MODULE_NOT_FOUND" : { "code" : 3100, "message" : "cannot locate module" },
"ERROR_MODULE_FAILURE" : { "code" : 3103, "message" : "failed to invoke module" },
"ERROR_NO_SMART_COLLECTION" : { "code" : 4000, "message" : "collection is not smart" },
"ERROR_DISPATCHER_IS_STOPPING" : { "code" : 21001, "message" : "dispatcher stopped" },
"ERROR_QUEUE_UNKNOWN" : { "code" : 21002, "message" : "named queue does not exist" },
"ERROR_QUEUE_FULL" : { "code" : 21003, "message" : "named queue is full" }

View File

@ -486,7 +486,7 @@ exports.checkAvailableVersions = function (version) {
try {
var u = 'https://www.arangodb.com/repositories/versions.php?version=' + version +
'&os=' + internal.platform;
var d = internal.download(u, '', {timeout: 10});
var d = internal.download(u, '', {timeout: 5});
var v = JSON.parse(d.body);
if (v.hasOwnProperty('bugfix')) {

Some files were not shown because too many files have changed in this diff Show More