
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

Simon Grätzer 2017-02-03 21:27:08 +01:00
commit 63dfe865ba
57 changed files with 599 additions and 342 deletions


@@ -1,6 +1,8 @@
 devel
 -----
+* fix potential port number over-/underruns
+
 * added startup option `--log.shorten-filenames` for controlling whether filenames
   in log message should be shortened to just the filename with the absolute path


@@ -490,6 +490,8 @@ if (USE_MAINTAINER_MODE)
   find_program(AWK_EXECUTABLE awk)
 endif ()

+find_program(FILE_EXECUTABLE file)
+
 ################################################################################
 ## FAILURE TESTS
 ################################################################################


@@ -90,6 +90,42 @@ Create a geo index for a hash array attribute:
 @END_EXAMPLE_ARANGOSH_OUTPUT
 @endDocuBlock geoIndexCreateForArrayAttribute2

+Use GeoIndex with AQL SORT statement:
+
+@startDocuBlockInline geoIndexSortOptimization
+@EXAMPLE_ARANGOSH_OUTPUT{geoIndexSortOptimization}
+~db._create("geoSort")
+db.geoSort.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
+| for (i = -90; i <= 90; i += 10) {
+|   for (j = -180; j <= 180; j += 10) {
+|     db.geoSort.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
+|   }
+}
+var query = "FOR doc in geoSort SORT distance(doc.latitude, doc.longitude, 0, 0) LIMIT 5 RETURN doc"
+db._explain(query, {}, {colors: false});
+db._query(query);
+~db._drop("geoSort")
+@END_EXAMPLE_ARANGOSH_OUTPUT
+@endDocuBlock geoIndexSortOptimization
+
+Use GeoIndex with AQL FILTER statement:
+
+@startDocuBlockInline geoIndexFilterOptimization
+@EXAMPLE_ARANGOSH_OUTPUT{geoIndexFilterOptimization}
+~db._create("geoFilter")
+db.geoFilter.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
+| for (i = -90; i <= 90; i += 10) {
+|   for (j = -180; j <= 180; j += 10) {
+|     db.geoFilter.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
+|   }
+}
+var query = "FOR doc in geoFilter FILTER distance(doc.latitude, doc.longitude, 0, 0) < 2000 RETURN doc"
+db._explain(query, {}, {colors: false});
+db._query(query);
+~db._drop("geoFilter")
+@END_EXAMPLE_ARANGOSH_OUTPUT
+@endDocuBlock geoIndexFilterOptimization
+
 <!-- js/common/modules/@arangodb/arango-collection-common.js-->
 @startDocuBlock collectionGeo


@@ -273,8 +273,10 @@ The geo index provides operations to find documents with coordinates nearest to
 comparison coordinate, and to find documents with coordinates that are within a specifiable
 radius around a comparison coordinate.

-The geo index is used via dedicated functions in AQL or the simple queries functions,
-but will not be used for other types of queries or conditions.
+The geo index is used via dedicated functions in AQL, the simple queries
+functions and it is implicitly applied when in AQL a SORT or FILTER is used with
+the distance function. Otherwise it will not be used for other types of queries
+or conditions.

 ### Fulltext Index
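As a quick arangosh illustration of the optimization described in the hunk above (collection and attribute names here are illustrative, not part of this commit), a SORT on the distance function can now be served by the geo index:

    db._create("demo");
    db.demo.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
    db.demo.save({ latitude: 50.94, longitude: 6.96 });
    // the explain output should list the geo index for this SORT on distance()
    db._explain("FOR d IN demo SORT distance(d.latitude, d.longitude, 0, 0) LIMIT 5 RETURN d");
    db._drop("demo");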


@@ -71,7 +71,10 @@ different usage scenarios:
     { "coords": [ 50.9406645, 6.9599115 ] }

-  Geo indexes will only be invoked via special functions.
+  Geo indexes will be invoked via special functions or AQL optimization. The
+  optimization can be triggered when a collection with geo index is enumerated
+  and a SORT or FILTER statement is used in conjunction with the distance
+  function.

 - fulltext index: a fulltext index can be used to index all words contained in
   a specific attribute of all documents in a collection. Only words with a
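Likewise, a FILTER on the distance function can trigger the optimization; a minimal arangosh sketch (names again illustrative, not part of this commit):

    db._create("demo");
    db.demo.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
    db.demo.save({ latitude: 50.94, longitude: 6.96 });
    // distance() yields meters, so this returns documents within 100 km of the point
    db._query("FOR d IN demo FILTER distance(d.latitude, d.longitude, 50.94, 6.96) < 100000 RETURN d").toArray();
    db._drop("demo");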


@@ -49,8 +49,6 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
 // --SECTION--                                                            macros
 // -----------------------------------------------------------------------------

-#define DELETE_ENDPOINT(e) if (e != 0) delete e;
-
 #define FACTORY_NAME(name) name ## Factory
 #define FACTORY(name, specification) arangodb::Endpoint::FACTORY_NAME(name)(specification)

@@ -58,12 +56,12 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
 #define CHECK_ENDPOINT_FEATURE(type, specification, feature, expected) \
   e = FACTORY(type, specification); \
   BOOST_CHECK_EQUAL((expected), (e->feature())); \
-  DELETE_ENDPOINT(e);
+  delete e;

 #define CHECK_ENDPOINT_SERVER_FEATURE(type, specification, feature, expected) \
   e = arangodb::Endpoint::serverFactory(specification, 1, true); \
   BOOST_CHECK_EQUAL((expected), (e->feature())); \
-  DELETE_ENDPOINT(e);
+  delete e;

 // -----------------------------------------------------------------------------
 // --SECTION--                                              setup / tear-down

@@ -118,6 +116,11 @@ BOOST_AUTO_TEST_CASE (EndpointInvalid) {
   BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("ssl@tcp://127.0.0.1:8529"));
   BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:8529"));
   BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:"));
+
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65536"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65537"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:-1"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:6555555555"));
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -491,7 +494,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer1) {
   e = arangodb::Endpoint::serverFactory("tcp://127.0.0.1", 1, true);
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -503,7 +506,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer2) {
   e = arangodb::Endpoint::serverFactory("ssl://127.0.0.1", 1, true);
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -516,7 +519,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer3) {
   e = arangodb::Endpoint::serverFactory("unix:///tmp/socket", 1, true);
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 #endif

@@ -529,7 +532,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient1) {
   e = arangodb::Endpoint::clientFactory("tcp://127.0.0.1");
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -541,7 +544,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient2) {
   e = arangodb::Endpoint::clientFactory("ssl://127.0.0.1");
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -554,7 +557,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient3) {
   e = arangodb::Endpoint::clientFactory("unix:///tmp/socket");
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 #endif

@@ -575,7 +578,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerTcpIpv4WithPort) {
   BOOST_CHECK_EQUAL(667, e->port());
   BOOST_CHECK_EQUAL("127.0.0.1:667", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -596,7 +599,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerUnix) {
   BOOST_CHECK_EQUAL(0, e->port());
   BOOST_CHECK_EQUAL("localhost", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 #endif

@@ -617,7 +620,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientSslIpV6WithPortHttp) {
   BOOST_CHECK_EQUAL(43425, e->port());
   BOOST_CHECK_EQUAL("[0001:0002:0003:0004:0005:0006:0007:0008]:43425", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -637,7 +640,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientTcpIpv6WithoutPort) {
   BOOST_CHECK_EQUAL(8529, e->port());
   BOOST_CHECK_EQUAL("[::]:8529", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }

 BOOST_AUTO_TEST_SUITE_END()


@@ -38,6 +38,7 @@
 using namespace arangodb::basics;

 static bool Initialized = false;
+static uint64_t counter = 0;

 // -----------------------------------------------------------------------------
 // --SECTION--                                              setup / tear-down

@@ -73,8 +74,6 @@ struct CFilesSetup {
   }

   StringBuffer* writeFile (const char* blob) {
-    static uint64_t counter = 0;
-
     StringBuffer* filename = new StringBuffer(TRI_UNKNOWN_MEM_ZONE);
     filename->appendText(_directory);
     filename->appendText("/tmp-");

@@ -108,6 +107,71 @@ struct CFilesSetup {

 BOOST_FIXTURE_TEST_SUITE(CFilesTest, CFilesSetup)

+BOOST_AUTO_TEST_CASE (tst_createdirectory) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename = out.str();
+  long unused1;
+  std::string unused2;
+
+  int res = TRI_CreateDirectory(filename.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename.c_str()));
+
+  res = TRI_RemoveDirectory(filename.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename.c_str()));
+}
+
+BOOST_AUTO_TEST_CASE (tst_createdirectoryrecursive) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename1 = out.str();
+  out << "/abc";
+  std::string filename2 = out.str();
+
+  long unused1;
+  std::string unused2;
+
+  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
+
+  res = TRI_RemoveDirectory(filename1.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
+}
+
+BOOST_AUTO_TEST_CASE (tst_removedirectorydeterministic) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename1 = out.str();
+  out << "/abc";
+  std::string filename2 = out.str();
+
+  long unused1;
+  std::string unused2;
+
+  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
+
+  res = TRI_RemoveDirectoryDeterministic(filename1.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief test file exists
 ////////////////////////////////////////////////////////////////////////////////

@@ -116,6 +180,7 @@ BOOST_AUTO_TEST_CASE (tst_existsfile) {
   StringBuffer* filename = writeFile("");
   BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename->c_str()));
   TRI_UnlinkFile(filename->c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename->c_str()));

   delete filename;
 }


@@ -25,11 +25,7 @@
 #include "Agency/Agent.h"
 #include "Agency/Job.h"

-#include <velocypack/Iterator.h>
-#include <velocypack/velocypack-aliases.h>
-
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 AddFollower::AddFollower(Node const& snapshot, Agent* agent,
                          std::string const& jobId, std::string const& creator,


@@ -1,4 +1,4 @@
-///////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 /// DISCLAIMER
 ///
 /// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany

@@ -54,7 +54,6 @@
 using namespace arangodb;
 using namespace arangodb::application_features;
-using namespace arangodb::basics;
 using namespace arangodb::httpclient;
 using namespace arangodb::rest;

@@ -450,7 +449,7 @@ std::string AgencyCommManager::path(std::string const& p1) {
     return "";
   }

-  return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/");
+  return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/");
 }

 std::string AgencyCommManager::path(std::string const& p1,

@@ -459,8 +458,8 @@ std::string AgencyCommManager::path(std::string const& p1,
     return "";
   }

-  return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/") + "/" +
-         StringUtils::trim(p2, "/");
+  return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/") + "/" +
+         basics::StringUtils::trim(p2, "/");
 }

 std::string AgencyCommManager::generateStamp() {

@@ -674,7 +673,7 @@ void AgencyCommManager::removeEndpoint(std::string const& endpoint) {
 }

 std::string AgencyCommManager::endpointsString() const {
-  return StringUtils::join(endpoints(), ", ");
+  return basics::StringUtils::join(endpoints(), ", ");
 }

 std::vector<std::string> AgencyCommManager::endpoints() const {

@@ -1280,7 +1279,7 @@ void AgencyComm::updateEndpoints(arangodb::velocypack::Slice const& current) {
   for (const auto& i : VPackObjectIterator(current)) {
     auto const endpoint = Endpoint::unifiedForm(i.value.copyString());
     if (std::find(stored.begin(), stored.end(), endpoint) == stored.end()) {
-      LOG_TOPIC(INFO, Logger::CLUSTER)
+      LOG_TOPIC(DEBUG, Logger::CLUSTER)
         << "Adding endpoint " << endpoint << " to agent pool";
       AgencyCommManager::MANAGER->addEndpoint(endpoint);
     }

@@ -1391,7 +1390,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
       b.add(VPackValue(clientId));
     }

-    LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
+    LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
       "Failed agency comm (" << result._statusCode << ")! " <<
       "Inquiring about clientId " << clientId << ".";

@@ -1410,25 +1409,25 @@ AgencyCommResult AgencyComm::sendWithFailover(
           for (auto const& i : VPackArrayIterator(inner)) {
             if (i.isUInt()) {
               if (i.getUInt() == 0) {
-                LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+                LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
                   << body << " failed: " << outer.toJson();
                 return result;
               } else {
                 success = true;
               }
             } else {
-              LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+              LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
                 << body << " failed with " << outer.toJson();
             }
           }
         }
       }

       if (success) {
-        LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+        LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
           << body << " succeeded (" << outer.toJson() << ")";
         return inq;
       } else {
-        LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+        LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
          << body << " failed (" << outer.toJson() << ")";
         return result;
       }

@@ -1437,7 +1436,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
       }
       return inq;
     } else {
-      LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
+      LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
        "Inquiry failed (" << inq._statusCode << "). Keep trying ...";
      continue;
    }


@@ -232,7 +232,7 @@ void AgencyFeature::start() {
   _agent.reset(new consensus::Agent(consensus::config_t(
       _size, _poolSize, _minElectionTimeout, _maxElectionTimeout, endpoint,
-      _agencyEndpoints, _supervision, _waitForSync, _supervisionFrequency,
+      _agencyEndpoints, _supervision, false, _supervisionFrequency,
       _compactionStepSize, _compactionKeepSize, _supervisionGracePeriod,
       _cmdLineTimings)));


@@ -257,7 +257,7 @@ bool Agent::recvAppendEntriesRPC(
   term_t term, std::string const& leaderId, index_t prevIndex, term_t prevTerm,
   index_t leaderCommitIndex, query_t const& queries) {

-  LOG_TOPIC(DEBUG, Logger::AGENCY) << "Got AppendEntriesRPC from "
+  LOG_TOPIC(TRACE, Logger::AGENCY) << "Got AppendEntriesRPC from "
                                    << leaderId << " with term " << term;

   // Update commit index

@@ -276,40 +276,42 @@ bool Agent::recvAppendEntriesRPC(
   size_t nqs = queries->slice().length();

   // State machine, _lastCommitIndex to advance atomically
+  MUTEX_LOCKER(mutexLocker, _ioLock);
+
   if (nqs > 0) {
-    MUTEX_LOCKER(mutexLocker, _ioLock);

     size_t ndups = _state.removeConflicts(queries);

     if (nqs > ndups) {
-      LOG_TOPIC(TRACE, Logger::AGENCY)
+      LOG_TOPIC(DEBUG, Logger::AGENCY)
         << "Appending " << nqs - ndups << " entries to state machine. ("
        << nqs << ", " << ndups << ")";

       try {
-        _state.log(queries, ndups);
+        auto last = _state.log(queries, ndups);
+
+        _spearhead.apply(
+          _state.slices(last-ndups, last), last, _constituent.term());
+
+        _readDB.apply(
+          _state.slices(last-ndups, last), last, _constituent.term());
+
+        _lastCommitIndex = last;
+
+        if (_lastCommitIndex >= _nextCompationAfter) {
+          _state.compact(_lastCommitIndex);
+          _nextCompationAfter += _config.compactionStepSize();
+        }
+
       } catch (std::exception const&) {
         LOG_TOPIC(DEBUG, Logger::AGENCY)
           << "Malformed query: " << __FILE__ << __LINE__;
       }
     }
   }

-  _spearhead.apply(
-    _state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
-    _constituent.term());
-
-  _readDB.apply(
-    _state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
-    _constituent.term());
-
-  _lastCommitIndex = leaderCommitIndex;
-
-  if (_lastCommitIndex >= _nextCompationAfter) {
-    _state.compact(_lastCommitIndex);
-    _nextCompationAfter += _config.compactionStepSize();
-  }
-
   return true;
 }

@@ -348,7 +350,7 @@ void Agent::sendAppendEntriesRPC() {
     duration<double> m = system_clock::now() - _lastSent[followerId];

     if (highest == _lastHighest[followerId] &&
-        m.count() < 0.5 * _config.minPing()) {
+        m.count() < 0.25 * _config.minPing()) {
       continue;
     }


@@ -28,7 +28,6 @@
 #include "Agency/MoveShard.h"

 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 CleanOutServer::CleanOutServer(Node const& snapshot, Agent* agent,
                                std::string const& jobId,


@@ -468,11 +468,13 @@ void Constituent::callElection() {
 void Constituent::update(std::string const& leaderID, term_t t) {
   MUTEX_LOCKER(guard, _castLock);
   _term = t;
+
   if (_leaderID != leaderID) {
     LOG_TOPIC(DEBUG, Logger::AGENCY)
       << "Constituent::update: setting _leaderID to " << leaderID
       << " in term " << _term;
     _leaderID = leaderID;
+    _role = FOLLOWER;
   }
 }

@@ -546,6 +548,11 @@ void Constituent::run() {
     LOG_TOPIC(DEBUG, Logger::AGENCY) << "Set _leaderID to " << _leaderID
                                      << " in term " << _term;
   } else {
+    {
+      MUTEX_LOCKER(guard, _castLock);
+      _role = FOLLOWER;
+    }
+
     while (!this->isStopping()) {
       if (_role == FOLLOWER) {
         static double const M = 1.0e6;


@@ -75,7 +75,7 @@ bool FailedFollower::create() {
     }
   }

-  _jb = std::make_shared<velocypack::Builder>();
+  _jb = std::make_shared<Builder>();
   _jb->openArray();
   _jb->openObject();

@@ -128,7 +128,7 @@ bool FailedFollower::start() {
   // Copy todo to pending
-  velocypack::Builder todo, pending;
+  Builder todo, pending;

   // Get todo entry
   todo.openArray();

@@ -254,7 +254,7 @@ JOB_STATUS FailedFollower::status() {
     if (compareServerLists(planned.slice(), current.slice())) {
       // Remove shard from /arango/Target/FailedServers/<server> array
-      velocypack::Builder del;
+      Builder del;
       del.openArray();
       del.openObject();
       std::string path = _agencyPrefix + failedServersPrefix + "/" + _from;


@@ -27,7 +27,6 @@
 #include "Agency/Job.h"

 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 FailedLeader::FailedLeader(Node const& snapshot, Agent* agent,
                            std::string const& jobId, std::string const& creator,


@@ -58,7 +58,7 @@ bool FailedServer::start() {
     << "Start FailedServer job " + _jobId + " for server " + _server;

   // Copy todo to pending
-  velocypack::Builder todo, pending;
+  Builder todo, pending;

   // Get todo entry
   todo.openArray();

@@ -210,7 +210,7 @@ bool FailedServer::create() {
   std::string path = _agencyPrefix + toDoPrefix + _jobId;

-  _jb = std::make_shared<velocypack::Builder>();
+  _jb = std::make_shared<Builder>();
   _jb->openArray();
   _jb->openObject();

@@ -271,7 +271,7 @@ JOB_STATUS FailedServer::status() {
       // mop: ohhh...server is healthy again!
       bool serverHealthy = serverHealth == Supervision::HEALTH_STATUS_GOOD;

-      std::shared_ptr<velocypack::Builder> deleteTodos;
+      std::shared_ptr<Builder> deleteTodos;

       Node::Children const todos = _snapshot(toDoPrefix).children();
       Node::Children const pends = _snapshot(pendingPrefix).children();

@@ -281,7 +281,7 @@ JOB_STATUS FailedServer::status() {
         if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
           if (serverHealthy) {
             if (!deleteTodos) {
-              deleteTodos.reset(new velocypack::Builder());
+              deleteTodos.reset(new Builder());
               deleteTodos->openArray();
               deleteTodos->openObject();
             }


@@ -66,7 +66,7 @@ void Inception::gossip() {
   auto const version = config.version();

   // Build gossip message
-  auto out = std::make_shared<velocypack::Builder>();
+  auto out = std::make_shared<Builder>();
   out->openObject();
   out->add("endpoint", VPackValue(config.endpoint()));
   out->add("id", VPackValue(config.id()));

@@ -169,7 +169,7 @@ bool Inception::restartingActiveAgent() {
   auto const& clientEp = myConfig.endpoint();
   auto const majority = (myConfig.size()+1)/2;

-  velocypack::Builder greeting;
+  Builder greeting;
   {
     VPackObjectBuilder b(&greeting);
     greeting.add(clientId, VPackValue(clientEp));

@@ -259,7 +259,7 @@ bool Inception::restartingActiveAgent() {
       }
     }

-    auto agency = std::make_shared<velocypack::Builder>();
+    auto agency = std::make_shared<Builder>();
     agency->openObject();
     agency->add("term", theirConfig.get("term"));
     agency->add("id", VPackValue(theirLeaderId));

@@ -435,7 +435,7 @@ bool Inception::estimateRAFTInterval() {
     LOG_TOPIC(DEBUG, Logger::AGENCY)
       << "mean(" << mean << ") stdev(" << stdev<< ")";

-    velocypack::Builder measurement;
+    Builder measurement;
     measurement.openObject();
     measurement.add("mean", VPackValue(mean));
     measurement.add("stdev", VPackValue(stdev));

@@ -541,8 +541,10 @@ void Inception::run() {
     LOG_TOPIC(INFO, Logger::AGENCY) << "Activating agent.";
     _agent->ready(true);
   } else {
+    if (!this->isStopping()) {
       LOG_TOPIC(FATAL, Logger::AGENCY)
         << "Unable to restart with persisted pool. Fatal exit.";
+    }
     FATAL_ERROR_EXIT();
     // FATAL ERROR
   }


@@ -25,7 +25,7 @@
 using namespace arangodb::consensus;

-bool arangodb::consensus::compareServerLists(velocypack::Slice plan, velocypack::Slice current) {
+bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
   if (!plan.isArray() || !current.isArray()) {
     return false;
   }

@@ -80,7 +80,7 @@ JOB_STATUS Job::exists() const {
 bool Job::finish(std::string const& type, bool success,
                  std::string const& reason) const {
-  velocypack::Builder pending, finished;
+  Builder pending, finished;

   // Get todo entry
   pending.openArray();


@@ -28,7 +28,6 @@
 #include "Node.h"
 #include "Supervision.h"

-#include <velocypack/Builder.h>
 #include <velocypack/Iterator.h>
 #include <velocypack/Slice.h>
 #include <velocypack/velocypack-aliases.h>

@@ -42,7 +41,7 @@ namespace consensus {
 // and all others followers. Both arguments must be arrays. Returns true,
 // if the first items in both slice are equal and if both arrays contain
 // the same set of strings.
-bool compareServerLists(velocypack::Slice plan, velocypack::Slice current);
+bool compareServerLists(Slice plan, Slice current);

 enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
 const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",

@@ -64,9 +63,9 @@ static std::string const plannedServers = "/Plan/DBServers";
 static std::string const healthPrefix = "/Supervision/Health/";

 inline arangodb::consensus::write_ret_t transact(Agent* _agent,
-                                                 velocypack::Builder const& transaction,
+                                                 Builder const& transaction,
                                                  bool waitForCommit = true) {
-  query_t envelope = std::make_shared<velocypack::Builder>();
+  query_t envelope = std::make_shared<Builder>();

   try {
     envelope->openArray();

@@ -138,7 +137,7 @@ struct Job {
   std::string _creator;
   std::string _agencyPrefix;

-  std::shared_ptr<velocypack::Builder> _jb;
+  std::shared_ptr<Builder> _jb;
 };


@@ -29,7 +29,6 @@
 static std::string const DBServer = "DBServer";

 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 MoveShard::MoveShard(Node const& snapshot, Agent* agent,
                      std::string const& jobId, std::string const& creator,


@@ -33,9 +33,8 @@
 #include <deque>
 #include <regex>

-using namespace arangodb;
-using namespace arangodb::basics;
 using namespace arangodb::consensus;
+using namespace arangodb::basics;

 struct NotEmpty {
   bool operator()(const std::string& s) { return !s.empty(); }

@@ -89,16 +88,16 @@ Node::Node(std::string const& name, Store* store)
 Node::~Node() {}

 /// Get slice to value buffer
-velocypack::Slice Node::slice() const {
+Slice Node::slice() const {
   // Some array
   if (_isArray) {
     rebuildVecBuf();
-    return velocypack::Slice(_vecBuf.data());
+    return Slice(_vecBuf.data());
   }

   // Some value
   if (!_value.empty()) {
-    return velocypack::Slice(_value.front().data());
+    return Slice(_value.front().data());
   }

   // Empty object

@@ -107,10 +106,10 @@ velocypack::Slice Node::slice() const {
 void Node::rebuildVecBuf() const {
   if (_vecBufDirty) {  // Dirty vector buffer
-    velocypack::Builder tmp;
+    Builder tmp;
     tmp.openArray();
     for (auto const& i : _value) {
-      tmp.add(velocypack::Slice(i.data()));
+      tmp.add(Slice(i.data()));
     }
     tmp.close();
     _vecBuf = *tmp.steal();

@@ -324,7 +323,7 @@ Store& Node::store() { return *(root()._store); }
 Store const& Node::store() const { return *(root()._store); }

 // velocypack value type of this node
-velocypack::ValueType Node::valueType() const { return slice().type(); }
+ValueType Node::valueType() const { return slice().type(); }

 // file time to live entry for this node to now + millis
 bool Node::addTimeToLive(long millis) {

@@ -359,7 +358,7 @@ namespace consensus {
 /// Set value
 template <>
 bool Node::handle<SET>(VPackSlice const& slice) {
-  VPackSlice val = slice.get("new");
+  Slice val = slice.get("new");

   if (val.isObject()) {
     if (val.hasKey("op")) {  // No longer a keyword but a regular key "op"

@@ -394,12 +393,12 @@ bool Node::handle<SET>(VPackSlice const& slice) {
 /// Increment integer value or set 1
 template <>
 bool Node::handle<INCREMENT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openObject();
   try {
-    tmp.add("tmp", velocypack::Value(this->slice().getInt() + 1));
+    tmp.add("tmp", Value(this->slice().getInt() + 1));
   } catch (std::exception const&) {
-    tmp.add("tmp", velocypack::Value(1));
+    tmp.add("tmp", Value(1));
   }
   tmp.close();
   *this = tmp.slice().get("tmp");

@@ -409,12 +408,12 @@ bool Node::handle<INCREMENT>(VPackSlice const& slice) {
 /// Decrement integer value or set -1
 template <>
 bool Node::handle<DECREMENT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openObject();
   try {
-    tmp.add("tmp", velocypack::Value(this->slice().getInt() - 1));
+    tmp.add("tmp", Value(this->slice().getInt() - 1));
   } catch (std::exception const&) {
-    tmp.add("tmp", velocypack::Value(-1));
+    tmp.add("tmp", Value(-1));
   }
   tmp.close();
   *this = tmp.slice().get("tmp");

@@ -429,7 +428,7 @@ bool Node::handle<PUSH>(VPackSlice const& slice) {
       << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     for (auto const& old : VPackArrayIterator(this->slice())) tmp.add(old);

@@ -448,7 +447,7 @@ bool Node::handle<ERASE>(VPackSlice const& slice) {
       << "Operator erase without value to be erased: " << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     for (auto const& old : VPackArrayIterator(this->slice())) {

@@ -475,7 +474,7 @@ bool Node::handle<REPLACE>(VPackSlice const& slice) {
       << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     for (auto const& old : VPackArrayIterator(this->slice())) {

@@ -494,7 +493,7 @@ bool Node::handle<REPLACE>(VPackSlice const& slice) {
 /// Remove element from end of array.
 template <>
 bool Node::handle<POP>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     VPackArrayIterator it(this->slice());

@@ -519,7 +518,7 @@ bool Node::handle<PREPEND>(VPackSlice const& slice) {
       << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   tmp.add(slice.get("new"));
   if (this->slice().isArray()) {

@@ -533,7 +532,7 @@ bool Node::handle<PREPEND>(VPackSlice const& slice) {
 /// Remove element from front of array
 template <>
 bool Node::handle<SHIFT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {  // If a
     VPackArrayIterator it(this->slice());

@@ -678,7 +677,7 @@ bool Node::applies(VPackSlice const& slice) {
   return true;
 }

-void Node::toBuilder(velocypack::Builder& builder, bool showHidden) const {
+void Node::toBuilder(Builder& builder, bool showHidden) const {
   try {
     if (type() == NODE) {
       VPackObjectBuilder guard(&builder);

@@ -729,7 +728,7 @@ Node::Children& Node::children() { return _children; }
 Node::Children const& Node::children() const { return _children; }

 std::string Node::toJson() const {
-  velocypack::Builder builder;
+  Builder builder;
   builder.openArray();
   toBuilder(builder);
   builder.close();

@@ -796,7 +795,7 @@ std::string Node::getString() const {
   return slice().copyString();
 }

-velocypack::Slice Node::getArray() const {
+Slice Node::getArray() const {
   if (type() == NODE) {
     throw StoreException("Must not convert NODE type to array");
   }

@@ -804,6 +803,6 @@ velocypack::Slice Node::getArray() const {
     throw StoreException("Not an array type");
   }
   rebuildVecBuf();
-  return velocypack::Slice(_vecBuf.data());
+  return Slice(_vecBuf.data());
 }


@@ -50,6 +50,8 @@ enum Operation {
   REPLACE
 };

+using namespace arangodb::velocypack;
+
 class StoreException : public std::exception {
  public:
   explicit StoreException(std::string const& message) : _message(message) {}

@@ -159,7 +161,7 @@ class Node {
   bool handle(arangodb::velocypack::Slice const&);

   /// @brief Create Builder representing this store
-  void toBuilder(velocypack::Builder&, bool showHidden = false) const;
+  void toBuilder(Builder&, bool showHidden = false) const;

   /// @brief Access children
   Children& children();

@@ -168,10 +170,10 @@ class Node {
   Children const& children() const;

   /// @brief Create slice from value
-  velocypack::Slice slice() const;
+  Slice slice() const;

   /// @brief Get value type
-  velocypack::ValueType valueType() const;
+  ValueType valueType() const;

   /// @brief Add observer for this node
   bool addObserver(std::string const&);

@@ -216,7 +218,7 @@ class Node {
   std::string getString() const;

   /// @brief Get array value
-  velocypack::Slice getArray() const;
+  Slice getArray() const;

  protected:
   /// @brief Add time to live entry

@@ -232,8 +234,8 @@ class Node {
   Store* _store;       ///< @brief Store
   Children _children;  ///< @brief child nodes
   TimePoint _ttl;      ///< @brief my expiry
-  std::vector<velocypack::Buffer<uint8_t>> _value;  ///< @brief my value
-  mutable velocypack::Buffer<uint8_t> _vecBuf;
+  std::vector<Buffer<uint8_t>> _value;  ///< @brief my value
+  mutable Buffer<uint8_t> _vecBuf;
   mutable bool _vecBufDirty;
   bool _isArray;
 };


@@ -27,7 +27,6 @@
 #include "Agency/Job.h"

 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 RemoveServer::RemoveServer(Node const& snapshot, Agent* agent,
                            std::string const& jobId, std::string const& creator,


@@ -35,10 +35,10 @@
 #include "Rest/Version.h"

 using namespace arangodb;
 using namespace arangodb::basics;
-using namespace arangodb::consensus;
 using namespace arangodb::rest;
-using namespace arangodb::velocypack;
+using namespace arangodb::consensus;

 ////////////////////////////////////////////////////////////////////////////////
 /// @brief ArangoDB server


@@ -184,7 +184,7 @@ class State {
   size_t _cur;

   /// @brief Operation options
-  OperationOptions _options;
+  arangodb::OperationOptions _options;

   /// @brief Empty log entry;
   static log_t emptyLog;


@@ -40,9 +40,8 @@
 #include <iomanip>
 #include <regex>

-using namespace arangodb::basics;
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
+using namespace arangodb::basics;

 /// Non-Emptyness of string
 struct NotEmpty {


@@ -28,9 +28,6 @@
 #include "Basics/Thread.h"
 #include "Node.h"

-#include <velocypack/Builder.h>
-#include <velocypack/Slice.h>
-
 namespace arangodb {
 namespace consensus {

@@ -61,10 +58,10 @@ class Store : public arangodb::Thread {
   std::vector<bool> apply(query_t const& query, bool verbose = false);

   /// @brief Apply single entry in query
-  bool apply(velocypack::Slice const& query, bool verbose = false);
+  bool apply(Slice const& query, bool verbose = false);

   /// @brief Apply entry in query
-  std::vector<bool> apply(std::vector<velocypack::Slice> const& query,
+  std::vector<bool> apply(std::vector<Slice> const& query,
                           index_t lastCommitIndex, term_t term,
                           bool inform = true);

@@ -82,7 +79,7 @@ class Store : public arangodb::Thread {
   bool start();

   /// @brief Dump everything to builder
-  void dumpToBuilder(velocypack::Builder&) const;
+  void dumpToBuilder(Builder&) const;

   /// @brief Notify observers
   void notifyObservers() const;

@@ -93,7 +90,7 @@ class Store : public arangodb::Thread {
   Store& operator=(VPackSlice const& slice);

   /// @brief Create Builder representing this store
-  void toBuilder(velocypack::Builder&, bool showHidden = false) const;
+  void toBuilder(Builder&, bool showHidden = false) const;

   /// @brief Copy out a node
   Node get(std::string const& path) const;


@@ -41,9 +41,9 @@
 #include "Basics/MutexLocker.h"

 using namespace arangodb;
-using namespace arangodb::application_features;
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
+using namespace arangodb::application_features;

 std::string Supervision::_agencyPrefix = "/arango";


@@ -27,7 +27,6 @@
 #include "Agency/Job.h"

 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 UnassumedLeadership::UnassumedLeadership(
     Node const& snapshot, Agent* agent, std::string const& jobId,


@@ -39,7 +39,6 @@ using namespace arangodb;
 using namespace arangodb::application_features;
 using namespace arangodb::basics;
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 static void JS_EnabledAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_BEGIN(isolate);


@@ -418,7 +418,6 @@ struct AstNode {
   bool isAttributeAccessForVariable(Variable const* variable, bool allowIndexedAccess) const {
     auto node = getAttributeAccessForVariable(allowIndexedAccess);
-
     if (node == nullptr) {
       return false;
     }


@@ -33,6 +33,7 @@
 #include "Aql/AqlValue.h"
 #include "Aql/BlockCollector.h"
 #include "Aql/ExecutionEngine.h"
+#include "Aql/ExecutionStats.h"
 #include "Basics/Exceptions.h"
 #include "Basics/StaticStrings.h"
 #include "Basics/StringBuffer.h"

@@ -1328,7 +1329,7 @@ int RemoteBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
       responseBodyBuf.c_str(), responseBodyBuf.length());
   VPackSlice slice = builder->slice();
   if (slice.hasKey("code")) {
     return slice.get("code").getNumericValue<int>();
   }

@@ -1362,9 +1363,14 @@ int RemoteBlock::shutdown(int errorCode) {
   std::shared_ptr<VPackBuilder> builder =
       VPackParser::fromJson(responseBodyBuf.c_str(), responseBodyBuf.length());
   VPackSlice slice = builder->slice();

-  // read "warnings" attribute if present and add it to our query
   if (slice.isObject()) {
+    if (slice.hasKey("stats")) {
+      ExecutionStats newStats(slice.get("stats"));
+      _engine->_stats.add(newStats);
+    }
+
+    // read "warnings" attribute if present and add it to our query
     VPackSlice warnings = slice.get("warnings");
     if (warnings.isArray()) {
       auto query = _engine->getQuery();

@@ -1415,19 +1421,14 @@ AqlItemBlock* RemoteBlock::getSome(size_t atLeast, size_t atMost) {
       res->result->getBodyVelocyPack();
   VPackSlice responseBody = responseBodyBuilder->slice();

-  ExecutionStats newStats(responseBody.get("stats"));
-  _engine->_stats.addDelta(_deltaStats, newStats);
-  _deltaStats = newStats;
-
   if (VelocyPackHelper::getBooleanValue(responseBody, "exhausted", true)) {
     traceGetSomeEnd(nullptr);
     return nullptr;
   }

-  auto r = new arangodb::aql::AqlItemBlock(_engine->getQuery()->resourceMonitor(), responseBody);
-  traceGetSomeEnd(r);
-  return r;
+  auto r = std::make_unique<AqlItemBlock>(_engine->getQuery()->resourceMonitor(), responseBody);
+  traceGetSomeEnd(r.get());
+  return r.release();

   // cppcheck-suppress style
   DEBUG_END_BLOCK();


@@ -28,7 +28,6 @@
 #include "Aql/ClusterNodes.h"
 #include "Aql/ExecutionBlock.h"
 #include "Aql/ExecutionNode.h"
-#include "Aql/ExecutionStats.h"
 #include "Rest/GeneralRequest.h"

 namespace arangodb {

@@ -339,9 +338,6 @@ class RemoteBlock : public ExecutionBlock {
   /// @brief the ID of the query on the server as a string
   std::string _queryId;

-  /// @brief the ID of the query on the server as a string
-  ExecutionStats _deltaStats;
-
   /// @brief whether or not this block will forward initialize,
   /// initializeCursor or shutDown requests
   bool const _isResponsibleForInitializeCursor;


@@ -529,6 +529,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
     VPackBuilder tmp;
     query->ast()->variables()->toVelocyPack(tmp);
+    result.add("initialize", VPackValue(false));
     result.add("variables", tmp.slice());

     result.add("collections", VPackValue(VPackValueType::Array));

@@ -1133,7 +1134,7 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
   bool const isCoordinator =
       arangodb::ServerState::instance()->isCoordinator(role);
   bool const isDBServer = arangodb::ServerState::instance()->isDBServer(role);
   TRI_ASSERT(queryRegistry != nullptr);

   ExecutionEngine* engine = nullptr;

@@ -1354,8 +1355,11 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
     }

     engine->_root = root;
-    root->initialize();
-    root->initializeCursor(nullptr, 0);
+
+    if (plan->isResponsibleForInitialize()) {
+      root->initialize();
+      root->initializeCursor(nullptr, 0);
+    }

     return engine;
   } catch (...) {


@@ -177,6 +177,7 @@ ExecutionPlan::ExecutionPlan(Ast* ast)
     : _ids(),
       _root(nullptr),
       _varUsageComputed(false),
+      _isResponsibleForInitialize(true),
       _nextId(0),
       _ast(ast),
       _lastLimitNode(nullptr),
@@ -280,6 +281,7 @@ ExecutionPlan* ExecutionPlan::clone() {
   plan->_root = _root->clone(plan.get(), true, false);
   plan->_nextId = _nextId;
   plan->_appliedRules = _appliedRules;
+  plan->_isResponsibleForInitialize = _isResponsibleForInitialize;
 
   CloneNodeAdder adder(plan.get());
   plan->_root->walk(&adder);
@@ -348,6 +350,7 @@ void ExecutionPlan::toVelocyPack(VPackBuilder& builder, Ast* ast, bool verbose)
   size_t nrItems = 0;
   builder.add("estimatedCost", VPackValue(_root->getCost(nrItems)));
   builder.add("estimatedNrItems", VPackValue(nrItems));
+  builder.add("initialize", VPackValue(_isResponsibleForInitialize));
   builder.close();
 }
@@ -1882,17 +1885,22 @@ void ExecutionPlan::insertDependency(ExecutionNode* oldNode,
 /// @brief create a plan from VPack
 ExecutionNode* ExecutionPlan::fromSlice(VPackSlice const& slice) {
-  ExecutionNode* ret = nullptr;
   if (!slice.isObject()) {
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "plan slice is not an object");
   }
 
+  if (slice.hasKey("initialize")) {
+    // whether or not this plan (or fragment) is responsible for calling initialize
+    _isResponsibleForInitialize = slice.get("initialize").getBoolean();
+  }
+
   VPackSlice nodes = slice.get("nodes");
 
   if (!nodes.isArray()) {
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "plan \"nodes\" attribute is not an array");
   }
 
+  ExecutionNode* ret = nullptr;
+
   // first, re-create all nodes from the Slice, using the node ids
   // no dependency links will be set up in this step
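Round-tripping the optional "initialize" attribute follows the usual VelocyPack pattern for optional keys. A standalone sketch, assuming only the velocypack library headers:

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <iostream>

int main() {
  VPackBuilder b;
  b.openObject();
  b.add("initialize", VPackValue(false));
  b.close();

  bool responsible = true;  // default mirrors the ExecutionPlan constructor
  VPackSlice slice = b.slice();
  if (slice.hasKey("initialize")) {
    responsible = slice.get("initialize").getBoolean();
  }
  std::cout << std::boolalpha << responsible << "\n";  // false
}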

View File

@@ -75,6 +75,8 @@ class ExecutionPlan {
   /// @brief check if the plan is empty
   inline bool empty() const { return (_root == nullptr); }
 
+  bool isResponsibleForInitialize() const { return _isResponsibleForInitialize; }
+
   /// @brief note that an optimizer rule was applied
   inline void addAppliedRule(int level) { _appliedRules.emplace_back(level); }
@@ -299,6 +301,8 @@ class ExecutionPlan {
   /// @brief flag to indicate whether the variable usage is computed
   bool _varUsageComputed;
 
+  bool _isResponsibleForInitialize;
+
   /// @brief auto-increment sequence for node ids
   size_t _nextId;

View File

@@ -74,9 +74,7 @@ ExecutionStats::ExecutionStats()
       executionTime(0.0) {}
 
 ExecutionStats::ExecutionStats(VPackSlice const& slice)
-    : httpRequests(0),
-      fullCount(-1),
-      executionTime(0.0) {
+    : ExecutionStats() {
   if (!slice.isObject()) {
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                    "stats is not an object");
@@ -88,7 +86,6 @@ ExecutionStats::ExecutionStats(VPackSlice const& slice)
   scannedIndex = slice.get("scannedIndex").getNumber<int64_t>();
   filtered = slice.get("filtered").getNumber<int64_t>();
 
   if (slice.hasKey("httpRequests")) {
     httpRequests = slice.get("httpRequests").getNumber<int64_t>();
   }

View File

@@ -58,21 +58,22 @@ struct ExecutionStats {
     scannedIndex += summand.scannedIndex;
     filtered += summand.filtered;
     httpRequests += summand.httpRequests;
-    fullCount += summand.fullCount;
+    if (summand.fullCount > 0) {
+      // fullCount may be negative, don't add it then
+      fullCount += summand.fullCount;
+    }
     // intentionally no modification of executionTime
   }
 
-  /// @brief sumarize the delta of two other sets of ExecutionStats to us
-  void addDelta(ExecutionStats const& lastStats,
-                ExecutionStats const& newStats) {
-    writesExecuted += newStats.writesExecuted - lastStats.writesExecuted;
-    writesIgnored += newStats.writesIgnored - lastStats.writesIgnored;
-    scannedFull += newStats.scannedFull - lastStats.scannedFull;
-    scannedIndex += newStats.scannedIndex - lastStats.scannedIndex;
-    filtered += newStats.filtered - lastStats.filtered;
-    httpRequests += newStats.httpRequests - lastStats.httpRequests;
-    fullCount += newStats.fullCount - lastStats.fullCount;
-    // intentionally no modification of executionTime
-  }
+  void clear() {
+    writesExecuted = 0;
+    writesIgnored = 0;
+    scannedFull = 0;
+    scannedIndex = 0;
+    filtered = 0;
+    httpRequests = 0;
+    fullCount = -1;
+    executionTime = 0.0;
+  }
 
   /// @brief number of successfully executed write operations
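The -1 sentinel for fullCount means "not set", which is why the merge above only adds positive summands. A compact sketch of exactly that semantics:

#include <cassert>
#include <cstdint>

struct Stats {
  int64_t fullCount = -1;  // -1 means "no fullCount was reported"

  void add(Stats const& summand) {
    if (summand.fullCount > 0) {
      // fullCount may be negative (unset), don't add it then
      fullCount += summand.fullCount;
    }
  }
};

int main() {
  Stats total, unset, counted;
  counted.fullCount = 10;
  total.add(unset);    // ignored, total.fullCount stays -1
  total.add(counted);  // -1 + 10: the sentinel still takes part in the first add
  assert(total.fullCount == 9);
}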

View File

@@ -2350,13 +2350,15 @@ void arangodb::aql::scatterInClusterRule(Optimizer* opt, ExecutionPlan* plan,
         // Using Index for sort only works if all indexes are equal.
         auto first = allIndexes[0].getIndex();
-        for (auto const& path : first->fieldNames()) {
-          elements.emplace_back(sortVariable, !isSortReverse, path);
-        }
-        for (auto const& it : allIndexes) {
-          if (first != it.getIndex()) {
-            elements.clear();
-            break;
+        if (first->isSorted()) {
+          for (auto const& path : first->fieldNames()) {
+            elements.emplace_back(sortVariable, !isSortReverse, path);
+          }
+          for (auto const& it : allIndexes) {
+            if (first != it.getIndex()) {
+              elements.clear();
+              break;
+            }
           }
         }
       } else if (nodeType == ExecutionNode::INSERT ||
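The new isSorted() check matters because deriving SORT coverage from an index is only sound when that index actually returns documents in index order. A hedged sketch of the guard, collapsing the real fieldNames() (a vector of attribute paths) to flat strings for brevity:

#include <iostream>
#include <string>
#include <vector>

struct Index {
  virtual ~Index() = default;
  virtual bool isSorted() const = 0;
  virtual std::vector<std::string> fieldNames() const = 0;
};

struct SortedIndex : Index {
  bool isSorted() const override { return true; }
  std::vector<std::string> fieldNames() const override { return {"lat", "lon"}; }
};

struct UnsortedIndex : Index {
  bool isSorted() const override { return false; }
  std::vector<std::string> fieldNames() const override { return {"lat", "lon"}; }
};

std::vector<std::string> sortElements(Index const& first) {
  std::vector<std::string> elements;
  if (first.isSorted()) {  // the guard added above
    for (auto const& path : first.fieldNames()) {
      elements.push_back(path);
    }
  }
  return elements;  // empty result: the plan keeps its explicit sort
}

int main() {
  std::cout << sortElements(SortedIndex{}).size() << "\n";    // 2
  std::cout << sortElements(UnsortedIndex{}).size() << "\n";  // 0
}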
@@ -4098,47 +4100,54 @@ MMFilesGeoIndexInfo iterativePreorderWithCondition(EN::NodeType type, AstNode* r
   return MMFilesGeoIndexInfo{};
 }
 
-MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode*,AstNode*> const& pair, ExecutionPlan* plan, MMFilesGeoIndexInfo info){
-  using SV = std::vector<std::string>;
+MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode const*, AstNode const*> const& pair,
+                                                ExecutionPlan* plan, MMFilesGeoIndexInfo info){
+  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess1;
+  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess2;
+
   // first and second should be based on the same document - need to provide the document
   // in order to see which collection is bound to it and if that collections supports geo-index
-  if( !pair.first->isAttributeAccessForVariable() || !pair.second->isAttributeAccessForVariable()){
+  if (!pair.first->isAttributeAccessForVariable(attributeAccess1) ||
+      !pair.second->isAttributeAccessForVariable(attributeAccess2)) {
     info.invalidate();
     return info;
   }
+  TRI_ASSERT(attributeAccess1.first != nullptr);
+  TRI_ASSERT(attributeAccess2.first != nullptr);
 
   // expect access of the for doc.attribute
-  // TODO: more complex access path have to be added: loop until REFERENCE TYPE IS FOUND
-  auto setter1 = plan->getVarSetBy(static_cast<Variable const*>(pair.first->getMember(0)->getData())->id);
-  auto setter2 = plan->getVarSetBy(static_cast<Variable const*>(pair.second->getMember(0)->getData())->id);
-  SV accessPath1{pair.first->getString()};
-  SV accessPath2{pair.second->getString()};
-
-  if(setter1 == setter2){
-    if(setter1->getType() == EN::ENUMERATE_COLLECTION){
-      auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
-      auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
-      auto lcoll = coll->getCollection();
-      // TODO - check collection for suitable geo-indexes
-      for(auto indexShardPtr : lcoll->getIndexes()){
-        // get real index
-        arangodb::Index& index = *indexShardPtr.get();
-        // check if current index is a geo-index
-        if( index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
-            && index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX){
-          continue;
-        }
-        //check access paths of attributes in ast and those in index match
-        if( index.fieldNames()[0] == accessPath1 && index.fieldNames()[1] == accessPath2 ){
-          info.collectionNode = collNode;
-          info.index = indexShardPtr;
-          info.longitude = std::move(accessPath1);
-          info.latitude = std::move(accessPath2);
-          return info;
-        }
-      }
-    }
-  }
+  auto setter1 = plan->getVarSetBy(attributeAccess1.first->id);
+  auto setter2 = plan->getVarSetBy(attributeAccess2.first->id);
+
+  if (setter1 != nullptr &&
+      setter2 != nullptr &&
+      setter1 == setter2 &&
+      setter1->getType() == EN::ENUMERATE_COLLECTION) {
+    auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
+    auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
+    auto lcoll = coll->getCollection();
+    // TODO - check collection for suitable geo-indexes
+    for (auto indexShardPtr : lcoll->getIndexes()) {
+      // get real index
+      arangodb::Index& index = *indexShardPtr.get();
+
+      // check if current index is a geo-index
+      if (index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
+          && index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX) {
+        continue;
+      }
+
+      TRI_ASSERT(index.fields().size() == 2);
+
+      //check access paths of attributes in ast and those in index match
+      if (index.fields()[0] == attributeAccess1.second &&
+          index.fields()[1] == attributeAccess2.second) {
+        info.collectionNode = collNode;
+        info.index = indexShardPtr;
+        TRI_AttributeNamesJoinNested(attributeAccess1.second, info.longitude, true);
+        TRI_AttributeNamesJoinNested(attributeAccess2.second, info.latitude, true);
+        return info;
+      }
+    }
+  }
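The rewritten check compares full (possibly nested) attribute paths against the two index fields instead of single top-level names, which is what makes indexes like ["loca.tion.lat", "loca.tion.lon"] usable. A simplified sketch of the matching step, using plain string vectors in place of arangodb::basics::AttributeName:

#include <cassert>
#include <string>
#include <vector>

using AttributePath = std::vector<std::string>;  // e.g. {"loca", "tion", "lat"}

bool matchesGeoIndex(std::vector<AttributePath> const& indexFields,
                     AttributePath const& access1,
                     AttributePath const& access2) {
  // a geo index over two attributes always has exactly two fields
  return indexFields.size() == 2 &&
         indexFields[0] == access1 && indexFields[1] == access2;
}

int main() {
  std::vector<AttributePath> fields = {{"loca", "tion", "lat"},
                                       {"loca", "tion", "lon"}};
  assert(matchesGeoIndex(fields, {"loca", "tion", "lat"}, {"loca", "tion", "lon"}));
  assert(!matchesGeoIndex(fields, {"lat"}, {"lon"}));  // top-level access does not match
}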

View File

@@ -731,14 +731,12 @@ QueryResult Query::execute(QueryRegistry* registry) {
     }
 
     _trx->commit();
 
-    result.context = _trx->transactionContext();
-
     _engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
     auto stats = std::make_shared<VPackBuilder>();
-    _engine->_stats.toVelocyPack(*(stats.get()));
+    cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
 
-    cleanupPlanAndEngine(TRI_ERROR_NO_ERROR);
+    result.context = _trx->transactionContext();
 
     enterState(FINALIZATION);
@@ -913,18 +911,15 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
     _trx->commit();
 
-    _engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
-    auto stats = std::make_shared<VPackBuilder>();
-    _engine->_stats.toVelocyPack(*(stats.get()));
-
-    result.context = _trx->transactionContext();
-
     LOG_TOPIC(DEBUG, Logger::QUERIES)
         << TRI_microtime() - _startTime << " "
         << "Query::executeV8: before cleanupPlanAndEngine"
        << " this: " << (uintptr_t) this;
-    cleanupPlanAndEngine(TRI_ERROR_NO_ERROR);
+    result.context = _trx->transactionContext();
+
+    _engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
+    auto stats = std::make_shared<VPackBuilder>();
+    cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
 
     enterState(FINALIZATION);
@@ -1387,10 +1382,13 @@ std::string Query::getStateString() const {
 }
 
 /// @brief cleanup plan and engine for current query
-void Query::cleanupPlanAndEngine(int errorCode) {
+void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
   if (_engine != nullptr) {
     try {
       _engine->shutdown(errorCode);
+      if (statsBuilder != nullptr) {
+        _engine->_stats.toVelocyPack(*statsBuilder);
+      }
     } catch (...) {
       // shutdown may fail but we must not throw here
       // (we're also called from the destructor)
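Moving the statistics export into cleanupPlanAndEngine() means the stats are serialized after the engine has shut down, so work done during shutdown is included and the export happens exactly once. A sketch of the shape of that change, with hypothetical stand-ins for the engine and the builder:

#include <iostream>
#include <string>

struct Engine {
  int scanned = 0;
  void shutdown() { scanned += 5; }  // shutdown itself may still add stats
  void exportStats(std::string& out) { out = "scanned=" + std::to_string(scanned); }
};

void cleanupPlanAndEngine(Engine* engine, std::string* statsOut = nullptr) {
  if (engine != nullptr) {
    engine->shutdown();
    if (statsOut != nullptr) {
      engine->exportStats(*statsOut);  // export only after shutdown
    }
  }
}

int main() {
  Engine engine;
  engine.scanned = 37;
  std::string stats;
  cleanupPlanAndEngine(&engine, &stats);
  std::cout << stats << "\n";  // scanned=42, includes the shutdown work
}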

View File

@@ -378,7 +378,7 @@ class Query {
   void enterState(ExecutionState);
 
   /// @brief cleanup plan and engine for current query
-  void cleanupPlanAndEngine(int);
+  void cleanupPlanAndEngine(int, VPackBuilder* statsBuilder = nullptr);
 
   /// @brief create a TransactionContext
   std::shared_ptr<arangodb::TransactionContext> createTransactionContext();

View File

@@ -697,7 +697,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
   try {
     res = query->trx()->lockCollections();
   } catch (...) {
-    LOG(ERR) << "lock lead to an exception";
     generateError(rest::ResponseCode::SERVER_ERROR,
                   TRI_ERROR_HTTP_SERVER_ERROR,
                   "lock lead to an exception");
@@ -726,15 +725,10 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
         if (items.get() == nullptr) {
           answerBuilder.add("exhausted", VPackValue(true));
           answerBuilder.add("error", VPackValue(false));
-          answerBuilder.add(VPackValue("stats"));
-          query->getStats(answerBuilder);
         } else {
           try {
             items->toVelocyPack(query->trx(), answerBuilder);
-            answerBuilder.add(VPackValue("stats"));
-            query->getStats(answerBuilder);
           } catch (...) {
-            LOG(ERR) << "cannot transform AqlItemBlock to VelocyPack";
             generateError(rest::ResponseCode::SERVER_ERROR,
                           TRI_ERROR_HTTP_SERVER_ERROR,
                           "cannot transform AqlItemBlock to VelocyPack");
@@ -760,7 +754,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
             skipped = block->skipSomeForShard(atLeast, atMost, shardId);
           }
         } catch (...) {
-          LOG(ERR) << "skipSome lead to an exception";
           generateError(rest::ResponseCode::SERVER_ERROR,
                         TRI_ERROR_HTTP_SERVER_ERROR,
                         "skipSome lead to an exception");
@@ -768,8 +761,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
         }
         answerBuilder.add("skipped", VPackValue(static_cast<double>(skipped)));
         answerBuilder.add("error", VPackValue(false));
-        answerBuilder.add(VPackValue("stats"));
-        query->getStats(answerBuilder);
       } else if (operation == "skip") {
         auto number =
             VelocyPackHelper::getNumericValue<size_t>(querySlice, "number", 1);
@@ -789,10 +780,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
         }
         answerBuilder.add("exhausted", VPackValue(exhausted));
         answerBuilder.add("error", VPackValue(false));
-        answerBuilder.add(VPackValue("stats"));
-        query->getStats(answerBuilder);
       } catch (...) {
-        LOG(ERR) << "skip lead to an exception";
         generateError(rest::ResponseCode::SERVER_ERROR,
                       TRI_ERROR_HTTP_SERVER_ERROR,
                       "skip lead to an exception");
@@ -803,7 +791,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
       try {
         res = query->engine()->initialize();
       } catch (...) {
-        LOG(ERR) << "initialize lead to an exception";
         generateError(rest::ResponseCode::SERVER_ERROR,
                       TRI_ERROR_HTTP_SERVER_ERROR,
                       "initialize lead to an exception");
@@ -825,7 +812,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
           res = query->engine()->initializeCursor(items.get(), pos);
         }
       } catch (...) {
-        LOG(ERR) << "initializeCursor lead to an exception";
         generateError(rest::ResponseCode::SERVER_ERROR,
                       TRI_ERROR_HTTP_SERVER_ERROR,
                       "initializeCursor lead to an exception");
@@ -833,8 +819,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
       }
       answerBuilder.add("error", VPackValue(res != TRI_ERROR_NO_ERROR));
      answerBuilder.add("code", VPackValue(static_cast<double>(res)));
-      answerBuilder.add(VPackValue("stats"));
-      query->getStats(answerBuilder);
     } else if (operation == "shutdown") {
       int res = TRI_ERROR_INTERNAL;
       int errorCode = VelocyPackHelper::getNumericValue<int>(
@@ -854,7 +838,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
         _queryRegistry->destroy(_vocbase, _qId, errorCode);
         _qId = 0;
       } catch (...) {
-        LOG(ERR) << "shutdown lead to an exception";
         generateError(rest::ResponseCode::SERVER_ERROR,
                       TRI_ERROR_HTTP_SERVER_ERROR,
                       "shutdown lead to an exception");
@@ -863,7 +846,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
       answerBuilder.add("error", VPackValue(res != TRI_ERROR_NO_ERROR));
       answerBuilder.add("code", VPackValue(res));
     } else {
-      LOG(ERR) << "Unknown operation!";
       generateError(rest::ResponseCode::NOT_FOUND,
                     TRI_ERROR_HTTP_NOT_FOUND);
       return;
@@ -875,7 +857,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
     generateError(rest::ResponseCode::BAD, e.code());
     return;
   } catch (...) {
-    LOG(ERR) << "OUT OF MEMORY when handling query.";
     generateError(rest::ResponseCode::BAD, TRI_ERROR_OUT_OF_MEMORY);
     return;
   }

View File

@@ -2033,6 +2033,7 @@ int flushWalOnAllDBServers(bool waitForSync, bool waitForCollector) {
   }
 
   if (nrok != (int)DBservers.size()) {
+    LOG(WARN) << "could not flush WAL on all servers. confirmed: " << nrok << ", expected: " << DBservers.size();
     return TRI_ERROR_INTERNAL;
   }

View File

@@ -54,6 +54,8 @@ void DBServerAgencySync::work() {
 DBServerAgencySyncResult DBServerAgencySync::execute() {
   // default to system database
 
+  double startTime = TRI_microtime();
+
   LOG_TOPIC(DEBUG, Logger::HEARTBEAT) << "DBServerAgencySync::execute starting";
   DatabaseFeature* database =
       ApplicationServer::getFeature<DatabaseFeature>("Database");
@@ -80,6 +82,11 @@ DBServerAgencySyncResult DBServerAgencySync::execute() {
     return result;
   }
 
+  double now = TRI_microtime();
+  if (now - startTime > 5.0) {
+    LOG(INFO) << "DBServerAgencySync::execute took more than 5s to get free V8 context, starting handle-plan-change now";
+  }
+
   TRI_DEFER(V8DealerFeature::DEALER->exitContext(context));
   auto isolate = context->_isolate;
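The added timing is a plain elapsed-time guard around a potentially slow step. The same pattern, sketched with std::chrono instead of TRI_microtime():

#include <chrono>
#include <iostream>
#include <thread>

int main() {
  auto start = std::chrono::steady_clock::now();
  // ... wait for a free V8 context / do the slow step ...
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
  if (elapsed.count() > 5.0) {
    std::cout << "took more than 5s to get a free V8 context\n";
  }
}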

View File

@@ -118,7 +118,7 @@ friend class MMFilesGeoIndexIterator;
   bool canBeDropped() const override { return true; }
 
-  bool isSorted() const override { return false; }
+  bool isSorted() const override { return true; }
 
   bool hasSelectivityEstimate() const override { return false; }

View File

@@ -1,17 +1,12 @@
 # -*- mode: CMAKE; -*-
 
 # these are the install targets for the client package.
 # we can't use RUNTIME DESTINATION here.
 
-set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
-execute_process(COMMAND mkdir -p ${STRIP_DIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_SBINDIR})
+# include(/tmp/dump_vars.cmake)
+message( "CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}/ CMAKE_INSTALL_SBINDIR ${CMAKE_INSTALL_SBINDIR}")
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_SBINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
+)

View File

@@ -2,63 +2,30 @@
 # these are the install targets for the client package.
 # we can't use RUNTIME DESTINATION here.
 
-set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
-execute_process(COMMAND mkdir -p ${STRIP_DIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}")
+
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}")
+
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}")
+
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}")
+
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}")

View File

@@ -19,9 +19,9 @@ endif()
 # debug info directory:
 if (${CMAKE_INSTALL_LIBDIR} STREQUAL "usr/lib64")
   # some systems have weird places for usr/lib:
-  set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/${CMAKE_PROJECT_NAME}")
+  set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/")
 else ()
-  set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_PROJECT_NAME}")
+  set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/")
 endif ()
 
 set(CMAKE_INSTALL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_SYSCONFDIR}/${CMAKE_PROJECT_NAME}")

View File

@@ -157,3 +157,39 @@ macro(to_native_path sourceVarName)
   endif()
   set("INC_${sourceVarName}" ${myVar})
 endmacro()
+
+macro(install_debinfo
+    STRIP_DIR
+    USER_SUB_DEBINFO_DIR
+    USER_FILE
+    USER_STRIP_FILE)
+
+  set(SUB_DEBINFO_DIR ${USER_SUB_DEBINFO_DIR})
+  set(FILE ${USER_FILE})
+  set(STRIP_FILE ${USER_STRIP_FILE})
+  execute_process(COMMAND mkdir -p ${STRIP_DIR})
+  if (NOT MSVC AND CMAKE_STRIP)
+    execute_process(COMMAND "rm" -f ${STRIP_FILE})
+    execute_process(
+      COMMAND ${FILE_EXECUTABLE} ${FILE}
+      OUTPUT_VARIABLE FILE_RESULT)
+    string(REGEX
+      REPLACE ".*=([a-z0-9]*),.*" "\\1"
+      FILE_CHECKSUM
+      ${FILE_RESULT}
+      )
+    if (NOT ${FILE_CHECKSUM} STREQUAL "")
+      string(SUBSTRING ${FILE_CHECKSUM} 0 2 SUB_DIR)
+      string(SUBSTRING ${FILE_CHECKSUM} 2 -1 STRIP_FILE)
+      set(SUB_DEBINFO_DIR .build-id/${SUB_DIR})
+    endif()
+    execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
+    set(FILE ${STRIP_FILE})
+  endif()
+  install(
+    PROGRAMS ${FILE}
+    DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${SUB_DEBINFO_DIR})
+endmacro()
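The checksum handling in the macro splits a build-id into a two-character directory and the remaining file name, the layout debuggers use for build-id lookups. A sketch of just that split (example checksum, not taken from a real binary):

#include <iostream>
#include <string>

int main() {
  std::string checksum = "6f00e088f29ab37425d5ecbe8b2f54d1768e4e9a";  // hypothetical build-id
  std::string subDir = checksum.substr(0, 2);  // "6f" -> .build-id/6f
  std::string file = checksum.substr(2);       // the rest becomes the file name
  std::cout << ".build-id/" << subDir << "/" << file << "\n";
}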

View File

@@ -1,7 +1,7 @@
 ################################################################################
 # the client package is a complete cmake sub package.
 ################################################################################
 
-project(PACKAGE-DBG)
+project(@CMAKE_PROJECT_NAME@)
 cmake_minimum_required(VERSION 2.8)
 
 ################################################################################
@@ -15,6 +15,9 @@ set(CROSS_COMPILING @CROSS_COMPILING@)
 set(CMAKE_INSTALL_BINDIR @CMAKE_INSTALL_BINDIR@)
 set(CMAKE_INSTALL_FULL_BINDIR @CMAKE_INSTALL_FULL_BINDIR@)
+set(CMAKE_INSTALL_SBINDIR @CMAKE_INSTALL_SBINDIR@)
+set(CMAKE_INSTALL_FULL_SBINDIR @CMAKE_INSTALL_FULL_SBINDIR@)
 set(CMAKE_INSTALL_DATAROOTDIR @CMAKE_INSTALL_DATAROOTDIR@)
 set(CMAKE_INSTALL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_DATAROOTDIR_ARANGO@)
 set(CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO@)

View File

@@ -69,6 +69,10 @@
       return shortName;
     },
 
+    getDatabaseShortName: function (id) {
+      return this.getCoordinatorShortName(id);
+    },
+
     getDatabaseServerId: function (shortname) {
       var id;
       if (window.clusterHealth) {

View File

@@ -186,14 +186,15 @@
         async: true,
         success: function (data) {
           if (data.id) {
-            arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
+            console.log(toServer);
+            arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
             window.setTimeout(function () {
               window.App.shardsView.render();
             }, 3000);
           }
         },
         error: function () {
-          arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
+          arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
         }
       });

View File

@@ -120,14 +120,12 @@ function startReadLockOnLeader (endpoint, database, collName, timeout) {
   const id = r.id;
 
   var body = { 'id': id, 'collection': collName, 'ttl': timeout };
-  r = request({ url: url + '/_api/replication/holdReadLockCollection',
-                body: JSON.stringify(body),
-                method: 'POST', headers: {'x-arango-async': true} });
-  if (r.status !== 202) {
-    console.error('startReadLockOnLeader: Could not start read lock for shard',
-                  collName, r);
-    return false;
-  }
+  request({ url: url + '/_api/replication/holdReadLockCollection',
+            body: JSON.stringify(body),
+            method: 'POST', headers: {'x-arango-async': true} });
+  // Intentionally do not look at the outcome, even in case of an error
+  // we must make sure that the read lock on the leader is not active!
+  // This is done automatically below.
 
   var count = 0;
   while (++count < 20) { // wait for some time until read lock established:
@@ -170,7 +168,10 @@ function startReadLockOnLeader (endpoint, database, collName, timeout) {
 // /////////////////////////////////////////////////////////////////////////////
 
 function cancelReadLockOnLeader (endpoint, database, lockJobId) {
-  var url = endpointToURL(endpoint) + '/_db/' + database +
+  // Note that we always use the _system database here because the actual
+  // database might be gone already on the leader and we need to cancel
+  // the read lock under all circumstances.
+  var url = endpointToURL(endpoint) + '/_db/_system' +
     '/_api/replication/holdReadLockCollection';
   var r;
   var body = {'id': lockJobId};
@@ -181,7 +182,8 @@ function cancelReadLockOnLeader (endpoint, database, lockJobId) {
     return false;
   }
   if (r.status !== 200) {
-    console.error('cancelReadLockOnLeader: error', r);
+    console.error('cancelReadLockOnLeader: error', lockJobId, r.status,
+                  r.message, r.body, r.json);
     return false;
   }
   console.debug('cancelReadLockOnLeader: success');
@@ -453,6 +455,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
   // synchronize this shard from the leader
   // this function will throw if anything goes wrong
 
+  var startTime = new Date();
   var isStopping = require('internal').isStopping;
   var ourselves = global.ArangoServerState.id();
@@ -485,8 +488,9 @@ function synchronizeOneShard (database, shard, planId, leader) {
         planned[0] !== leader) {
       // Things have changed again, simply terminate:
       terminateAndStartOther();
-      console.debug('synchronizeOneShard: cancelled, %s/%s, %s/%s',
-                    database, shard, database, planId);
+      let endTime = new Date();
+      console.debug('synchronizeOneShard: cancelled, %s/%s, %s/%s, started %s, ended %s',
+                    database, shard, database, planId, startTime.toString(), endTime.toString());
       return;
     }
     var current = [];
@@ -500,8 +504,9 @@ function synchronizeOneShard (database, shard, planId, leader) {
       }
       // We are already there, this is rather strange, but never mind:
      terminateAndStartOther();
-      console.debug('synchronizeOneShard: already done, %s/%s, %s/%s',
-                    database, shard, database, planId);
+      let endTime = new Date();
+      console.debug('synchronizeOneShard: already done, %s/%s, %s/%s, started %s, ended %s',
+                    database, shard, database, planId, startTime.toString(), endTime.toString());
       return;
     }
     console.debug('synchronizeOneShard: waiting for leader, %s/%s, %s/%s',
@@ -522,9 +527,16 @@ function synchronizeOneShard (database, shard, planId, leader) {
     if (isStopping()) {
       throw 'server is shutting down';
     }
+    let startTime = new Date();
     sy = rep.syncCollection(shard,
       { endpoint: ep, incremental: true,
        keepBarrier: true, useCollectionId: false });
+    let endTime = new Date();
+    let longSync = false;
+    if (endTime - startTime > 5000) {
+      console.error('synchronizeOneShard: long call to syncCollection for shard', shard, JSON.stringify(sy), "start time: ", startTime.toString(), "end time: ", endTime.toString());
+      longSync = true;
+    }
     if (sy.error) {
       console.error('synchronizeOneShard: could not initially synchronize',
                     'shard ', shard, sy);
@@ -532,7 +544,15 @@ function synchronizeOneShard (database, shard, planId, leader) {
     } else {
       if (sy.collections.length === 0 ||
           sy.collections[0].name !== shard) {
+        if (longSync) {
+          console.error('synchronizeOneShard: long sync, before cancelBarrier',
+                        new Date().toString());
+        }
         cancelBarrier(ep, database, sy.barrierId);
+        if (longSync) {
+          console.error('synchronizeOneShard: long sync, after cancelBarrier',
+                        new Date().toString());
+        }
         throw 'Shard ' + shard + ' seems to be gone from leader!';
       } else {
         // Now start a read transaction to stop writes:
@@ -592,14 +612,17 @@ function synchronizeOneShard (database, shard, planId, leader) {
       } else if (err2 && err2.errorNum === 1402 && err2.errorMessage.match(/HTTP 404/)) {
         logLevel = 'debug';
       }
-      console[logLevel]("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s",
-                        database, shard, database, planId, JSON.stringify(err2));
+      let endTime = new Date();
+      console[logLevel]("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s, started: %s, ended: %s",
+                        database, shard, database, planId, JSON.stringify(err2),
+                        startTime.toString(), endTime.toString());
     }
   }
   // Tell others that we are done:
   terminateAndStartOther();
-  console.debug('synchronizeOneShard: done, %s/%s, %s/%s',
-                database, shard, database, planId);
+  let endTime = new Date();
+  console.debug('synchronizeOneShard: done, %s/%s, %s/%s, started: %s, ended: %s',
+                database, shard, database, planId, startTime.toString(), endTime.toString());
 }
 
 // /////////////////////////////////////////////////////////////////////////////

View File

@@ -55,6 +55,7 @@ function optimizerRuleTestSuite() {
   var ruleName = "geoindex";
   var colName = "UnitTestsAqlOptimizer" + ruleName.replace(/-/g, "_");
+  var colName2 = colName + "2";
 
   var geocol;
   var sortArray = function (l, r) {
@@ -124,11 +125,21 @@ function optimizerRuleTestSuite() {
       internal.db._drop(colName);
       geocol = internal.db._create(colName);
      geocol.ensureIndex({type:"geo", fields:["lat","lon"]});
-      for (var lat=-40; lat <=40 ; ++lat){
-        for (var lon=-40; lon <= 40; ++lon){
+      var lat, lon;
+      for (lat=-40; lat <=40 ; ++lat) {
+        for (lon=-40; lon <= 40; ++lon) {
           geocol.insert({lat,lon});
         }
       }
+
+      internal.db._drop(colName2);
+      geocol = internal.db._create(colName2);
+      geocol.ensureIndex({type:"geo", fields:["loca.tion.lat","loca.tion.lon"]});
+      for (lat=-40; lat <=40 ; ++lat) {
+        for (lon=-40; lon <= 40; ++lon) {
+          geocol.insert({ loca : { tion : { lat , lon } } });
+        }
+      }
     },
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -137,6 +148,7 @@ function optimizerRuleTestSuite() {
     tearDown : function () {
       internal.db._drop(colName);
+      internal.db._drop(colName2);
       geocol = null;
     },
@@ -145,7 +157,13 @@ function optimizerRuleTestSuite() {
       geocol.ensureIndex({ type: "hash", fields: [ "y", "z" ], unique: false });
       var queries = [
-        { string : "FOR d IN " + colName + " SORT distance(d.lat,d.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
+        { string : "FOR d IN " + colName + " SORT distance(d.lat, d.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
+          cluster : false,
+          sort : false,
+          filter : false,
+          index : true
+        },
+        { string : "FOR d IN " + colName2 + " SORT distance(d.loca.tion.lat, d.loca.tion.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
           cluster : false,
           sort : false,
          filter : false,
@@ -213,11 +231,15 @@ function optimizerRuleTestSuite() {
     testRuleRemoveNodes : function () {
       if(enabled.removeNodes){
         var queries = [
           [ "FOR d IN " + colName + " SORT distance(d.lat,d.lon, 0 ,0 ) ASC LIMIT 5 RETURN d", false, false, false ],
           [ "FOR d IN " + colName + " SORT distance(0, 0, d.lat,d.lon ) ASC LIMIT 5 RETURN d", false, false, false ],
           [ "FOR d IN " + colName + " FILTER distance(0, 0, d.lat,d.lon ) < 111200 RETURN d", false, false, false ],
           // [ "FOR i IN 1..2 FOR d IN geocol SORT distance(i,2,d.lat,d.lon) ASC LIMIT 5 RETURN d", false, false, false ],
         ];
+
+        var queries2 = [
+          [ "FOR d IN " + colName2 + " SORT distance(d.loca.tion.lat,d.loca.tion.lon, 0 ,0 ) ASC LIMIT 5 RETURN d", false, false, false ]
+        ];
 
         var expected = [
           [[0,0], [-1,0], [0,1], [1,0], [0,-1]],
@@ -234,6 +256,16 @@ function optimizerRuleTestSuite() {
           assertEqual(expected[qindex].sort(),pairs.sort());
           //expect(expected[qindex].sort()).to.be.equal(result.json.sort())
         });
+
+        queries2.forEach(function(query, qindex) {
+          var result = AQL_EXECUTE(query[0]);
+          expect(expected[qindex].length).to.be.equal(result.json.length);
+          var pairs = result.json.map(function(res){
+            return [res.loca.tion.lat,res.loca.tion.lon];
+          });
+          assertEqual(expected[qindex].sort(),pairs.sort());
+          //expect(expected[qindex].sort()).to.be.equal(result.json.sort())
+        });
       }
     }, // testRuleSort

View File

@@ -280,7 +280,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
   // hostname and port (e.g. [address]:port)
   if (found != std::string::npos && found > 2 && found + 2 < copy.size()) {
-    uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 2));
+    int64_t value = StringUtils::int64(copy.substr(found + 2));
+    // check port over-/underrun
+    if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
+      LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
+      return nullptr;
+    }
+    uint16_t port = static_cast<uint16_t>(value);
     std::string host = copy.substr(1, found - 1);
 
     return new EndpointIpV6(type, protocol, encryption, listenBacklog,
@@ -306,7 +312,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
   // hostname and port
   if (found != std::string::npos && found + 1 < copy.size()) {
-    uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 1));
+    int64_t value = StringUtils::int64(copy.substr(found + 1));
+    // check port over-/underrun
+    if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
+      LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
+      return nullptr;
+    }
+    uint16_t port = static_cast<uint16_t>(value);
    std::string host = copy.substr(0, found);
 
     return new EndpointIpV4(type, protocol, encryption, listenBacklog,
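The fix parses the port into a wider integer first and rejects values outside the uint16_t range before narrowing, instead of letting the cast wrap silently. A self-contained sketch of the same check:

#include <cstdint>
#include <iostream>
#include <limits>
#include <string>

bool parsePort(std::string const& s, uint16_t& port) {
  int64_t value;
  try {
    value = std::stoll(s);
  } catch (...) {
    return false;  // not a number at all
  }
  if (value < (std::numeric_limits<uint16_t>::min)() ||
      value > (std::numeric_limits<uint16_t>::max)()) {
    return false;  // over-/underrun: the old cast silently wrapped
  }
  port = static_cast<uint16_t>(value);
  return true;
}

int main() {
  uint16_t port;
  std::cout << parsePort("8529", port) << "\n";   // 1
  std::cout << parsePort("70000", port) << "\n";  // 0 (would have wrapped to 4464)
  std::cout << parsePort("-1", port) << "\n";     // 0
}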

View File

@@ -88,8 +88,9 @@ class LoggerStream {
     size_t i = 0;
     size_t const n = obj.size();
     for (auto const& it : obj) {
+      _out << it;
       if (++i < n) {
-        _out << it << ", ";
+        _out << ", ";
       }
     }
     _out << ']';
@@ -102,8 +103,9 @@ class LoggerStream {
     size_t i = 0;
     size_t const n = obj.size();
     for (auto const& it : obj) {
+      _out << it;
       if (++i < n) {
-        _out << it << ", ";
+        _out << ", ";
      }
     }
     _out << '}';
@@ -116,8 +118,9 @@ class LoggerStream {
     size_t i = 0;
     size_t n = obj.size();
     for (auto const& it : obj) {
+      _out << it;
       if (++i < n) {
-        _out << it << ", ";
+        _out << ", ";
       }
       _out << it.first << " => " << it.second;
     }
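These hunks fix the same off-by-one: the element was only printed when it was not the last one, so the final element of every logged container was dropped. The corrected element-then-separator pattern in isolation:

#include <iostream>
#include <vector>

template <typename T>
void printList(std::ostream& out, std::vector<T> const& obj) {
  out << '[';
  size_t i = 0;
  size_t const n = obj.size();
  for (auto const& it : obj) {
    out << it;      // always print the element
    if (++i < n) {
      out << ", ";  // separator only between elements
    }
  }
  out << ']';
}

int main() {
  printList(std::cout, std::vector<int>{1, 2, 3});  // [1, 2, 3]
  std::cout << "\n";
}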

View File

@@ -0,0 +1,66 @@
#!/bin/bash

##python3-setuptools
##
##python setup.py install
##
##node npm
##
##https://github.com/GitbookIO/gitbook
## npm install gitbook-cli -g
##
## http://calibre-ebook.com/download

test_tools(){
    if ! type easy_install3 >> /dev/null; then
        echo "you are missing setuptools"
        echo "apt-get install python-setuptools"
        exit 1
    fi

    if ! type node >> /dev/null; then
        echo "you are missing node"
        echo "apt-get install nodejs nodejs-legacy"
        exit 1
    fi

    if ! type npm >> /dev/null; then
        echo "you are missing npm"
        echo "apt-get install npm"
        exit 1
    fi

    if ! type calibre >> /dev/null; then
        echo "you are missing calibre"
        echo "apt-get install calibre-bin"
        exit 1
    fi
}

install_tools(){
    (
        if ! [[ -f markdown-pp ]]; then
            git clone https://github.com/arangodb-helper/markdown-pp/
        fi
        cd markdown-pp
        python2 setup.py install --user
    )
    npm install gitbook-cli
}

main(){
    #test for basic tools
    test_tools

    #cd into target dir
    mkdir -p "$1"
    cd "$1" || { echo "unable to change into $1"; exit 1; }

    install_tools
}

main "$@"