mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into engine-api
This commit is contained in:
commit ab11410911

CHANGELOG (25 lines changed)
@@ -1,16 +1,39 @@
devel
-----


v3.2.alpha1 (2017-02-05)
------------------------

* added figure `httpRequests` to AQL query statistics

* removed revisions cache intermediate layer implementation

* obsoleted startup options `--database.revision-cache-chunk-size` and
  `--database.revision-cache-target-size`

* fixed potential port number over-/underruns

* added startup option `--log.shorten-filenames` for controlling whether filenames
  in log messages should be shortened to just the filename without the absolute path

-* removed IndexThreadFeature, made --database.index-threads option obsolete
+* removed IndexThreadFeature, made `--database.index-threads` option obsolete

* changed index filling to make it more parallel, dispatching tasks to boost::asio

* more detailed stacktraces in Foxx apps


v3.1.11 (XXXX-XX-XX)
--------------------

* fixed a sort issue in cluster, occurring when one of the local sort buffers of a
  GatherNode was empty

* reduced the number of HTTP requests made for certain kinds of join queries in cluster,
  leading to a speedup of some join queries


v3.1.10 (2017-XX-XX)
--------------------
@@ -489,6 +489,8 @@ if (USE_MAINTAINER_MODE)
  find_program(AWK_EXECUTABLE awk)
endif ()

+find_program(FILE_EXECUTABLE file)
+
################################################################################
## FAILURE TESTS
################################################################################
@@ -90,6 +90,42 @@ Create a geo index for a hash array attribute:
   @END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock geoIndexCreateForArrayAttribute2

Use GeoIndex with AQL SORT statement:

@startDocuBlockInline geoIndexSortOptimization
@EXAMPLE_ARANGOSH_OUTPUT{geoIndexSortOptimization}
~db._create("geoSort")
db.geoSort.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
| for (i = -90; i <= 90; i += 10) {
|   for (j = -180; j <= 180; j += 10) {
|     db.geoSort.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
|   }
}
var query = "FOR doc in geoSort SORT distance(doc.latitude, doc.longitude, 0, 0) LIMIT 5 RETURN doc"
db._explain(query, {}, {colors: false});
db._query(query);
~db._drop("geoSort")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock geoIndexSortOptimization

Use GeoIndex with AQL FILTER statement:

@startDocuBlockInline geoIndexFilterOptimization
@EXAMPLE_ARANGOSH_OUTPUT{geoIndexFilterOptimization}
~db._create("geoFilter")
db.geoFilter.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
| for (i = -90; i <= 90; i += 10) {
|   for (j = -180; j <= 180; j += 10) {
|     db.geoFilter.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
|   }
}
var query = "FOR doc in geoFilter FILTER distance(doc.latitude, doc.longitude, 0, 0) < 2000 RETURN doc"
db._explain(query, {}, {colors: false});
db._query(query);
~db._drop("geoFilter")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock geoIndexFilterOptimization


<!-- js/common/modules/@arangodb/arango-collection-common.js-->
@startDocuBlock collectionGeo
@@ -273,8 +273,10 @@ The geo index provides operations to find documents with coordinates nearest to
comparison coordinate, and to find documents with coordinates that are within a specifiable
radius around a comparison coordinate.

-The geo index is used via dedicated functions in AQL or the simple queries functions,
-but will not be used for other types of queries or conditions.
+The geo index is used via dedicated functions in AQL and the simple queries
+functions, and it is also applied implicitly when an AQL SORT or FILTER uses the
+distance function. It will not be used for other types of queries or conditions.


### Fulltext Index
@@ -71,7 +71,10 @@ different usage scenarios:

      { "coords": [ 50.9406645, 6.9599115 ] }

-  Geo indexes will only be invoked via special functions.
+  Geo indexes will be invoked via special functions or via AQL optimization. The
+  optimization can be triggered when a collection with a geo index is enumerated
+  and a SORT or FILTER statement is used in conjunction with the distance
+  function.

- fulltext index: a fulltext index can be used to index all words contained in
  a specific attribute of all documents in a collection. Only words with a
@@ -120,11 +120,13 @@ fi
VERSION_MAJOR=`echo $VERSION | awk -F. '{print $1}'`
VERSION_MINOR=`echo $VERSION | awk -F. '{print $2}'`
VERSION_REVISION=`echo $VERSION | awk -F. '{print $3}'`
+VERSION_PACKAGE="1"

cat CMakeLists.txt \
  | sed -e "s~set(ARANGODB_VERSION_MAJOR.*~set(ARANGODB_VERSION_MAJOR \"$VERSION_MAJOR\")~" \
  | sed -e "s~set(ARANGODB_VERSION_MINOR.*~set(ARANGODB_VERSION_MINOR \"$VERSION_MINOR\")~" \
  | sed -e "s~set(ARANGODB_VERSION_REVISION.*~set(ARANGODB_VERSION_REVISION \"$VERSION_REVISION\")~" \
+  | sed -e "s~set(ARANGODB_PACKAGE_REVISION.*~set(ARANGODB_PACKAGE_REVISION \"$VERSION_PACKAGE\")~" \
  > CMakeLists.txt.tmp

mv CMakeLists.txt.tmp CMakeLists.txt
@@ -49,8 +49,6 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
// --SECTION-- macros
// -----------------------------------------------------------------------------

-#define DELETE_ENDPOINT(e) if (e != 0) delete e;
-
#define FACTORY_NAME(name) name ## Factory

#define FACTORY(name, specification) arangodb::Endpoint::FACTORY_NAME(name)(specification)

@@ -58,12 +56,12 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
#define CHECK_ENDPOINT_FEATURE(type, specification, feature, expected) \
  e = FACTORY(type, specification); \
  BOOST_CHECK_EQUAL((expected), (e->feature())); \
-  DELETE_ENDPOINT(e);
+  delete e;

#define CHECK_ENDPOINT_SERVER_FEATURE(type, specification, feature, expected) \
  e = arangodb::Endpoint::serverFactory(specification, 1, true); \
  BOOST_CHECK_EQUAL((expected), (e->feature())); \
-  DELETE_ENDPOINT(e);
+  delete e;

// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
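(Editor's note, not part of the diff: the removed DELETE_ENDPOINT macro guarded delete with a null check. C++ already guarantees that delete on a null pointer is a no-op, so the plain `delete e;` replacement is behavior-preserving. A minimal sketch:)

    struct Endpoint { /* stand-in for arangodb::Endpoint */ };

    int main() {
      Endpoint* e = nullptr;
      delete e;  // well-defined no-op for null pointers; no guard needed
      return 0;
    }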
@@ -118,6 +116,11 @@ BOOST_AUTO_TEST_CASE (EndpointInvalid) {
  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("ssl@tcp://127.0.0.1:8529"));
  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:8529"));
  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:"));
+
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65536"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65537"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:-1"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:6555555555"));
}

////////////////////////////////////////////////////////////////////////////////
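(Editor's note: the new cases exercise the port over-/underrun fix mentioned in the CHANGELOG. A hedged sketch of the range check such a parser has to perform; the helper name is hypothetical and the real code in the Endpoint library may differ:)

    #include <cstdint>

    // Hypothetical helper: usable TCP ports occupy 1..65535, so 65536,
    // 6555555555 (overflow) and -1 (underflow) must all be rejected.
    bool isValidPort(int64_t port) {
      return port > 0 && port <= 65535;
    }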
@@ -491,7 +494,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer1) {

  e = arangodb::Endpoint::serverFactory("tcp://127.0.0.1", 1, true);
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}

////////////////////////////////////////////////////////////////////////////////

@@ -503,7 +506,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer2) {

  e = arangodb::Endpoint::serverFactory("ssl://127.0.0.1", 1, true);
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}

////////////////////////////////////////////////////////////////////////////////

@@ -516,7 +519,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer3) {

  e = arangodb::Endpoint::serverFactory("unix:///tmp/socket", 1, true);
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}
#endif

@@ -529,7 +532,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient1) {

  e = arangodb::Endpoint::clientFactory("tcp://127.0.0.1");
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}

////////////////////////////////////////////////////////////////////////////////

@@ -541,7 +544,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient2) {

  e = arangodb::Endpoint::clientFactory("ssl://127.0.0.1");
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}

////////////////////////////////////////////////////////////////////////////////

@@ -554,7 +557,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient3) {

  e = arangodb::Endpoint::clientFactory("unix:///tmp/socket");
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}
#endif

@@ -575,7 +578,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerTcpIpv4WithPort) {
  BOOST_CHECK_EQUAL(667, e->port());
  BOOST_CHECK_EQUAL("127.0.0.1:667", e->hostAndPort());
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}

////////////////////////////////////////////////////////////////////////////////

@@ -596,7 +599,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerUnix) {
  BOOST_CHECK_EQUAL(0, e->port());
  BOOST_CHECK_EQUAL("localhost", e->hostAndPort());
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}
#endif

@@ -617,7 +620,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientSslIpV6WithPortHttp) {
  BOOST_CHECK_EQUAL(43425, e->port());
  BOOST_CHECK_EQUAL("[0001:0002:0003:0004:0005:0006:0007:0008]:43425", e->hostAndPort());
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}

////////////////////////////////////////////////////////////////////////////////

@@ -637,7 +640,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientTcpIpv6WithoutPort) {
  BOOST_CHECK_EQUAL(8529, e->port());
  BOOST_CHECK_EQUAL("[::]:8529", e->hostAndPort());
  BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
}

BOOST_AUTO_TEST_SUITE_END()
@@ -38,6 +38,7 @@
using namespace arangodb::basics;

static bool Initialized = false;
+static uint64_t counter = 0;

// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down

@@ -73,8 +74,6 @@ struct CFilesSetup {
  }

  StringBuffer* writeFile (const char* blob) {
-    static uint64_t counter = 0;
-
    StringBuffer* filename = new StringBuffer(TRI_UNKNOWN_MEM_ZONE);
    filename->appendText(_directory);
    filename->appendText("/tmp-");

@@ -108,6 +107,71 @@ struct CFilesSetup {

BOOST_FIXTURE_TEST_SUITE(CFilesTest, CFilesSetup)

+BOOST_AUTO_TEST_CASE (tst_createdirectory) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename = out.str();
+  long unused1;
+  std::string unused2;
+  int res = TRI_CreateDirectory(filename.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename.c_str()));
+
+  res = TRI_RemoveDirectory(filename.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename.c_str()));
+}
+
+BOOST_AUTO_TEST_CASE (tst_createdirectoryrecursive) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename1 = out.str();
+  out << "/abc";
+  std::string filename2 = out.str();
+
+  long unused1;
+  std::string unused2;
+  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
+
+  res = TRI_RemoveDirectory(filename1.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
+}
+
+BOOST_AUTO_TEST_CASE (tst_removedirectorydeterministic) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename1 = out.str();
+  out << "/abc";
+  std::string filename2 = out.str();
+
+  long unused1;
+  std::string unused2;
+  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
+
+  res = TRI_RemoveDirectoryDeterministic(filename1.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
+}
+
////////////////////////////////////////////////////////////////////////////////
/// @brief test file exists
////////////////////////////////////////////////////////////////////////////////

@@ -116,6 +180,7 @@ BOOST_AUTO_TEST_CASE (tst_existsfile) {
  StringBuffer* filename = writeFile("");
  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename->c_str()));
  TRI_UnlinkFile(filename->c_str());
  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename->c_str()));

+  delete filename;
}
@@ -25,11 +25,7 @@
#include "Agency/Agent.h"
#include "Agency/Job.h"

-#include <velocypack/Iterator.h>
-#include <velocypack/velocypack-aliases.h>
-
using namespace arangodb::consensus;
using namespace arangodb::velocypack;

AddFollower::AddFollower(Node const& snapshot, Agent* agent,
                         std::string const& jobId, std::string const& creator,
@@ -1,4 +1,4 @@
-///////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
@@ -54,7 +54,6 @@
using namespace arangodb;
using namespace arangodb::application_features;
-using namespace arangodb::basics;
using namespace arangodb::httpclient;
using namespace arangodb::rest;
@@ -450,7 +449,7 @@ std::string AgencyCommManager::path(std::string const& p1) {
    return "";
  }

-  return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/");
+  return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/");
}

std::string AgencyCommManager::path(std::string const& p1,

@@ -459,8 +458,8 @@ std::string AgencyCommManager::path(std::string const& p1,
    return "";
  }

-  return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/") + "/" +
-         StringUtils::trim(p2, "/");
+  return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/") + "/" +
+         basics::StringUtils::trim(p2, "/");
}

std::string AgencyCommManager::generateStamp() {

@@ -674,7 +673,7 @@ void AgencyCommManager::removeEndpoint(std::string const& endpoint) {
}

std::string AgencyCommManager::endpointsString() const {
-  return StringUtils::join(endpoints(), ", ");
+  return basics::StringUtils::join(endpoints(), ", ");
}

std::vector<std::string> AgencyCommManager::endpoints() const {
@@ -1280,7 +1279,7 @@ void AgencyComm::updateEndpoints(arangodb::velocypack::Slice const& current) {
  for (const auto& i : VPackObjectIterator(current)) {
    auto const endpoint = Endpoint::unifiedForm(i.value.copyString());
    if (std::find(stored.begin(), stored.end(), endpoint) == stored.end()) {
-      LOG_TOPIC(INFO, Logger::CLUSTER)
+      LOG_TOPIC(DEBUG, Logger::CLUSTER)
        << "Adding endpoint " << endpoint << " to agent pool";
      AgencyCommManager::MANAGER->addEndpoint(endpoint);
    }
@@ -1391,7 +1390,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
    b.add(VPackValue(clientId));
  }

-  LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
+  LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
    "Failed agency comm (" << result._statusCode << ")! " <<
    "Inquiring about clientId " << clientId << ".";
@@ -1410,25 +1409,25 @@ AgencyCommResult AgencyComm::sendWithFailover(
        for (auto const& i : VPackArrayIterator(inner)) {
          if (i.isUInt()) {
            if (i.getUInt() == 0) {
-              LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+              LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
                << body << " failed: " << outer.toJson();
              return result;
            } else {
              success = true;
            }
          } else {
-            LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+            LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
              << body << " failed with " << outer.toJson();
          }
        }
      }
    }
    if (success) {
-      LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+      LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
        << body << " succeeded (" << outer.toJson() << ")";
      return inq;
    } else {
-      LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+      LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
        << body << " failed (" << outer.toJson() << ")";
      return result;
    }

@@ -1437,7 +1436,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
    }
    return inq;
  } else {
-    LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
+    LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
      "Inquiry failed (" << inq._statusCode << "). Keep trying ...";
    continue;
  }
@@ -45,7 +45,7 @@ AgencyFeature::AgencyFeature(application_features::ApplicationServer* server)
      _supervision(false),
      _waitForSync(true),
      _supervisionFrequency(5.0),
-      _compactionStepSize(2000),
+      _compactionStepSize(200000),
      _compactionKeepSize(500),
      _supervisionGracePeriod(15.0),
      _cmdLineTimings(false)
@@ -232,7 +232,7 @@ void AgencyFeature::start() {

  _agent.reset(new consensus::Agent(consensus::config_t(
      _size, _poolSize, _minElectionTimeout, _maxElectionTimeout, endpoint,
-      _agencyEndpoints, _supervision, _waitForSync, _supervisionFrequency,
+      _agencyEndpoints, _supervision, false, _supervisionFrequency,
      _compactionStepSize, _compactionKeepSize, _supervisionGracePeriod,
      _cmdLineTimings)));
@@ -257,7 +257,7 @@ bool Agent::recvAppendEntriesRPC(
    term_t term, std::string const& leaderId, index_t prevIndex, term_t prevTerm,
    index_t leaderCommitIndex, query_t const& queries) {

-  LOG_TOPIC(DEBUG, Logger::AGENCY) << "Got AppendEntriesRPC from "
+  LOG_TOPIC(TRACE, Logger::AGENCY) << "Got AppendEntriesRPC from "
    << leaderId << " with term " << term;

  // Update commit index
@@ -276,40 +276,34 @@ bool Agent::recvAppendEntriesRPC(
  size_t nqs = queries->slice().length();

+  // State machine, _lastCommitIndex to advance atomically
+  MUTEX_LOCKER(mutexLocker, _ioLock);
+
  if (nqs > 0) {
-
-    MUTEX_LOCKER(mutexLocker, _ioLock);
-
    size_t ndups = _state.removeConflicts(queries);

    if (nqs > ndups) {
-      LOG_TOPIC(TRACE, Logger::AGENCY)
+      LOG_TOPIC(DEBUG, Logger::AGENCY)
        << "Appending " << nqs - ndups << " entries to state machine. ("
-        << nqs << ", " << ndups << ")";
+        << nqs << ", " << ndups << "): " << queries->slice().toJson();

      try {
-        _state.log(queries, ndups);
+        _lastCommitIndex = _state.log(queries, ndups);
+
+        if (_lastCommitIndex >= _nextCompationAfter) {
+          _state.compact(_lastCommitIndex);
+          _nextCompationAfter += _config.compactionStepSize();
+        }
      } catch (std::exception const&) {
        LOG_TOPIC(DEBUG, Logger::AGENCY)
          << "Malformed query: " << __FILE__ << __LINE__;
      }
    }
  }

-  _spearhead.apply(
-    _state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
-    _constituent.term());
-
-  _readDB.apply(
-    _state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
-    _constituent.term());
-
-  _lastCommitIndex = leaderCommitIndex;
-
-  if (_lastCommitIndex >= _nextCompationAfter) {
-    _state.compact(_lastCommitIndex);
-    _nextCompationAfter += _config.compactionStepSize();
-  }
-
  return true;
}
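(Editor's note: the reworked branch above advances _lastCommitIndex from the return value of _state.log() and compacts the log once the commit index passes a moving threshold. A schematic of that trigger with hypothetical types; the member names mirror those in the hunk:)

    #include <cstdint>

    template <typename StateT>
    struct CompactionTrigger {
      uint64_t nextCompactionAfter;  // mirrors _nextCompationAfter
      uint64_t stepSize;             // mirrors _config.compactionStepSize()

      // Called after the commit index advances; compacts at most once
      // per stepSize appended entries.
      void maybeCompact(StateT& state, uint64_t lastCommitIndex) {
        if (lastCommitIndex >= nextCompactionAfter) {
          state.compact(lastCommitIndex);
          nextCompactionAfter += stepSize;
        }
      }
    };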
@@ -348,7 +342,7 @@ void Agent::sendAppendEntriesRPC() {
    duration<double> m = system_clock::now() - _lastSent[followerId];

    if (highest == _lastHighest[followerId] &&
-        m.count() < 0.5 * _config.minPing()) {
+        m.count() < 0.25 * _config.minPing()) {
      continue;
    }
@@ -1122,10 +1116,10 @@ bool Agent::rebuildDBs() {

  MUTEX_LOCKER(mutexLocker, _ioLock);

-  _spearhead.apply(_state.slices(_lastCommitIndex + 1), _lastCommitIndex,
-                   _constituent.term());
-  _readDB.apply(_state.slices(_lastCommitIndex + 1), _lastCommitIndex,
-                _constituent.term());
+  _spearhead.apply(
+    _state.slices(0, _lastCommitIndex), _lastCommitIndex, _constituent.term());
+  _readDB.apply(
+    _state.slices(0, _lastCommitIndex), _lastCommitIndex, _constituent.term());

  return true;
}
@@ -28,7 +28,6 @@
#include "Agency/MoveShard.h"

using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

CleanOutServer::CleanOutServer(Node const& snapshot, Agent* agent,
                               std::string const& jobId,
@@ -468,11 +468,13 @@ void Constituent::callElection() {
void Constituent::update(std::string const& leaderID, term_t t) {
  MUTEX_LOCKER(guard, _castLock);
  _term = t;

  if (_leaderID != leaderID) {
    LOG_TOPIC(DEBUG, Logger::AGENCY)
      << "Constituent::update: setting _leaderID to " << leaderID
      << " in term " << _term;
    _leaderID = leaderID;
+    _role = FOLLOWER;
  }
}
@@ -546,6 +548,11 @@ void Constituent::run() {
    LOG_TOPIC(DEBUG, Logger::AGENCY) << "Set _leaderID to " << _leaderID
                                     << " in term " << _term;
  } else {
+
+    {
+      MUTEX_LOCKER(guard, _castLock);
+      _role = FOLLOWER;
+    }
    while (!this->isStopping()) {
      if (_role == FOLLOWER) {
        static double const M = 1.0e6;
@@ -75,7 +75,7 @@ bool FailedFollower::create() {
    }
  }

-  _jb = std::make_shared<velocypack::Builder>();
+  _jb = std::make_shared<Builder>();
  _jb->openArray();
  _jb->openObject();
@@ -128,7 +128,7 @@ bool FailedFollower::start() {


  // Copy todo to pending
-  velocypack::Builder todo, pending;
+  Builder todo, pending;

  // Get todo entry
  todo.openArray();
@@ -254,7 +254,7 @@ JOB_STATUS FailedFollower::status() {

  if (compareServerLists(planned.slice(), current.slice())) {
    // Remove shard from /arango/Target/FailedServers/<server> array
-    velocypack::Builder del;
+    Builder del;
    del.openArray();
    del.openObject();
    std::string path = _agencyPrefix + failedServersPrefix + "/" + _from;
@@ -27,7 +27,6 @@
#include "Agency/Job.h"

using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

FailedLeader::FailedLeader(Node const& snapshot, Agent* agent,
                           std::string const& jobId, std::string const& creator,
@@ -58,7 +58,7 @@ bool FailedServer::start() {
    << "Start FailedServer job " + _jobId + " for server " + _server;

  // Copy todo to pending
-  velocypack::Builder todo, pending;
+  Builder todo, pending;

  // Get todo entry
  todo.openArray();
@@ -210,7 +210,7 @@ bool FailedServer::create() {

  std::string path = _agencyPrefix + toDoPrefix + _jobId;

-  _jb = std::make_shared<velocypack::Builder>();
+  _jb = std::make_shared<Builder>();
  _jb->openArray();
  _jb->openObject();
@@ -271,7 +271,7 @@ JOB_STATUS FailedServer::status() {
    // mop: ohhh...server is healthy again!
    bool serverHealthy = serverHealth == Supervision::HEALTH_STATUS_GOOD;

-    std::shared_ptr<velocypack::Builder> deleteTodos;
+    std::shared_ptr<Builder> deleteTodos;

    Node::Children const todos = _snapshot(toDoPrefix).children();
    Node::Children const pends = _snapshot(pendingPrefix).children();
@@ -281,7 +281,7 @@ JOB_STATUS FailedServer::status() {
      if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
        if (serverHealthy) {
          if (!deleteTodos) {
-            deleteTodos.reset(new velocypack::Builder());
+            deleteTodos.reset(new Builder());
            deleteTodos->openArray();
            deleteTodos->openObject();
          }
@@ -66,7 +66,7 @@ void Inception::gossip() {
  auto const version = config.version();

  // Build gossip message
-  auto out = std::make_shared<velocypack::Builder>();
+  auto out = std::make_shared<Builder>();
  out->openObject();
  out->add("endpoint", VPackValue(config.endpoint()));
  out->add("id", VPackValue(config.id()));
@@ -169,7 +169,7 @@ bool Inception::restartingActiveAgent() {
  auto const& clientEp = myConfig.endpoint();
  auto const majority = (myConfig.size()+1)/2;

-  velocypack::Builder greeting;
+  Builder greeting;
  {
    VPackObjectBuilder b(&greeting);
    greeting.add(clientId, VPackValue(clientEp));
@@ -259,7 +259,7 @@ bool Inception::restartingActiveAgent() {
      }
    }

-    auto agency = std::make_shared<velocypack::Builder>();
+    auto agency = std::make_shared<Builder>();
    agency->openObject();
    agency->add("term", theirConfig.get("term"));
    agency->add("id", VPackValue(theirLeaderId));
@@ -435,7 +435,7 @@ bool Inception::estimateRAFTInterval() {
    LOG_TOPIC(DEBUG, Logger::AGENCY)
      << "mean(" << mean << ") stdev(" << stdev << ")";

-    velocypack::Builder measurement;
+    Builder measurement;
    measurement.openObject();
    measurement.add("mean", VPackValue(mean));
    measurement.add("stdev", VPackValue(stdev));
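(Editor's note: the measurement object built above carries the mean and standard deviation of RAFT timing samples. For reference, a generic computation of those two statistics; the sample source is hypothetical and the function assumes a non-empty input:)

    #include <cmath>
    #include <vector>

    // Population mean and standard deviation of timing samples.
    void meanAndStdev(std::vector<double> const& samples,
                      double& mean, double& stdev) {
      mean = 0.0;
      for (double s : samples) { mean += s; }
      mean /= static_cast<double>(samples.size());

      double variance = 0.0;
      for (double s : samples) { variance += (s - mean) * (s - mean); }
      stdev = std::sqrt(variance / static_cast<double>(samples.size()));
    }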
@@ -541,8 +541,10 @@ void Inception::run() {
    LOG_TOPIC(INFO, Logger::AGENCY) << "Activating agent.";
    _agent->ready(true);
  } else {
+    if (!this->isStopping()) {
      LOG_TOPIC(FATAL, Logger::AGENCY)
        << "Unable to restart with persisted pool. Fatal exit.";
+    }
    FATAL_ERROR_EXIT();
-    // FATAL ERROR
  }
@@ -25,7 +25,7 @@

using namespace arangodb::consensus;

-bool arangodb::consensus::compareServerLists(velocypack::Slice plan, velocypack::Slice current) {
+bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
  if (!plan.isArray() || !current.isArray()) {
    return false;
  }
@@ -80,7 +80,7 @@ JOB_STATUS Job::exists() const {
bool Job::finish(std::string const& type, bool success,
                 std::string const& reason) const {

-  velocypack::Builder pending, finished;
+  Builder pending, finished;

  // Get todo entry
  pending.openArray();
@@ -28,7 +28,6 @@
#include "Node.h"
#include "Supervision.h"

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
@@ -42,7 +41,7 @@ namespace consensus {
// and all others followers. Both arguments must be arrays. Returns true,
// if the first items in both slices are equal and if both arrays contain
// the same set of strings.
-bool compareServerLists(velocypack::Slice plan, velocypack::Slice current);
+bool compareServerLists(Slice plan, Slice current);

enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",
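(Editor's note: a behavioral sketch of the comparison the comment above describes: the first entries, the leaders, must match exactly, while the remaining entries must be equal as a set. The sketch uses string vectors; the production function operates on velocypack Slices:)

    #include <algorithm>
    #include <string>
    #include <vector>

    bool compareServerListsSketch(std::vector<std::string> plan,
                                  std::vector<std::string> current) {
      if (plan.empty() || current.empty() || plan.front() != current.front()) {
        return false;  // a list is empty or the leaders differ
      }
      // Followers must match as a set, so compare sorted copies.
      std::sort(plan.begin() + 1, plan.end());
      std::sort(current.begin() + 1, current.end());
      return plan == current;
    }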
@@ -64,9 +63,9 @@ static std::string const plannedServers = "/Plan/DBServers";
static std::string const healthPrefix = "/Supervision/Health/";

inline arangodb::consensus::write_ret_t transact(Agent* _agent,
-                                                 velocypack::Builder const& transaction,
+                                                 Builder const& transaction,
                                                  bool waitForCommit = true) {
-  query_t envelope = std::make_shared<velocypack::Builder>();
+  query_t envelope = std::make_shared<Builder>();

  try {
    envelope->openArray();
@@ -138,7 +137,7 @@ struct Job {
  std::string _creator;
  std::string _agencyPrefix;

-  std::shared_ptr<velocypack::Builder> _jb;
+  std::shared_ptr<Builder> _jb;

};
@@ -29,7 +29,6 @@
static std::string const DBServer = "DBServer";

using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

MoveShard::MoveShard(Node const& snapshot, Agent* agent,
                     std::string const& jobId, std::string const& creator,
@@ -33,9 +33,8 @@
#include <deque>
#include <regex>

using namespace arangodb;
-using namespace arangodb::basics;
using namespace arangodb::consensus;
+using namespace arangodb::basics;

struct NotEmpty {
  bool operator()(const std::string& s) { return !s.empty(); }
@@ -89,16 +88,16 @@ Node::Node(std::string const& name, Store* store)
Node::~Node() {}

/// Get slice to value buffer
-velocypack::Slice Node::slice() const {
+Slice Node::slice() const {
  // Some array
  if (_isArray) {
    rebuildVecBuf();
-    return velocypack::Slice(_vecBuf.data());
+    return Slice(_vecBuf.data());
  }

  // Some value
  if (!_value.empty()) {
-    return velocypack::Slice(_value.front().data());
+    return Slice(_value.front().data());
  }

  // Empty object
@@ -107,10 +106,10 @@ velocypack::Slice Node::slice() const {

void Node::rebuildVecBuf() const {
  if (_vecBufDirty) {  // Dirty vector buffer
-    velocypack::Builder tmp;
+    Builder tmp;
    tmp.openArray();
    for (auto const& i : _value) {
-      tmp.add(velocypack::Slice(i.data()));
+      tmp.add(Slice(i.data()));
    }
    tmp.close();
    _vecBuf = *tmp.steal();
@@ -324,7 +323,7 @@ Store& Node::store() { return *(root()._store); }
Store const& Node::store() const { return *(root()._store); }

// velocypack value type of this node
-velocypack::ValueType Node::valueType() const { return slice().type(); }
+ValueType Node::valueType() const { return slice().type(); }

// file time to live entry for this node to now + millis
bool Node::addTimeToLive(long millis) {
@@ -359,7 +358,7 @@ namespace consensus {
/// Set value
template <>
bool Node::handle<SET>(VPackSlice const& slice) {
-  VPackSlice val = slice.get("new");
+  Slice val = slice.get("new");

  if (val.isObject()) {
    if (val.hasKey("op")) {  // No longer a keyword but a regular key "op"
@@ -394,12 +393,12 @@ bool Node::handle<SET>(VPackSlice const& slice) {
/// Increment integer value or set 1
template <>
bool Node::handle<INCREMENT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openObject();
  try {
-    tmp.add("tmp", velocypack::Value(this->slice().getInt() + 1));
+    tmp.add("tmp", Value(this->slice().getInt() + 1));
  } catch (std::exception const&) {
-    tmp.add("tmp", velocypack::Value(1));
+    tmp.add("tmp", Value(1));
  }
  tmp.close();
  *this = tmp.slice().get("tmp");
@@ -409,12 +408,12 @@ bool Node::handle<INCREMENT>(VPackSlice const& slice) {
/// Decrement integer value or set -1
template <>
bool Node::handle<DECREMENT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openObject();
  try {
-    tmp.add("tmp", velocypack::Value(this->slice().getInt() - 1));
+    tmp.add("tmp", Value(this->slice().getInt() - 1));
  } catch (std::exception const&) {
-    tmp.add("tmp", velocypack::Value(-1));
+    tmp.add("tmp", Value(-1));
  }
  tmp.close();
  *this = tmp.slice().get("tmp");
@@ -429,7 +428,7 @@ bool Node::handle<PUSH>(VPackSlice const& slice) {
      << slice.toJson();
    return false;
  }
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openArray();
  if (this->slice().isArray()) {
    for (auto const& old : VPackArrayIterator(this->slice())) tmp.add(old);
@@ -448,7 +447,7 @@ bool Node::handle<ERASE>(VPackSlice const& slice) {
      << "Operator erase without value to be erased: " << slice.toJson();
    return false;
  }
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openArray();
  if (this->slice().isArray()) {
    for (auto const& old : VPackArrayIterator(this->slice())) {
@@ -475,7 +474,7 @@ bool Node::handle<REPLACE>(VPackSlice const& slice) {
      << slice.toJson();
    return false;
  }
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openArray();
  if (this->slice().isArray()) {
    for (auto const& old : VPackArrayIterator(this->slice())) {
@@ -494,7 +493,7 @@ bool Node::handle<REPLACE>(VPackSlice const& slice) {
/// Remove element from end of array.
template <>
bool Node::handle<POP>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openArray();
  if (this->slice().isArray()) {
    VPackArrayIterator it(this->slice());
@@ -519,7 +518,7 @@ bool Node::handle<PREPEND>(VPackSlice const& slice) {
      << slice.toJson();
    return false;
  }
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openArray();
  tmp.add(slice.get("new"));
  if (this->slice().isArray()) {
@@ -533,7 +532,7 @@ bool Node::handle<PREPEND>(VPackSlice const& slice) {
/// Remove element from front of array
template <>
bool Node::handle<SHIFT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
  tmp.openArray();
  if (this->slice().isArray()) {  // If a
    VPackArrayIterator it(this->slice());
@@ -678,7 +677,7 @@ bool Node::applies(VPackSlice const& slice) {
  return true;
}

-void Node::toBuilder(velocypack::Builder& builder, bool showHidden) const {
+void Node::toBuilder(Builder& builder, bool showHidden) const {
  try {
    if (type() == NODE) {
      VPackObjectBuilder guard(&builder);
@@ -729,7 +728,7 @@ Node::Children& Node::children() { return _children; }
Node::Children const& Node::children() const { return _children; }

std::string Node::toJson() const {
-  velocypack::Builder builder;
+  Builder builder;
  builder.openArray();
  toBuilder(builder);
  builder.close();
@@ -796,7 +795,7 @@ std::string Node::getString() const {
  return slice().copyString();
}

-velocypack::Slice Node::getArray() const {
+Slice Node::getArray() const {
  if (type() == NODE) {
    throw StoreException("Must not convert NODE type to array");
  }
@@ -804,6 +803,6 @@ velocypack::Slice Node::getArray() const {
    throw StoreException("Not an array type");
  }
  rebuildVecBuf();
-  return velocypack::Slice(_vecBuf.data());
+  return Slice(_vecBuf.data());
}
@@ -50,6 +50,8 @@ enum Operation {
  REPLACE
};

+using namespace arangodb::velocypack;
+
class StoreException : public std::exception {
 public:
  explicit StoreException(std::string const& message) : _message(message) {}
@@ -159,7 +161,7 @@ class Node {
  bool handle(arangodb::velocypack::Slice const&);

  /// @brief Create Builder representing this store
-  void toBuilder(velocypack::Builder&, bool showHidden = false) const;
+  void toBuilder(Builder&, bool showHidden = false) const;

  /// @brief Access children
  Children& children();
@@ -168,10 +170,10 @@ class Node {
  Children const& children() const;

  /// @brief Create slice from value
-  velocypack::Slice slice() const;
+  Slice slice() const;

  /// @brief Get value type
-  velocypack::ValueType valueType() const;
+  ValueType valueType() const;

  /// @brief Add observer for this node
  bool addObserver(std::string const&);
@@ -216,7 +218,7 @@ class Node {
  std::string getString() const;

  /// @brief Get array value
-  velocypack::Slice getArray() const;
+  Slice getArray() const;

 protected:
  /// @brief Add time to live entry
@@ -232,8 +234,8 @@ class Node {
  Store* _store;       ///< @brief Store
  Children _children;  ///< @brief child nodes
  TimePoint _ttl;      ///< @brief my expiry
-  std::vector<velocypack::Buffer<uint8_t>> _value;  ///< @brief my value
-  mutable velocypack::Buffer<uint8_t> _vecBuf;
+  std::vector<Buffer<uint8_t>> _value;  ///< @brief my value
+  mutable Buffer<uint8_t> _vecBuf;
  mutable bool _vecBufDirty;
  bool _isArray;
};
@@ -27,7 +27,6 @@
#include "Agency/Job.h"

using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

RemoveServer::RemoveServer(Node const& snapshot, Agent* agent,
                           std::string const& jobId, std::string const& creator,
@@ -35,10 +35,10 @@
#include "Rest/Version.h"

using namespace arangodb;

using namespace arangodb::basics;
-using namespace arangodb::consensus;
using namespace arangodb::rest;
using namespace arangodb::velocypack;
+using namespace arangodb::consensus;

////////////////////////////////////////////////////////////////////////////////
/// @brief ArangoDB server
@@ -184,7 +184,7 @@ class State {
  size_t _cur;

  /// @brief Operation options
-  OperationOptions _options;
+  arangodb::OperationOptions _options;

  /// @brief Empty log entry;
  static log_t emptyLog;
@@ -40,9 +40,8 @@
#include <iomanip>
#include <regex>

-using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
+using namespace arangodb::basics;

/// Non-Emptyness of string
struct NotEmpty {
@@ -28,9 +28,6 @@
#include "Basics/Thread.h"
#include "Node.h"

-#include <velocypack/Builder.h>
-#include <velocypack/Slice.h>
-
namespace arangodb {
namespace consensus {
@@ -61,10 +58,10 @@ class Store : public arangodb::Thread {
  std::vector<bool> apply(query_t const& query, bool verbose = false);

  /// @brief Apply single entry in query
-  bool apply(velocypack::Slice const& query, bool verbose = false);
+  bool apply(Slice const& query, bool verbose = false);

  /// @brief Apply entry in query
-  std::vector<bool> apply(std::vector<velocypack::Slice> const& query,
+  std::vector<bool> apply(std::vector<Slice> const& query,
                          index_t lastCommitIndex, term_t term,
                          bool inform = true);
@@ -82,7 +79,7 @@ class Store : public arangodb::Thread {
  bool start();

  /// @brief Dump everything to builder
-  void dumpToBuilder(velocypack::Builder&) const;
+  void dumpToBuilder(Builder&) const;

  /// @brief Notify observers
  void notifyObservers() const;
@@ -93,7 +90,7 @@ class Store : public arangodb::Thread {
  Store& operator=(VPackSlice const& slice);

  /// @brief Create Builder representing this store
-  void toBuilder(velocypack::Builder&, bool showHidden = false) const;
+  void toBuilder(Builder&, bool showHidden = false) const;

  /// @brief Copy out a node
  Node get(std::string const& path) const;
@@ -41,9 +41,9 @@
#include "Basics/MutexLocker.h"

using namespace arangodb;
-using namespace arangodb::application_features;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
+using namespace arangodb::application_features;

std::string Supervision::_agencyPrefix = "/arango";
@@ -27,7 +27,6 @@
#include "Agency/Job.h"

using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

UnassumedLeadership::UnassumedLeadership(
    Node const& snapshot, Agent* agent, std::string const& jobId,
@@ -39,7 +39,6 @@ using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::basics;
using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

static void JS_EnabledAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
@@ -418,7 +418,6 @@ struct AstNode {
  bool isAttributeAccessForVariable(Variable const* variable, bool allowIndexedAccess) const {
    auto node = getAttributeAccessForVariable(allowIndexedAccess);

-
    if (node == nullptr) {
      return false;
    }
@@ -35,7 +35,7 @@ class AqlItemBlock;

class ExecutionEngine;

-class SingletonBlock : public ExecutionBlock {
+class SingletonBlock final : public ExecutionBlock {
 public:
  SingletonBlock(ExecutionEngine* engine, SingletonNode const* ep)
      : ExecutionBlock(engine, ep), _inputRegisterValues(nullptr), _whitelistBuilt(false) {}
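(Editor's note: this and the following hunks mark the concrete execution block classes final. Beyond documenting that nothing derives from them, final allows the compiler to devirtualize calls once the concrete type is known, as in this minimal illustration:)

    struct Block {
      virtual int work() = 0;
      virtual ~Block() = default;
    };

    struct SingletonLike final : Block {
      int work() override { return 1; }
    };

    int run(SingletonLike& b) {
      // b's type is final, so the compiler may call work() directly
      // instead of dispatching through the vtable.
      return b.work();
    }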
@@ -75,7 +75,7 @@ class SingletonBlock : public ExecutionBlock {
  bool _whitelistBuilt;
};

-class FilterBlock : public ExecutionBlock {
+class FilterBlock final : public ExecutionBlock {
 public:
  FilterBlock(ExecutionEngine*, FilterNode const*);
@@ -112,7 +112,7 @@ class FilterBlock : public ExecutionBlock {
  BlockCollector _collector;
};

-class LimitBlock : public ExecutionBlock {
+class LimitBlock final : public ExecutionBlock {
 public:
  LimitBlock(ExecutionEngine* engine, LimitNode const* ep)
      : ExecutionBlock(engine, ep),
@@ -145,7 +145,7 @@ class LimitBlock : public ExecutionBlock {
  bool const _fullCount;
};

-class ReturnBlock : public ExecutionBlock {
+class ReturnBlock final : public ExecutionBlock {
 public:
  ReturnBlock(ExecutionEngine* engine, ReturnNode const* ep)
      : ExecutionBlock(engine, ep), _returnInheritedResults(false) {}
@@ -168,7 +168,7 @@ class ReturnBlock : public ExecutionBlock {
  bool _returnInheritedResults;
};

-class NoResultsBlock : public ExecutionBlock {
+class NoResultsBlock final : public ExecutionBlock {
 public:
  NoResultsBlock(ExecutionEngine* engine, NoResultsNode const* ep)
      : ExecutionBlock(engine, ep) {}
@@ -34,7 +34,7 @@ class AqlItemBlock;

class ExecutionEngine;

-class CalculationBlock : public ExecutionBlock {
+class CalculationBlock final : public ExecutionBlock {
 public:
  CalculationBlock(ExecutionEngine*, CalculationNode const*);
@@ -330,6 +330,16 @@ AqlItemBlock* GatherBlock::getSome(size_t atLeast, size_t atMost) {
      delete cur;
      _gatherBlockBuffer.at(val.first).pop_front();
      _gatherBlockPos.at(val.first) = std::make_pair(val.first, 0);
+
+      if (_gatherBlockBuffer.at(val.first).empty()) {
+        // if we pulled everything from the buffer, we need to fetch
+        // more data for the shard for which we have no more local
+        // values.
+        getBlock(val.first, atLeast, atMost);
+        // note that if getBlock() returns false here, this is not
+        // a problem, because the sort function used takes care of
+        // this
+      }
    }
  }
@@ -43,7 +43,7 @@ class ExecutionEngine;

typedef std::vector<Aggregator*> AggregateValuesType;

-class SortedCollectBlock : public ExecutionBlock {
+class SortedCollectBlock final : public ExecutionBlock {
 private:
  typedef std::vector<Aggregator*> AggregateValuesType;
@ -41,7 +41,7 @@ class AqlItemBlock;
|
|||
struct Collection;
|
||||
class ExecutionEngine;
|
||||
|
||||
class EnumerateCollectionBlock : public ExecutionBlock {
|
||||
class EnumerateCollectionBlock final : public ExecutionBlock {
|
||||
public:
|
||||
EnumerateCollectionBlock(ExecutionEngine* engine,
|
||||
EnumerateCollectionNode const* ep);
|
||||
|
|
|
@@ -1139,7 +1139,7 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
      regsToClear.emplace(r);
    }
  }
-  en->setRegsToClear(regsToClear);
+  en->setRegsToClear(std::move(regsToClear));
}
}
@@ -586,8 +586,8 @@ class ExecutionNode {
  void toVelocyPackHelperGeneric(arangodb::velocypack::Builder&, bool) const;

  /// @brief set regs to be deleted
-  void setRegsToClear(std::unordered_set<RegisterId> const& toClear) {
-    _regsToClear = toClear;
+  void setRegsToClear(std::unordered_set<RegisterId>&& toClear) {
+    _regsToClear = std::move(toClear);
  }

 protected:
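(Editor's note: together with the std::move added at the call site in the previous hunk, the rvalue-reference overload lets the register set be moved into the node instead of copied. A standalone illustration with hypothetical names:)

    #include <unordered_set>
    #include <utility>

    using RegisterId = unsigned int;

    struct NodeSketch {
      std::unordered_set<RegisterId> _regsToClear;

      // Accepting an rvalue reference lets the caller hand over its set;
      // std::move transfers the hash table's buckets instead of copying.
      void setRegsToClear(std::unordered_set<RegisterId>&& toClear) {
        _regsToClear = std::move(toClear);
      }
    };

    int main() {
      NodeSketch n;
      std::unordered_set<RegisterId> regs{1, 2, 3};
      n.setRegsToClear(std::move(regs));  // regs is left valid but unspecified
      return 0;
    }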
@@ -60,7 +60,7 @@ struct NonConstExpression {
  ~NonConstExpression() { delete expression; }
};

-class IndexBlock : public ExecutionBlock {
+class IndexBlock final : public ExecutionBlock {
 public:
  IndexBlock(ExecutionEngine* engine, IndexNode const* ep);
@@ -4100,47 +4100,54 @@ MMFilesGeoIndexInfo iterativePreorderWithCondition(EN::NodeType type, AstNode* r
  return MMFilesGeoIndexInfo{};
}

-MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode*,AstNode*> const& pair, ExecutionPlan* plan, MMFilesGeoIndexInfo info){
-  using SV = std::vector<std::string>;
+MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode const*, AstNode const*> const& pair,
+                                                ExecutionPlan* plan, MMFilesGeoIndexInfo info){
+  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess1;
+  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess2;

  // first and second should be based on the same document - need to provide the document
  // in order to see which collection is bound to it and if that collection supports geo-index
-  if( !pair.first->isAttributeAccessForVariable() || !pair.second->isAttributeAccessForVariable()){
+  if (!pair.first->isAttributeAccessForVariable(attributeAccess1) ||
+      !pair.second->isAttributeAccessForVariable(attributeAccess2)) {
    info.invalidate();
    return info;
  }

+  TRI_ASSERT(attributeAccess1.first != nullptr);
+  TRI_ASSERT(attributeAccess2.first != nullptr);
+
  // expect access of the form doc.attribute
  // TODO: more complex access paths have to be added: loop until REFERENCE TYPE IS FOUND
-  auto setter1 = plan->getVarSetBy(static_cast<Variable const*>(pair.first->getMember(0)->getData())->id);
-  auto setter2 = plan->getVarSetBy(static_cast<Variable const*>(pair.second->getMember(0)->getData())->id);
-  SV accessPath1{pair.first->getString()};
-  SV accessPath2{pair.second->getString()};
+  auto setter1 = plan->getVarSetBy(attributeAccess1.first->id);
+  auto setter2 = plan->getVarSetBy(attributeAccess2.first->id);

-  if(setter1 == setter2){
-    if(setter1->getType() == EN::ENUMERATE_COLLECTION){
-      auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
+  if (setter1 != nullptr &&
+      setter2 != nullptr &&
+      setter1 == setter2 &&
+      setter1->getType() == EN::ENUMERATE_COLLECTION) {
+    auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);

-      auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
-      auto lcoll = coll->getCollection();
-      // TODO - check collection for suitable geo-indexes
-      for(auto indexShardPtr : lcoll->getIndexes()){
-        // get real index
-        arangodb::Index& index = *indexShardPtr.get();
+    auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
+    auto lcoll = coll->getCollection();
+    // TODO - check collection for suitable geo-indexes
+    for(auto indexShardPtr : lcoll->getIndexes()){
+      // get real index
+      arangodb::Index& index = *indexShardPtr.get();

-        // check if current index is a geo-index
-        if( index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
-            && index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX){
-          continue;
-        }
+      // check if current index is a geo-index
+      if( index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
+          && index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX) {
+        continue;
+      }
+
+      TRI_ASSERT(index.fields().size() == 2);

-        //check access paths of attributes in ast and those in index match
-        if( index.fieldNames()[0] == accessPath1 && index.fieldNames()[1] == accessPath2 ){
-          info.collectionNode = collNode;
-          info.index = indexShardPtr;
-          info.longitude = std::move(accessPath1);
-          info.latitude = std::move(accessPath2);
-          return info;
-        }
+      //check access paths of attributes in ast and those in index match
+      if (index.fields()[0] == attributeAccess1.second &&
+          index.fields()[1] == attributeAccess2.second) {
+        info.collectionNode = collNode;
+        info.index = indexShardPtr;
+        TRI_AttributeNamesJoinNested(attributeAccess1.second, info.longitude, true);
+        TRI_AttributeNamesJoinNested(attributeAccess2.second, info.latitude, true);
+        return info;
+      }
    }
  }
@@ -37,7 +37,7 @@ class AqlItemBlock;

class ExecutionEngine;

-class SortBlock : public ExecutionBlock {
+class SortBlock final : public ExecutionBlock {
 public:
  SortBlock(ExecutionEngine*, SortNode const*);
@@ -37,7 +37,7 @@ class ManagedDocumentResult;

namespace aql {

-class TraversalBlock : public ExecutionBlock {
+class TraversalBlock final : public ExecutionBlock {
 public:
  TraversalBlock(ExecutionEngine* engine, TraversalNode const* ep);
@@ -286,11 +286,9 @@ void ClusterFeature::prepare() {
    ServerState::instance()->setId(_myId);
  }

-  if (_requestedRole != ServerState::RoleEnum::ROLE_UNDEFINED) {
-    if (!ServerState::instance()->registerWithRole(_requestedRole, _myAddress)) {
-      LOG(FATAL) << "Couldn't register at agency.";
-      FATAL_ERROR_EXIT();
-    }
+  if (!ServerState::instance()->registerWithRole(_requestedRole, _myAddress)) {
+    LOG(FATAL) << "Couldn't register at agency.";
+    FATAL_ERROR_EXIT();
  }

  auto role = ServerState::instance()->getRole();
@@ -54,6 +54,8 @@ void DBServerAgencySync::work() {
DBServerAgencySyncResult DBServerAgencySync::execute() {
  // default to system database

+  double startTime = TRI_microtime();
+
  LOG_TOPIC(DEBUG, Logger::HEARTBEAT) << "DBServerAgencySync::execute starting";
  DatabaseFeature* database =
      ApplicationServer::getFeature<DatabaseFeature>("Database");

@@ -80,6 +82,11 @@ DBServerAgencySyncResult DBServerAgencySync::execute() {
    return result;
  }

+  double now = TRI_microtime();
+  if (now - startTime > 5.0) {
+    LOG(INFO) << "DBServerAgencySync::execute took more than 5s to get free V8 context, starting handle-plan-change now";
+  }
+
  TRI_DEFER(V8DealerFeature::DEALER->exitContext(context));

  auto isolate = context->_isolate;
@@ -251,51 +251,70 @@ bool ServerState::unregister() {
bool ServerState::registerWithRole(ServerState::RoleEnum role,
                                   std::string const& myAddress) {

  if (!getId().empty()) {
    LOG_TOPIC(INFO, Logger::CLUSTER)
      << "Registering with role and localinfo. Supplied id is being ignored";
    return false;
  }

  AgencyComm comm;
  AgencyCommResult result;
  std::string localInfoEncoded = StringUtils::replace(
    StringUtils::urlEncode(getLocalInfo()), "%2E", ".");
-  result = comm.getValues("Target/MapLocalToID/" + localInfoEncoded);

+  std::string locinf = "Target/MapLocalToID/" +
+    (localInfoEncoded.empty() ? "bogus_hass_hund" : localInfoEncoded);
+  std::string dbidinf = "Plan/DBServers/" +
+    (_id.empty() ? "bogus_hass_hund" : _id);
+  std::string coidinf = "Plan/Coordinators/" +
+    (_id.empty() ? "bogus_hass_hund" : _id);
+
+  typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
+  AgencyGeneralTransaction reg;
+  reg.operations.push_back(  // my-local-info
+    operationType(AgencyOperation(locinf), AgencyPrecondition()));
+  reg.operations.push_back(  // db my-id
+    operationType(AgencyOperation(dbidinf), AgencyPrecondition()));
+  reg.operations.push_back(  // coord my-id
+    operationType(AgencyOperation(coidinf), AgencyPrecondition()));
+  result = comm.sendTransactionWithFailover(reg, 0.0);

  std::string id;
-  bool found = true;
-
-  if (!result.successful()) {
-    found = false;
-  } else {
-    VPackSlice idSlice = result.slice()[0].get(
-      std::vector<std::string>({AgencyCommManager::path(), "Target",
-                                "MapLocalToID", localInfoEncoded}));
-    if (!idSlice.isString()) {
-      found = false;
-    } else {
-      id = idSlice.copyString();
-      LOG(WARN) << "Have ID: " + id;
+  if (result.slice().isArray()) {
+
+    VPackSlice targetSlice, planSlice;
+    if (!_id.empty()) {
+      try {
+        if (
+          result.slice()[1].get(
+            std::vector<std::string>({AgencyCommManager::path(), "Plan",
+                                      "DBServers", _id})).isString()) {
+          id = _id;
+          if (role == ServerState::ROLE_UNDEFINED) {
+            role = ServerState::ROLE_PRIMARY;
+          }
+        } else if (
+          result.slice()[2].get(
+            std::vector<std::string>({AgencyCommManager::path(), "Plan",
+                                      "Coordinators", _id})).isString()) {
+          id = _id;
+          if (role == ServerState::ROLE_UNDEFINED) {
+            role = ServerState::ROLE_COORDINATOR;
+          }
+        }
+      } catch (...) {}
+    } else if (!localInfoEncoded.empty()) {
+      try {
+        id = result.slice()[0].get(
+          std::vector<std::string>({AgencyCommManager::path(), "Target",
+                                    "MapLocalToID", localInfoEncoded})).copyString();
+      } catch (...) {}
    }
  }
-  createIdForRole(comm, role, id);
-  if (found) {
-
-  } else {
-    LOG_TOPIC(DEBUG, Logger::CLUSTER)
-      << "Determining id from localinfo failed."
-      << "Continuing with registering ourselves for the first time";
-    id = createIdForRole(comm, role);
-  }

+  id = createIdForRole(comm, role, id);

  const std::string agencyKey = roleToAgencyKey(role);
  const std::string planKey = "Plan/" + agencyKey + "/" + id;
  const std::string currentKey = "Current/" + agencyKey + "/" + id;

  auto builder = std::make_shared<VPackBuilder>();
  result = comm.getValues(planKey);
-  found = true;
+  bool found = true;
  if (!result.successful()) {
    found = false;
  } else {
@ -379,6 +398,9 @@ std::string ServerState::createIdForRole(AgencyComm comm,
|
|||
|
||||
typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
|
||||
std::string const agencyKey = roleToAgencyKey(role);
|
||||
std::string roleName = ((role == ROLE_COORDINATOR) ? "Coordinator":"DBServer");
|
||||
|
||||
size_t shortNum(0);
|
||||
|
||||
VPackBuilder builder;
|
||||
builder.add(VPackValue("none"));
|
||||
|
@ -392,11 +414,22 @@ std::string ServerState::createIdForRole(AgencyComm comm,
|
|||
|
||||
auto filePath = dbpath->directory() + "/UUID";
|
||||
std::ifstream ifs(filePath);
|
||||
|
||||
if (!id.empty()) {
|
||||
if (id.compare(0, roleName.size(), roleName) == 0) {
|
||||
try {
|
||||
shortNum = std::stoul(id.substr(roleName.size(),3));
|
||||
} catch(...) {
|
||||
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
|
||||
"Old id cannot be parsed for number.";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ifs.is_open()) {
|
||||
std::getline(ifs, id);
|
||||
ifs.close();
|
||||
LOG_TOPIC(INFO, Logger::CLUSTER)
|
||||
LOG_TOPIC(DEBUG, Logger::CLUSTER)
|
||||
<< "Restarting with persisted UUID " << id;
|
||||
} else {
|
||||
mkdir (dbpath->directory());
|
||||
|
@ -451,7 +484,7 @@ std::string ServerState::createIdForRole(AgencyComm comm,
|
|||
reg.operations.push_back( // Get shortID
|
||||
operationType(AgencyOperation(targetIdStr), AgencyPrecondition()));
|
||||
result = comm.sendTransactionWithFailover(reg, 0.0);
|
||||
|
||||
|
||||
VPackSlice latestId = result.slice()[2].get(
|
||||
std::vector<std::string>(
|
||||
{AgencyCommManager::path(), "Target",
|
||||
|
@ -464,7 +497,8 @@ std::string ServerState::createIdForRole(AgencyComm comm,
|
|||
localIdBuilder.add("TransactionID", latestId);
|
||||
std::stringstream ss; // ShortName
|
||||
ss << ((role == ROLE_COORDINATOR) ? "Coordinator" : "DBServer")
|
||||
<< std::setw(4) << std::setfill('0') << latestId.getNumber<uint32_t>();
|
||||
<< std::setw(4) << std::setfill('0')
|
||||
<< (shortNum ==0 ? latestId.getNumber<uint32_t>() : shortNum);
|
||||
std::string shortName = ss.str();
|
||||
localIdBuilder.add("ShortName", VPackValue(shortName));
|
||||
}
|
||||
|
|
|
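The ShortName construction above concatenates the role name with a zero-padded four-digit number. A minimal sketch of that formatting in isolation (the helper name and sample values are illustrative, not taken from the agency):

    #include <cstdint>
    #include <iomanip>
    #include <iostream>
    #include <sstream>
    #include <string>

    static std::string makeShortName(std::string const& roleName, uint32_t num) {
      std::stringstream ss;
      ss << roleName << std::setw(4) << std::setfill('0') << num;  // zero-padded counter
      return ss.str();
    }

    int main() {
      std::cout << makeShortName("Coordinator", 1) << "\n";  // Coordinator0001
      std::cout << makeShortName("DBServer", 42) << "\n";    // DBServer0042
      return 0;
    }
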
@ -754,7 +754,11 @@ static void JS_GetCollectionInfoClusterInfo(
uint32_t pos = 0;
for (auto const& s : p.second) {
try{
shorts->Set(pos, TRI_V8_STD_STRING(serverAliases.at(s)));
std::string t = s;
if (s.at(0) == '_') {
t = s.substr(1);
}
shorts->Set(pos, TRI_V8_STD_STRING(serverAliases.at(t)));
} catch (...) {}
list->Set(pos++, TRI_V8_STD_STRING(s));
}
@ -985,11 +989,23 @@ static void JS_GetDBServers(v8::FunctionCallbackInfo<v8::Value> const& args) {
auto serverAliases = ClusterInfo::instance()->getServerAliases();
v8::Handle<v8::Array> l = v8::Array::New(isolate);
for (size_t i = 0; i < DBServers.size(); ++i) {
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("serverId"), TRI_V8_STD_STRING(DBServers[i]));
result->Set(TRI_V8_ASCII_STRING("serverName"),
TRI_V8_STD_STRING(serverAliases.at(DBServers[i])));
auto id = DBServers[i];
result->Set(TRI_V8_ASCII_STRING("serverId"), TRI_V8_STD_STRING(id));
auto itr = serverAliases.find(id);
if (itr != serverAliases.end()) {
result->Set(TRI_V8_ASCII_STRING("serverName"),
TRI_V8_STD_STRING(itr->second));
} else {
result->Set(TRI_V8_ASCII_STRING("serverName"),
TRI_V8_STD_STRING(id));
}
l->Set((uint32_t)i, result);
}

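The JS_GetDBServers rewrite above replaces serverAliases.at(id), which throws when the key is absent, with a find() lookup and a fallback to the raw id. A small self-contained illustration of that pattern (the map contents are made up):

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, std::string> serverAliases{{"PRMR-1", "DBServer0001"}};
      std::string id = "PRMR-2";  // hypothetical id with no alias entry
      auto itr = serverAliases.find(id);
      // fall back to the plain id instead of throwing like at() would
      std::cout << (itr != serverAliases.end() ? itr->second : id) << "\n";
      return 0;
    }
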
@ -91,8 +91,6 @@ class GeneralCommTask : public SocketTask {
virtual arangodb::Endpoint::TransportType transportType() = 0;
void setStatistics(uint64_t, RequestStatistics*);
 protected:
virtual std::unique_ptr<GeneralResponse> createResponse(
rest::ResponseCode, uint64_t messageId) = 0;
@ -111,6 +109,7 @@ class GeneralCommTask : public SocketTask {
std::string const& errorMessage,
uint64_t messageId) = 0;
void setStatistics(uint64_t, RequestStatistics*);
RequestStatistics* acquireStatistics(uint64_t);
RequestStatistics* statistics(uint64_t);
RequestStatistics* stealStatistics(uint64_t);

@ -303,9 +303,17 @@ void DatabaseFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
&_check30Revisions,
std::unordered_set<std::string>{"true", "false", "fail"}));
// the following option was removed in 3.2
// index-creation is now automatically parallelized via the Boost ASIO thread pool
options->addObsoleteOption(
"--database.index-threads",
"threads to start for parallel background index creation", true);
// the following options were removed in 3.2
options->addObsoleteOption("--database.revision-cache-chunk-size",
"chunk size (in bytes) for the document revisions cache", true);
options->addObsoleteOption("--database.revision-cache-target-size",
"total target size (in bytes) for the document revisions cache", true);
}
void DatabaseFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {

@ -49,7 +49,7 @@ SocketTask::SocketTask(arangodb::EventLoop loop,
double keepAliveTimeout, bool skipInit = false)
: Task(loop, "SocketTask"),
_connectionStatistics(nullptr),
_connectionInfo(connectionInfo),
_connectionInfo(std::move(connectionInfo)),
_readBuffer(TRI_UNKNOWN_MEM_ZONE, READ_BLOCK_SIZE + 1, false),
_writeBuffer(nullptr, nullptr),
_peer(std::move(socket)),

@ -1777,16 +1777,6 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName);
LogicalCollection* collection = documentCollection(trxCollection(cid));
// First see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (options.returnNew) {
orderDitch(cid); // will throw when it fails
}
@ -1817,11 +1807,6 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
return res;
}
if (options.silent && !doingSynchronousReplication) {
// no need to construct the result object
return TRI_ERROR_NO_ERROR;
}
uint8_t const* vpack = result.vpack();
TRI_ASSERT(vpack != nullptr);
@ -1864,6 +1849,15 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
}
// Now see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
@ -1950,7 +1944,7 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
}
}
if (doingSynchronousReplication && options.silent) {
if (options.silent) {
// We needed the results, but do not want to report:
resultBuilder.clear();
}
@ -2078,16 +2072,6 @@ OperationResult Transaction::modifyLocal(
orderDitch(cid); // will throw when it fails
}
// First see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
// Update/replace are a read and a write, let's get the write lock already
// for the read operation:
int res = lock(trxCollection(cid), AccessMode::Type::WRITE);
@ -2125,7 +2109,7 @@ OperationResult Transaction::modifyLocal(
if (res == TRI_ERROR_ARANGO_CONFLICT) {
// still return
if ((!options.silent || doingSynchronousReplication) && !isBabies) {
if (!isBabies) {
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
@ -2138,13 +2122,11 @@ OperationResult Transaction::modifyLocal(
uint8_t const* vpack = result.vpack();
TRI_ASSERT(vpack != nullptr);
if (!options.silent || doingSynchronousReplication) {
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key,
TRI_ExtractRevisionId(VPackSlice(vpack)), actualRevision,
options.returnOld ? previous.vpack() : nullptr ,
options.returnNew ? vpack : nullptr);
}
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key,
TRI_ExtractRevisionId(VPackSlice(vpack)), actualRevision,
options.returnOld ? previous.vpack() : nullptr ,
options.returnNew ? vpack : nullptr);
return TRI_ERROR_NO_ERROR;
};
@ -2173,6 +2155,16 @@ OperationResult Transaction::modifyLocal(
MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
}
// Now see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
// get here, in the single document case, we do not try to replicate
@ -2262,7 +2254,7 @@ OperationResult Transaction::modifyLocal(
}
}
if (doingSynchronousReplication && options.silent) {
if (options.silent) {
// We needed the results, but do not want to report:
resultBuilder.clear();
}
@ -2332,16 +2324,6 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
orderDitch(cid); // will throw when it fails
}
// First see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
VPackBuilder resultBuilder;
TRI_voc_tick_t maxTick = 0;
@ -2380,7 +2362,6 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
if (res != TRI_ERROR_NO_ERROR) {
if (res == TRI_ERROR_ARANGO_CONFLICT &&
(!options.silent || doingSynchronousReplication) &&
!isBabies) {
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
@ -2388,10 +2369,8 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
return res;
}
if (!options.silent || doingSynchronousReplication) {
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
}
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
return TRI_ERROR_NO_ERROR;
};
@ -2418,6 +2397,16 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
}
// Now see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
// get here, in the single document case, we do not try to replicate
@ -2505,7 +2494,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
}
}
if (doingSynchronousReplication && options.silent) {
if (options.silent) {
// We needed the results, but do not want to report:
resultBuilder.clear();
}

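The Transaction hunks above move the follower lookup from before the local write to after it, so the replication decision reflects the follower set at write time. A schematic of that ordering under simplified, assumed types (currentFollowers() is a stand-in for collection->followers()->get(), and the local write is a placeholder):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    using ServerID = std::string;

    static std::shared_ptr<std::vector<ServerID> const> currentFollowers() {
      // stand-in for the follower list maintained by the collection
      return std::make_shared<std::vector<ServerID> const>(
          std::vector<ServerID>{"PRMR-2"});
    }

    int main() {
      bool writeOk = true;  // 1. the local write happens first (placeholder)

      // 2. only afterwards fetch the followers and decide on replication
      auto followers = currentFollowers();
      bool doingSynchronousReplication = followers->size() > 0;

      if (doingSynchronousReplication && writeOk) {
        std::cout << "replicating to " << followers->size() << " follower(s)\n";
      }
      return 0;
    }
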
@ -838,7 +838,7 @@ static TRI_action_result_t ExecuteActionVocbase(
// copy suffix, which comes from the action:
std::string path = request->prefix();
v8::Handle<v8::Array> suffixArray = v8::Array::New(isolate);
std::vector<std::string> const& suffixes = request->suffixes();
std::vector<std::string> const& suffixes = request->suffixes(); // TODO: does this need to be decodedSuffixes()??
uint32_t index = 0;
char const* sep = "";

@ -1,17 +1,12 @@
# -*- mode: CMAKE; -*-
# these are the install targets for the client package.
# we can't use RUNTIME DESTINATION here.
# include(/tmp/dump_vars.cmake)
message( "CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}/ CMAKE_INSTALL_SBINDIR ${CMAKE_INSTALL_SBINDIR}")
set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
execute_process(COMMAND mkdir -p ${STRIP_DIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_SBINDIR})
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_SBINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
"${STRIP_DIR}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
)

@ -2,63 +2,30 @@
# these are the install targets for the client package.
# we can't use RUNTIME DESTINATION here.
set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
execute_process(COMMAND mkdir -p ${STRIP_DIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}"
"${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}"
"${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}"
"${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}"
"${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}"
"${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}")

@ -19,9 +19,9 @@ endif()
# debug info directory:
if (${CMAKE_INSTALL_LIBDIR} STREQUAL "usr/lib64")
# some systems have weird places for usr/lib:
set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/${CMAKE_PROJECT_NAME}")
set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/")
else ()
set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_PROJECT_NAME}")
set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/")
endif ()
set(CMAKE_INSTALL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_SYSCONFDIR}/${CMAKE_PROJECT_NAME}")

@ -157,3 +157,39 @@ macro(to_native_path sourceVarName)
endif()
set("INC_${sourceVarName}" ${myVar})
endmacro()
macro(install_debinfo
STRIP_DIR
USER_SUB_DEBINFO_DIR
USER_FILE
USER_STRIP_FILE)
set(SUB_DEBINFO_DIR ${USER_SUB_DEBINFO_DIR})
set(FILE ${USER_FILE})
set(STRIP_FILE ${USER_STRIP_FILE})
execute_process(COMMAND mkdir -p ${STRIP_DIR})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(
COMMAND ${FILE_EXECUTABLE} ${FILE}
OUTPUT_VARIABLE FILE_RESULT)
string(REGEX
REPLACE ".*=([a-z0-9]*),.*" "\\1"
FILE_CHECKSUM
${FILE_RESULT}
)
if (NOT ${FILE_CHECKSUM} STREQUAL "")
string(SUBSTRING ${FILE_CHECKSUM} 0 2 SUB_DIR)
string(SUBSTRING ${FILE_CHECKSUM} 2 -1 STRIP_FILE)
set(SUB_DEBINFO_DIR .build-id/${SUB_DIR})
endif()
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${SUB_DEBINFO_DIR})
endmacro()

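The install_debinfo macro above derives the install location from the binary's build id: the first two hex digits become a directory under .build-id/ and the remainder becomes the file name. A sketch of that split (the id value is hypothetical):

    #include <iostream>
    #include <string>

    int main() {
      std::string buildId = "4f3c2a9d8e7b";  // hypothetical build id extracted via file(1)
      std::string subDir = ".build-id/" + buildId.substr(0, 2);  // .build-id/4f
      std::string fileName = buildId.substr(2);                  // 3c2a9d8e7b
      std::cout << subDir << "/" << fileName << "\n";
      return 0;
    }
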
@ -1,7 +1,7 @@
################################################################################
# the client package is a complete cmake sub package.
################################################################################
project(PACKAGE-DBG)
project(@CMAKE_PROJECT_NAME@)
cmake_minimum_required(VERSION 2.8)
################################################################################
@ -15,6 +15,9 @@ set(CROSS_COMPILING @CROSS_COMPILING@)
set(CMAKE_INSTALL_BINDIR @CMAKE_INSTALL_BINDIR@)
set(CMAKE_INSTALL_FULL_BINDIR @CMAKE_INSTALL_FULL_BINDIR@)
set(CMAKE_INSTALL_SBINDIR @CMAKE_INSTALL_SBINDIR@)
set(CMAKE_INSTALL_FULL_SBINDIR @CMAKE_INSTALL_FULL_SBINDIR@)
set(CMAKE_INSTALL_DATAROOTDIR @CMAKE_INSTALL_DATAROOTDIR@)
set(CMAKE_INSTALL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_DATAROOTDIR_ARANGO@)
set(CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO@)

@ -69,6 +69,10 @@
return shortName;
},
getDatabaseShortName: function (id) {
return this.getCoordinatorShortName(id);
},
getDatabaseServerId: function (shortname) {
var id;
if (window.clusterHealth) {

@ -186,14 +186,15 @@
async: true,
success: function (data) {
if (data.id) {
arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
console.log(toServer);
arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
window.setTimeout(function () {
window.App.shardsView.render();
}, 3000);
}
},
error: function () {
arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
}
});

@ -498,13 +498,20 @@ router.get("/coordshort", function(req, res) {
if (Array.isArray(coordinators)) {
var coordinatorStats = coordinators.map(coordinator => {
var endpoint = global.ArangoClusterInfo.getServerEndpoint(coordinator);
var response = download(endpoint.replace(/^tcp/, "http") + "/_db/_system/_admin/aardvark/statistics/short?count=" + coordinators.length, '', {headers: {}});
try {
return JSON.parse(response.body);
} catch (e) {
console.error("Couldn't read statistics response:", response.body);
throw e;
if (endpoint !== "") {
var response = download(endpoint.replace(/^tcp/, "http") + "/_db/_system/_admin/aardvark/statistics/short?count=" + coordinators.length, '', {headers: {}});
if (response.body === undefined) {
console.warn("cannot contact coordinator " + coordinator + " on endpoint " + endpoint);
} else {
try {
return JSON.parse(response.body);
} catch (e) {
console.error("Couldn't read statistics response:", response.body);
throw e;
}
}
}
return {};
});
mergeHistory(coordinatorStats);

@ -179,7 +179,7 @@ const optionsDefaults = {
'skipBoost': false,
'skipEndpoints': false,
'skipGeo': false,
'skipLogAnalysis': false,
'skipLogAnalysis': true,
'skipMemoryIntense': false,
'skipNightly': true,
'skipNondeterministic': false,

@ -455,6 +455,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
// synchronize this shard from the leader
// this function will throw if anything goes wrong
var startTime = new Date();
var isStopping = require('internal').isStopping;
var ourselves = global.ArangoServerState.id();
@ -487,8 +488,9 @@ function synchronizeOneShard (database, shard, planId, leader) {
planned[0] !== leader) {
// Things have changed again, simply terminate:
terminateAndStartOther();
console.debug('synchronizeOneShard: cancelled, %s/%s, %s/%s',
database, shard, database, planId);
let endTime = new Date();
console.debug('synchronizeOneShard: cancelled, %s/%s, %s/%s, started %s, ended %s',
database, shard, database, planId, startTime.toString(), endTime.toString());
return;
}
var current = [];
@ -502,8 +504,9 @@ function synchronizeOneShard (database, shard, planId, leader) {
}
// We are already there, this is rather strange, but never mind:
terminateAndStartOther();
console.debug('synchronizeOneShard: already done, %s/%s, %s/%s',
database, shard, database, planId);
let endTime = new Date();
console.debug('synchronizeOneShard: already done, %s/%s, %s/%s, started %s, ended %s',
database, shard, database, planId, startTime.toString(), endTime.toString());
return;
}
console.debug('synchronizeOneShard: waiting for leader, %s/%s, %s/%s',
@ -524,9 +527,16 @@ function synchronizeOneShard (database, shard, planId, leader) {
if (isStopping()) {
throw 'server is shutting down';
}
let startTime = new Date();
sy = rep.syncCollection(shard,
{ endpoint: ep, incremental: true,
keepBarrier: true, useCollectionId: false });
let endTime = new Date();
let longSync = false;
if (endTime - startTime > 5000) {
console.error('synchronizeOneShard: long call to syncCollection for shard', shard, JSON.stringify(sy), "start time: ", startTime.toString(), "end time: ", endTime.toString());
longSync = true;
}
if (sy.error) {
console.error('synchronizeOneShard: could not initially synchronize',
'shard ', shard, sy);
@ -534,7 +544,15 @@ function synchronizeOneShard (database, shard, planId, leader) {
} else {
if (sy.collections.length === 0 ||
sy.collections[0].name !== shard) {
if (longSync) {
console.error('synchronizeOneShard: long sync, before cancelBarrier',
new Date().toString());
}
cancelBarrier(ep, database, sy.barrierId);
if (longSync) {
console.error('synchronizeOneShard: long sync, after cancelBarrier',
new Date().toString());
}
throw 'Shard ' + shard + ' seems to be gone from leader!';
} else {
// Now start a read transaction to stop writes:
@ -594,14 +612,17 @@ function synchronizeOneShard (database, shard, planId, leader) {
} else if (err2 && err2.errorNum === 1402 && err2.errorMessage.match(/HTTP 404/)) {
logLevel = 'debug';
}
console[logLevel]("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s",
database, shard, database, planId, JSON.stringify(err2));
let endTime = new Date();
console[logLevel]("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s, started: %s, ended: %s",
database, shard, database, planId, JSON.stringify(err2),
startTime.toString(), endTime.toString());
}
}
// Tell others that we are done:
terminateAndStartOther();
console.debug('synchronizeOneShard: done, %s/%s, %s/%s',
database, shard, database, planId);
let endTime = new Date();
console.debug('synchronizeOneShard: done, %s/%s, %s/%s, started: %s, ended: %s',
database, shard, database, planId, startTime.toString(), endTime.toString());
}
// /////////////////////////////////////////////////////////////////////////////
@ -1789,8 +1810,7 @@ function shardDistribution () {
var result = {};
for (var i = 0; i < colls.length; ++i) {
var collName = colls[i].name();
var collInfo = global.ArangoClusterInfo.getCollectionInfo(dbName,
collName);
var collInfo = global.ArangoClusterInfo.getCollectionInfo(dbName, collName);
var shards = collInfo.shards;
var collInfoCurrent = {};
var shardNames = Object.keys(shards);

@ -152,7 +152,11 @@ exports.manage = function () {
});
// switch back into previous database
db._useDatabase(initialDatabase);
try {
db._useDatabase(initialDatabase);
} catch (err) {
db._useDatabase('_system');
}
};
exports.run = function () {

@ -55,6 +55,7 @@ function optimizerRuleTestSuite() {
var ruleName = "geoindex";
var colName = "UnitTestsAqlOptimizer" + ruleName.replace(/-/g, "_");
var colName2 = colName + "2";
var geocol;
var sortArray = function (l, r) {
@ -124,11 +125,21 @@ function optimizerRuleTestSuite() {
internal.db._drop(colName);
geocol = internal.db._create(colName);
geocol.ensureIndex({type:"geo", fields:["lat","lon"]});
for (var lat=-40; lat <=40 ; ++lat){
for (var lon=-40; lon <= 40; ++lon){
var lat, lon;
for (lat=-40; lat <=40 ; ++lat) {
for (lon=-40; lon <= 40; ++lon) {
geocol.insert({lat,lon});
}
}
internal.db._drop(colName2);
geocol = internal.db._create(colName2);
geocol.ensureIndex({type:"geo", fields:["loca.tion.lat","loca.tion.lon"]});
for (lat=-40; lat <=40 ; ++lat) {
for (lon=-40; lon <= 40; ++lon) {
geocol.insert({ loca : { tion : { lat , lon } } });
}
}
},
////////////////////////////////////////////////////////////////////////////////
@ -137,6 +148,7 @@ function optimizerRuleTestSuite() {
tearDown : function () {
internal.db._drop(colName);
internal.db._drop(colName2);
geocol = null;
},
@ -145,7 +157,13 @@ function optimizerRuleTestSuite() {
geocol.ensureIndex({ type: "hash", fields: [ "y", "z" ], unique: false });
var queries = [
{ string : "FOR d IN " + colName + " SORT distance(d.lat,d.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
{ string : "FOR d IN " + colName + " SORT distance(d.lat, d.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
cluster : false,
sort : false,
filter : false,
index : true
},
{ string : "FOR d IN " + colName2 + " SORT distance(d.loca.tion.lat, d.loca.tion.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
cluster : false,
sort : false,
filter : false,
@ -213,11 +231,15 @@ function optimizerRuleTestSuite() {
testRuleRemoveNodes : function () {
if(enabled.removeNodes){
var queries = [
[ "FOR d IN " + colName + " SORT distance(d.lat,d.lon, 0 ,0 ) ASC LIMIT 5 RETURN d", false, false, false ],
[ "FOR d IN " + colName + " SORT distance(0, 0, d.lat,d.lon ) ASC LIMIT 5 RETURN d", false, false, false ],
[ "FOR d IN " + colName + " FILTER distance(0, 0, d.lat,d.lon ) < 111200 RETURN d", false, false, false ],
[ "FOR d IN " + colName + " SORT distance(d.lat,d.lon, 0 ,0 ) ASC LIMIT 5 RETURN d", false, false, false ],
[ "FOR d IN " + colName + " SORT distance(0, 0, d.lat,d.lon ) ASC LIMIT 5 RETURN d", false, false, false ],
[ "FOR d IN " + colName + " FILTER distance(0, 0, d.lat,d.lon ) < 111200 RETURN d", false, false, false ],
// [ "FOR i IN 1..2 FOR d IN geocol SORT distance(i,2,d.lat,d.lon) ASC LIMIT 5 RETURN d", false, false, false ],
];
var queries2 = [
[ "FOR d IN " + colName2 + " SORT distance(d.loca.tion.lat,d.loca.tion.lon, 0 ,0 ) ASC LIMIT 5 RETURN d", false, false, false ]
];
var expected = [
[[0,0], [-1,0], [0,1], [1,0], [0,-1]],
@ -234,6 +256,16 @@ function optimizerRuleTestSuite() {
assertEqual(expected[qindex].sort(),pairs.sort());
//expect(expected[qindex].sort()).to.be.equal(result.json.sort())
});
queries2.forEach(function(query, qindex) {
var result = AQL_EXECUTE(query[0]);
expect(expected[qindex].length).to.be.equal(result.json.length);
var pairs = result.json.map(function(res){
return [res.loca.tion.lat,res.loca.tion.lon];
});
assertEqual(expected[qindex].sort(),pairs.sort());
//expect(expected[qindex].sort()).to.be.equal(result.json.sort())
});
}
}, // testRuleSort

@ -0,0 +1,92 @@
/*jshint globalstrict:false, strict:false, maxlen: 500 */
/*global assertTrue, assertFalse, assertEqual, AQL_EXECUTE */

////////////////////////////////////////////////////////////////////////////////
/// @brief tests for index usage
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");
var db = require("@arangodb").db;

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function sortTestSuite () {
var c;

return {
setUp : function () {
db._drop("UnitTestsCollection");
c = db._create("UnitTestsCollection", { numberOfShards: 8 });

for (var i = 0; i < 11111; ++i) {
c.save({ _key: "test" + i, value: i });
}
},

tearDown : function () {
db._drop("UnitTestsCollection");
},

////////////////////////////////////////////////////////////////////////////////
/// @brief test without index
////////////////////////////////////////////////////////////////////////////////

testSortNoIndex : function () {
var result = AQL_EXECUTE("FOR doc IN " + c.name() + " SORT doc.value RETURN doc.value").json;
assertEqual(11111, result.length);

var last = -1;
for (var i = 0; i < result.length; ++i) {
assertTrue(result[i] > last);
last = result[i];
}
},

////////////////////////////////////////////////////////////////////////////////
/// @brief test with index
////////////////////////////////////////////////////////////////////////////////

testSortSkiplist : function () {
c.ensureIndex({ type: "skiplist", fields: [ "value" ]});
var result = AQL_EXECUTE("FOR doc IN " + c.name() + " SORT doc.value RETURN doc.value").json;
assertEqual(11111, result.length);

var last = -1;
for (var i = 0; i < result.length; ++i) {
assertTrue(result[i] > last);
last = result[i];
}
}

};
}

jsunity.run(sortTestSuite);

return jsunity.done();

@ -1141,6 +1141,52 @@ bool isSuffix(std::string const& str, std::string const& postfix) {
}
}

std::string urlDecodePath(std::string const& str) {
std::string result;
// reserve enough room so we do not need to re-alloc
result.reserve(str.size() + 16);

char const* src = str.c_str();
char const* end = src + str.size();

while (src < end) {
if (*src == '%') {
if (src + 2 < end) {
int h1 = hex2int(src[1], -1);
int h2 = hex2int(src[2], -1);

if (h1 == -1) {
++src;
} else {
if (h2 == -1) {
result.push_back(h1);
src += 2;
} else {
result.push_back(h1 << 4 | h2);
src += 3;
}
}
} else if (src + 1 < end) {
int h1 = hex2int(src[1], -1);

if (h1 == -1) {
++src;
} else {
result.push_back(h1);
src += 2;
}
} else {
++src;
}
} else {
result.push_back(*src);
++src;
}
}

return result;
}

std::string urlDecode(std::string const& str) {
std::string result;
// reserve enough room so we do not need to re-alloc

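To see the decoding rule implemented by urlDecodePath above in action: each well-formed %XX pair becomes a single byte, while anything else is copied through. A self-contained sketch (hexValue is a stand-in for the library's hex2int helper):

    #include <iostream>
    #include <string>

    static int hexValue(char c, int deflt) {
      if (c >= '0' && c <= '9') { return c - '0'; }
      if (c >= 'a' && c <= 'f') { return c - 'a' + 10; }
      if (c >= 'A' && c <= 'F') { return c - 'A' + 10; }
      return deflt;
    }

    int main() {
      std::string in = "%2Fapi%2Fv1";
      std::string out;
      for (size_t i = 0; i < in.size();) {
        if (in[i] == '%' && i + 2 < in.size()) {
          int h1 = hexValue(in[i + 1], -1);
          int h2 = hexValue(in[i + 2], -1);
          if (h1 != -1 && h2 != -1) {
            out.push_back(static_cast<char>(h1 << 4 | h2));  // one decoded byte
            i += 3;
            continue;
          }
        }
        out.push_back(in[i++]);  // copy anything that is not a valid escape
      }
      std::cout << out << "\n";  // prints: /api/v1
      return 0;
    }
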
@ -299,6 +299,7 @@ bool isSuffix(std::string const& str, std::string const& postfix);
/// @brief url decodes the string
////////////////////////////////////////////////////////////////////////////////

std::string urlDecodePath(std::string const& str);
std::string urlDecode(std::string const& str);

////////////////////////////////////////////////////////////////////////////////

@ -280,7 +280,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
// hostname and port (e.g. [address]:port)
if (found != std::string::npos && found > 2 && found + 2 < copy.size()) {
uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 2));
int64_t value = StringUtils::int64(copy.substr(found + 2));
// check port over-/underrun
if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
return nullptr;
}
uint16_t port = static_cast<uint16_t>(value);
std::string host = copy.substr(1, found - 1);

return new EndpointIpV6(type, protocol, encryption, listenBacklog,
@ -306,7 +312,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
// hostname and port
if (found != std::string::npos && found + 1 < copy.size()) {
uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 1));
int64_t value = StringUtils::int64(copy.substr(found + 1));
// check port over-/underrun
if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
return nullptr;
}
uint16_t port = static_cast<uint16_t>(value);
std::string host = copy.substr(0, found);

return new EndpointIpV4(type, protocol, encryption, listenBacklog,

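The Endpoint changes above parse the port into a wide signed integer first and validate it against the uint16_t range before narrowing, instead of silently wrapping. A minimal sketch of that check (std::stoll stands in for StringUtils::int64):

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <string>

    static bool parsePort(std::string const& s, uint16_t& port) {
      int64_t value = std::stoll(s);  // stand-in for StringUtils::int64
      if (value < (std::numeric_limits<uint16_t>::min)() ||
          value > (std::numeric_limits<uint16_t>::max)()) {
        return false;  // out of range: reject instead of wrapping around
      }
      port = static_cast<uint16_t>(value);
      return true;
    }

    int main() {
      uint16_t port = 0;
      std::cout << parsePort("8529", port) << " ";   // 1 (ok), port == 8529
      std::cout << parsePort("70000", port) << "\n"; // 0 (rejected, would overflow)
      return 0;
    }
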
@ -88,8 +88,9 @@ class LoggerStream {
size_t i = 0;
size_t const n = obj.size();
for (auto const& it : obj) {
_out << it;
if (++i < n) {
_out << it << ", ";
_out << ", ";
}
}
_out << ']';
@ -102,8 +103,9 @@ class LoggerStream {
size_t i = 0;
size_t const n = obj.size();
for (auto const& it : obj) {
_out << it;
if (++i < n) {
_out << it << ", ";
_out << ", ";
}
}
_out << '}';
@ -116,8 +118,9 @@ class LoggerStream {
size_t i = 0;
size_t n = obj.size();
for (auto const& it : obj) {
_out << it;
if (++i < n) {
_out << it << ", ";
_out << ", ";
}
_out << it.first << " => " << it.second;
}

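The LoggerStream fix above prints each element exactly once and emits the ", " separator only between elements; previously the element was written a second time inside the separator branch. The pattern in isolation:

    #include <iostream>
    #include <vector>

    template <typename T>
    static void printJoined(std::ostream& out, std::vector<T> const& obj) {
      size_t i = 0;
      size_t const n = obj.size();
      out << '[';
      for (auto const& it : obj) {
        out << it;        // element printed once
        if (++i < n) {
          out << ", ";    // separator only, no duplicate element
        }
      }
      out << ']';
    }

    int main() {
      printJoined(std::cout, std::vector<int>{1, 2, 3});  // [1, 2, 3]
      std::cout << "\n";
      return 0;
    }
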
@ -217,7 +217,7 @@ std::vector<std::string> GeneralRequest::decodedSuffixes() const {
result.reserve(_suffixes.size());

for (auto const& it : _suffixes) {
result.emplace_back(StringUtils::urlDecode(it));
result.emplace_back(StringUtils::urlDecodePath(it));
}
return result;
}

@ -27,14 +27,14 @@

#include "Basics/Common.h"

#include "Endpoint/ConnectionInfo.h"
#include "Rest/CommonDefines.h"

#include <velocypack/Builder.h>
#include <velocypack/Dumper.h>
#include <velocypack/Options.h>
#include <velocypack/velocypack-aliases.h>

#include "Endpoint/ConnectionInfo.h"
#include "Rest/CommonDefines.h"

namespace arangodb {
namespace velocypack {
class Builder;
@ -95,9 +95,6 @@ class GeneralRequest {
void setProtocol(char const* protocol) { _protocol = protocol; }

ConnectionInfo const& connectionInfo() const { return _connectionInfo; }
void setConnectionInfo(ConnectionInfo const& connectionInfo) {
_connectionInfo = connectionInfo;
}

uint64_t clientTaskId() const { return _clientTaskId; }
void setClientTaskId(uint64_t clientTaskId) { _clientTaskId = clientTaskId; }
@ -126,6 +123,12 @@ class GeneralRequest {
void setRequestPath(std::string const& requestPath) {
_requestPath = requestPath;
}
void setRequestPath(char const* begin) {
_requestPath = std::string(begin);
}
void setRequestPath(char const* begin, char const* end) {
_requestPath = std::string(begin, end - begin);
}

// The request path consists of the URL without the host and without any
// parameters. The request path is split into two parts: the prefix, namely

@ -296,7 +296,7 @@ void HttpRequest::parseHeader(size_t length) {
}

if (pathBegin < pathEnd) {
setRequestPath(pathBegin);
setRequestPath(pathBegin, pathEnd);
}

if (paramBegin < paramEnd) {

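The parseHeader change above hands the path to the new two-argument setRequestPath overload, which builds the string from a [begin, end) range instead of scanning for a NUL terminator, so a trailing query string is not picked up. Illustration with a made-up buffer:

    #include <iostream>
    #include <string>

    int main() {
      char const buf[] = "/path?x=1";
      char const* begin = buf;
      char const* end = buf + 5;  // pathEnd would point at the '?'
      std::string requestPath(begin, end - begin);
      std::cout << requestPath << "\n";  // prints: /path
      return 0;
    }
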
@ -0,0 +1,66 @@
#!/bin/bash

##python3-setuptools
##
##python setup.py install
##
##node npm
##
##https://github.com/GitbookIO/gitbook
## npm install gitbook-cli -g
##
## http://calibre-ebook.com/download

test_tools(){
if ! type easy_install3 >> /dev/null; then
echo "you are missing setuptools"
echo "apt-get install python-setuptools"
exit 1
fi

if ! type node >> /dev/null; then
echo "you are missing node"
echo "apt-get install nodejs nodejs-legacy"
exit 1
fi

if ! type npm >> /dev/null; then
echo "you are missing npm"
echo "apt-get install npm"
exit 1
fi

if ! type calibre >> /dev/null; then
echo "you are missing calibre"
echo "apt-get install calibre-bin"
exit 1
fi
}

install_tools(){
(
if ! [[ -f markdown-pp ]]; then
git clone https://github.com/arangodb-helper/markdown-pp/
fi
cd markdown-pp
python2 setup.py install --user
)
npm install gitbook-cli
}

main(){
#test for basic tools
test_tools

#cd into target dir
mkdir -p "$1"
cd $1 || { echo "unable to change into $1"; exit 1; }

install_tools
}

main "$@"