mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into engine-api
This commit is contained in:
commit ab11410911

CHANGELOG (25 changed lines)
@@ -1,16 +1,39 @@
 devel
 -----
 
+
+v3.2.alpha1 (2017-02-05)
+------------------------
+
+* added figure `httpRequests` to AQL query statistics
+
+* removed revisions cache intermediate layer implementation
+
+* obsoleted startup options `--database.revision-cache-chunk-size` and
+  `--database.revision-cache-target-size`
+
+* fix potential port number over-/underruns
+
 * added startup option `--log.shorten-filenames` for controlling whether filenames
   in log message should be shortened to just the filename with the absolute path
 
-* removed IndexThreadFeature, made --database.index-threads option obsolete
+* removed IndexThreadFeature, made `--database.index-threads` option obsolete
 
 * changed index filling to make it more parallel, dispatch tasks to boost::asio
 
 * more detailed stacktraces in Foxx apps
 
+
+v3.1.11 (XXXX-XX-XX)
+--------------------
+
+* fixed sort issue in cluster, occurring when one of the local sort buffers of a
+  GatherNode was empty
+
+* reduce number of HTTP requests made for certain kinds of join queries in cluster,
+  leading to speedup of some join queries
+
+
 v3.1.10 (2017-XX-XX)
 --------------------
 
@@ -489,6 +489,8 @@ if (USE_MAINTAINER_MODE)
   find_program(AWK_EXECUTABLE awk)
 endif ()
 
+find_program(FILE_EXECUTABLE file)
+
 ################################################################################
 ## FAILURE TESTS
 ################################################################################
@@ -90,6 +90,42 @@ Create a geo index for a hash array attribute:
 @END_EXAMPLE_ARANGOSH_OUTPUT
 @endDocuBlock geoIndexCreateForArrayAttribute2
 
+Use GeoIndex with AQL SORT statement:
+
+@startDocuBlockInline geoIndexSortOptimization
+@EXAMPLE_ARANGOSH_OUTPUT{geoIndexSortOptimization}
+~db._create("geoSort")
+db.geoSort.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
+| for (i = -90; i <= 90; i += 10) {
+| for (j = -180; j <= 180; j += 10) {
+| db.geoSort.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
+| }
+}
+var query = "FOR doc in geoSort SORT distance(doc.latitude, doc.longitude, 0, 0) LIMIT 5 RETURN doc"
+db._explain(query, {}, {colors: false});
+db._query(query);
+~db._drop("geoSort")
+@END_EXAMPLE_ARANGOSH_OUTPUT
+@endDocuBlock geoIndexSortOptimization
+
+Use GeoIndex with AQL FILTER statement:
+
+@startDocuBlockInline geoIndexFilterOptimization
+@EXAMPLE_ARANGOSH_OUTPUT{geoIndexFilterOptimization}
+~db._create("geoFilter")
+db.geoFilter.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
+| for (i = -90; i <= 90; i += 10) {
+| for (j = -180; j <= 180; j += 10) {
+| db.geoFilter.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
+| }
+}
+var query = "FOR doc in geoFilter FILTER distance(doc.latitude, doc.longitude, 0, 0) < 2000 RETURN doc"
+db._explain(query, {}, {colors: false});
+db._query(query);
+~db._drop("geoFilter")
+@END_EXAMPLE_ARANGOSH_OUTPUT
+@endDocuBlock geoIndexFilterOptimization
+
+
 <!-- js/common/modules/@arangodb/arango-collection-common.js-->
 @startDocuBlock collectionGeo
@@ -273,8 +273,10 @@ The geo index provides operations to find documents with coordinates nearest to
 comparison coordinate, and to find documents with coordinates that are within a specifiable
 radius around a comparison coordinate.
 
-The geo index is used via dedicated functions in AQL or the simple queries functions,
-but will not be used for other types of queries or conditions.
+The geo index is used via dedicated functions in AQL or the simple queries
+functions. It is also applied implicitly when an AQL SORT or FILTER uses the
+distance function. Otherwise it will not be used for other types of queries
+or conditions.
 
 
 ### Fulltext Index
@@ -71,7 +71,10 @@ different usage scenarios:
 
     { "coords": [ 50.9406645, 6.9599115 ] }
 
-Geo indexes will only be invoked via special functions.
+Geo indexes will be invoked via special functions or AQL optimization. The
+optimization can be triggered when a collection with a geo index is enumerated
+and a SORT or FILTER statement is used in conjunction with the distance
+function.
 
 - fulltext index: a fulltext index can be used to index all words contained in
   a specific attribute of all documents in a collection. Only words with a
@@ -120,11 +120,13 @@ fi
 VERSION_MAJOR=`echo $VERSION | awk -F. '{print $1}'`
 VERSION_MINOR=`echo $VERSION | awk -F. '{print $2}'`
 VERSION_REVISION=`echo $VERSION | awk -F. '{print $3}'`
+VERSION_PACKAGE="1"
 
 cat CMakeLists.txt \
   | sed -e "s~set(ARANGODB_VERSION_MAJOR.*~set(ARANGODB_VERSION_MAJOR \"$VERSION_MAJOR\")~" \
   | sed -e "s~set(ARANGODB_VERSION_MINOR.*~set(ARANGODB_VERSION_MINOR \"$VERSION_MINOR\")~" \
   | sed -e "s~set(ARANGODB_VERSION_REVISION.*~set(ARANGODB_VERSION_REVISION \"$VERSION_REVISION\")~" \
+  | sed -e "s~set(ARANGODB_PACKAGE_REVISION.*~set(ARANGODB_PACKAGE_REVISION \"$VERSION_PACKAGE\")~" \
 > CMakeLists.txt.tmp
 
 mv CMakeLists.txt.tmp CMakeLists.txt
@@ -49,8 +49,6 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
 // --SECTION-- macros
 // -----------------------------------------------------------------------------
 
-#define DELETE_ENDPOINT(e) if (e != 0) delete e;
-
 #define FACTORY_NAME(name) name ## Factory
 
 #define FACTORY(name, specification) arangodb::Endpoint::FACTORY_NAME(name)(specification)
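The removed DELETE_ENDPOINT macro guarded the delete with a null check. Since
`delete` on a null pointer is a well-defined no-op in C++, the plain `delete e;`
used throughout the hunks below is equivalent. A minimal, self-contained sketch
of this fact, independent of ArangoDB's code:

    #include <cstdio>

    struct Endpoint {
      ~Endpoint() { std::puts("destroyed"); }
    };

    int main() {
      Endpoint* e = nullptr;
      delete e;            // no-op: deleting a null pointer is safe
      e = new Endpoint();
      delete e;            // prints "destroyed"
      return 0;
    }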
@@ -58,12 +56,12 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
 #define CHECK_ENDPOINT_FEATURE(type, specification, feature, expected) \
   e = FACTORY(type, specification); \
   BOOST_CHECK_EQUAL((expected), (e->feature())); \
-  DELETE_ENDPOINT(e);
+  delete e;
 
 #define CHECK_ENDPOINT_SERVER_FEATURE(type, specification, feature, expected) \
   e = arangodb::Endpoint::serverFactory(specification, 1, true); \
   BOOST_CHECK_EQUAL((expected), (e->feature())); \
-  DELETE_ENDPOINT(e);
+  delete e;
 
 // -----------------------------------------------------------------------------
 // --SECTION-- setup / tear-down
@@ -118,6 +116,11 @@ BOOST_AUTO_TEST_CASE (EndpointInvalid) {
   BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("ssl@tcp://127.0.0.1:8529"));
   BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:8529"));
   BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:"));
+
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65536"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65537"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:-1"));
+  BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:6555555555"));
 }
 
 ////////////////////////////////////////////////////////////////////////////////
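The new cases above assert that clientFactory rejects port values outside the
valid TCP range, matching the CHANGELOG entry "fix potential port number
over-/underruns". A minimal sketch of the kind of range check these tests
exercise — the helper name is illustrative, not ArangoDB's actual API:

    #include <cerrno>
    #include <cstdint>
    #include <cstdlib>

    // Accepts only a clean decimal number in 1..65535; rejects negative
    // values and anything that would overflow a 16-bit port.
    static bool parsePort(const char* s, uint16_t& port) {
      errno = 0;
      char* end = nullptr;
      long long v = std::strtoll(s, &end, 10);
      if (errno != 0 || end == s || *end != '\0') return false;
      if (v < 1 || v > 65535) return false;
      port = static_cast<uint16_t>(v);
      return true;
    }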
@@ -491,7 +494,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer1) {
 
   e = arangodb::Endpoint::serverFactory("tcp://127.0.0.1", 1, true);
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -503,7 +506,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer2) {
 
   e = arangodb::Endpoint::serverFactory("ssl://127.0.0.1", 1, true);
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -516,7 +519,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer3) {
 
   e = arangodb::Endpoint::serverFactory("unix:///tmp/socket", 1, true);
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 #endif
 
@@ -529,7 +532,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient1) {
 
   e = arangodb::Endpoint::clientFactory("tcp://127.0.0.1");
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -541,7 +544,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient2) {
 
   e = arangodb::Endpoint::clientFactory("ssl://127.0.0.1");
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -554,7 +557,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient3) {
 
   e = arangodb::Endpoint::clientFactory("unix:///tmp/socket");
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 #endif
 
@@ -575,7 +578,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerTcpIpv4WithPort) {
   BOOST_CHECK_EQUAL(667, e->port());
   BOOST_CHECK_EQUAL("127.0.0.1:667", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -596,7 +599,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerUnix) {
   BOOST_CHECK_EQUAL(0, e->port());
   BOOST_CHECK_EQUAL("localhost", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 #endif
 
@@ -617,7 +620,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientSslIpV6WithPortHttp) {
   BOOST_CHECK_EQUAL(43425, e->port());
   BOOST_CHECK_EQUAL("[0001:0002:0003:0004:0005:0006:0007:0008]:43425", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -637,7 +640,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientTcpIpv6WithoutPort) {
   BOOST_CHECK_EQUAL(8529, e->port());
   BOOST_CHECK_EQUAL("[::]:8529", e->hostAndPort());
   BOOST_CHECK_EQUAL(false, e->isConnected());
-  DELETE_ENDPOINT(e);
+  delete e;
 }
 
 BOOST_AUTO_TEST_SUITE_END()
@@ -38,6 +38,7 @@
 using namespace arangodb::basics;
 
 static bool Initialized = false;
+static uint64_t counter = 0;
 
 // -----------------------------------------------------------------------------
 // --SECTION-- setup / tear-down
@@ -73,8 +74,6 @@ struct CFilesSetup {
   }
 
   StringBuffer* writeFile (const char* blob) {
-    static uint64_t counter = 0;
-
     StringBuffer* filename = new StringBuffer(TRI_UNKNOWN_MEM_ZONE);
     filename->appendText(_directory);
     filename->appendText("/tmp-");
@@ -108,6 +107,71 @@ struct CFilesSetup {
 
 BOOST_FIXTURE_TEST_SUITE(CFilesTest, CFilesSetup)
 
+BOOST_AUTO_TEST_CASE (tst_createdirectory) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename = out.str();
+  long unused1;
+  std::string unused2;
+  int res = TRI_CreateDirectory(filename.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename.c_str()));
+
+  res = TRI_RemoveDirectory(filename.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename.c_str()));
+}
+
+BOOST_AUTO_TEST_CASE (tst_createdirectoryrecursive) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename1 = out.str();
+  out << "/abc";
+  std::string filename2 = out.str();
+
+  long unused1;
+  std::string unused2;
+  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
+
+  res = TRI_RemoveDirectory(filename1.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
+}
+
+BOOST_AUTO_TEST_CASE (tst_removedirectorydeterministic) {
+  std::ostringstream out;
+  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";
+
+  std::string filename1 = out.str();
+  out << "/abc";
+  std::string filename2 = out.str();
+
+  long unused1;
+  std::string unused2;
+  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
+  BOOST_CHECK_EQUAL(0, res);
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
+
+  res = TRI_RemoveDirectoryDeterministic(filename1.c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
+  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief test file exists
 ////////////////////////////////////////////////////////////////////////////////
@@ -116,6 +180,7 @@ BOOST_AUTO_TEST_CASE (tst_existsfile) {
   StringBuffer* filename = writeFile("");
   BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename->c_str()));
   TRI_UnlinkFile(filename->c_str());
+  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename->c_str()));
 
   delete filename;
 }
@@ -25,11 +25,7 @@
 #include "Agency/Agent.h"
 #include "Agency/Job.h"
 
-#include <velocypack/Iterator.h>
-#include <velocypack/velocypack-aliases.h>
-
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
 
 AddFollower::AddFollower(Node const& snapshot, Agent* agent,
                          std::string const& jobId, std::string const& creator,
@@ -1,4 +1,4 @@
-///////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
 /// DISCLAIMER
 ///
 /// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
@@ -54,7 +54,6 @@
 
 using namespace arangodb;
 using namespace arangodb::application_features;
-using namespace arangodb::basics;
 using namespace arangodb::httpclient;
 using namespace arangodb::rest;
 
@@ -450,7 +449,7 @@ std::string AgencyCommManager::path(std::string const& p1) {
     return "";
   }
 
-  return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/");
+  return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/");
 }
 
 std::string AgencyCommManager::path(std::string const& p1,
@@ -459,8 +458,8 @@ std::string AgencyCommManager::path(std::string const& p1,
     return "";
   }
 
-  return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/") + "/" +
-         StringUtils::trim(p2, "/");
+  return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/") + "/" +
+         basics::StringUtils::trim(p2, "/");
 }
 
 std::string AgencyCommManager::generateStamp() {
@@ -674,7 +673,7 @@ void AgencyCommManager::removeEndpoint(std::string const& endpoint) {
 }
 
 std::string AgencyCommManager::endpointsString() const {
-  return StringUtils::join(endpoints(), ", ");
+  return basics::StringUtils::join(endpoints(), ", ");
 }
 
 std::vector<std::string> AgencyCommManager::endpoints() const {
@@ -1280,7 +1279,7 @@ void AgencyComm::updateEndpoints(arangodb::velocypack::Slice const& current) {
   for (const auto& i : VPackObjectIterator(current)) {
     auto const endpoint = Endpoint::unifiedForm(i.value.copyString());
     if (std::find(stored.begin(), stored.end(), endpoint) == stored.end()) {
-      LOG_TOPIC(INFO, Logger::CLUSTER)
+      LOG_TOPIC(DEBUG, Logger::CLUSTER)
         << "Adding endpoint " << endpoint << " to agent pool";
       AgencyCommManager::MANAGER->addEndpoint(endpoint);
     }
@@ -1391,7 +1390,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
     b.add(VPackValue(clientId));
   }
 
-  LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
+  LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
     "Failed agency comm (" << result._statusCode << ")! " <<
     "Inquiring about clientId " << clientId << ".";
 
@@ -1410,25 +1409,25 @@ AgencyCommResult AgencyComm::sendWithFailover(
       for (auto const& i : VPackArrayIterator(inner)) {
         if (i.isUInt()) {
           if (i.getUInt() == 0) {
-            LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+            LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
               << body << " failed: " << outer.toJson();
             return result;
           } else {
             success = true;
           }
         } else {
-          LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+          LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
             << body << " failed with " << outer.toJson();
         }
       }
     }
   }
   if (success) {
-    LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+    LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
       << body << " succeeded (" << outer.toJson() << ")";
     return inq;
   } else {
-    LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+    LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
       << body << " failed (" << outer.toJson() << ")";
     return result;
   }
@@ -1437,7 +1436,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
     }
     return inq;
   } else {
-    LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
+    LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
       "Inquiry failed (" << inq._statusCode << "). Keep trying ...";
     continue;
   }
@@ -45,7 +45,7 @@ AgencyFeature::AgencyFeature(application_features::ApplicationServer* server)
       _supervision(false),
       _waitForSync(true),
       _supervisionFrequency(5.0),
-      _compactionStepSize(2000),
+      _compactionStepSize(200000),
       _compactionKeepSize(500),
       _supervisionGracePeriod(15.0),
       _cmdLineTimings(false)
@@ -232,7 +232,7 @@ void AgencyFeature::start() {
 
   _agent.reset(new consensus::Agent(consensus::config_t(
       _size, _poolSize, _minElectionTimeout, _maxElectionTimeout, endpoint,
-      _agencyEndpoints, _supervision, _waitForSync, _supervisionFrequency,
+      _agencyEndpoints, _supervision, false, _supervisionFrequency,
       _compactionStepSize, _compactionKeepSize, _supervisionGracePeriod,
       _cmdLineTimings)));
 
@@ -257,7 +257,7 @@ bool Agent::recvAppendEntriesRPC(
     term_t term, std::string const& leaderId, index_t prevIndex, term_t prevTerm,
     index_t leaderCommitIndex, query_t const& queries) {
 
-  LOG_TOPIC(DEBUG, Logger::AGENCY) << "Got AppendEntriesRPC from "
+  LOG_TOPIC(TRACE, Logger::AGENCY) << "Got AppendEntriesRPC from "
     << leaderId << " with term " << term;
 
   // Update commit index
@@ -276,40 +276,34 @@ bool Agent::recvAppendEntriesRPC(
   size_t nqs = queries->slice().length();
 
   // State machine, _lastCommitIndex to advance atomically
-  MUTEX_LOCKER(mutexLocker, _ioLock);
-
   if (nqs > 0) {
+
+    MUTEX_LOCKER(mutexLocker, _ioLock);
+
     size_t ndups = _state.removeConflicts(queries);
 
     if (nqs > ndups) {
-      LOG_TOPIC(TRACE, Logger::AGENCY)
+      LOG_TOPIC(DEBUG, Logger::AGENCY)
        << "Appending " << nqs - ndups << " entries to state machine. ("
-        << nqs << ", " << ndups << ")";
+        << nqs << ", " << ndups << "): " << queries->slice().toJson();
 
       try {
-        _state.log(queries, ndups);
+
+        _lastCommitIndex = _state.log(queries, ndups);
+
+        if (_lastCommitIndex >= _nextCompationAfter) {
+          _state.compact(_lastCommitIndex);
+          _nextCompationAfter += _config.compactionStepSize();
+        }
+
       } catch (std::exception const&) {
        LOG_TOPIC(DEBUG, Logger::AGENCY)
          << "Malformed query: " << __FILE__ << __LINE__;
       }
-
     }
   }
 
-  _spearhead.apply(
-    _state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
-    _constituent.term());
-
-  _readDB.apply(
-    _state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
-    _constituent.term());
-
-  _lastCommitIndex = leaderCommitIndex;
-
-  if (_lastCommitIndex >= _nextCompationAfter) {
-    _state.compact(_lastCommitIndex);
-    _nextCompationAfter += _config.compactionStepSize();
-  }
-
   return true;
 }
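The refactored follower path above now takes the commit index directly from
State::log() and compacts the log once that index passes a moving threshold.
A self-contained sketch of that trigger pattern, with illustrative names that
are not ArangoDB's actual API:

    #include <cstdint>

    template <typename Log>
    struct CompactionTrigger {
      uint64_t nextCompactionAfter;  // compact once the commit index reaches this
      uint64_t stepSize;             // how far to move the threshold afterwards

      void maybeCompact(Log& log, uint64_t lastCommitIndex) {
        if (lastCommitIndex >= nextCompactionAfter) {
          log.compact(lastCommitIndex);     // drop entries up to the index
          nextCompactionAfter += stepSize;  // arm the next threshold
        }
      }
    };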
@@ -348,7 +342,7 @@ void Agent::sendAppendEntriesRPC() {
     duration<double> m = system_clock::now() - _lastSent[followerId];
 
     if (highest == _lastHighest[followerId] &&
-        m.count() < 0.5 * _config.minPing()) {
+        m.count() < 0.25 * _config.minPing()) {
       continue;
     }
 
@@ -1122,10 +1116,10 @@ bool Agent::rebuildDBs() {
 
   MUTEX_LOCKER(mutexLocker, _ioLock);
 
-  _spearhead.apply(_state.slices(_lastCommitIndex + 1), _lastCommitIndex,
-                   _constituent.term());
-  _readDB.apply(_state.slices(_lastCommitIndex + 1), _lastCommitIndex,
-                _constituent.term());
+  _spearhead.apply(
+    _state.slices(0, _lastCommitIndex), _lastCommitIndex, _constituent.term());
+  _readDB.apply(
+    _state.slices(0, _lastCommitIndex), _lastCommitIndex, _constituent.term());
 
   return true;
 }
@@ -28,7 +28,6 @@
 #include "Agency/MoveShard.h"
 
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
 
 CleanOutServer::CleanOutServer(Node const& snapshot, Agent* agent,
                                std::string const& jobId,
@@ -468,11 +468,13 @@ void Constituent::callElection() {
 void Constituent::update(std::string const& leaderID, term_t t) {
   MUTEX_LOCKER(guard, _castLock);
   _term = t;
 
   if (_leaderID != leaderID) {
     LOG_TOPIC(DEBUG, Logger::AGENCY)
       << "Constituent::update: setting _leaderID to " << leaderID
       << " in term " << _term;
     _leaderID = leaderID;
+    _role = FOLLOWER;
   }
 }
@@ -546,6 +548,11 @@ void Constituent::run() {
     LOG_TOPIC(DEBUG, Logger::AGENCY) << "Set _leaderID to " << _leaderID
                                      << " in term " << _term;
   } else {
+
+    {
+      MUTEX_LOCKER(guard, _castLock);
+      _role = FOLLOWER;
+    }
     while (!this->isStopping()) {
       if (_role == FOLLOWER) {
         static double const M = 1.0e6;
@@ -75,7 +75,7 @@ bool FailedFollower::create() {
     }
   }
 
-  _jb = std::make_shared<velocypack::Builder>();
+  _jb = std::make_shared<Builder>();
   _jb->openArray();
   _jb->openObject();
 
@@ -128,7 +128,7 @@ bool FailedFollower::start() {
 
 
   // Copy todo to pending
-  velocypack::Builder todo, pending;
+  Builder todo, pending;
 
   // Get todo entry
   todo.openArray();
@@ -254,7 +254,7 @@ JOB_STATUS FailedFollower::status() {
 
   if (compareServerLists(planned.slice(), current.slice())) {
     // Remove shard from /arango/Target/FailedServers/<server> array
-    velocypack::Builder del;
+    Builder del;
     del.openArray();
     del.openObject();
     std::string path = _agencyPrefix + failedServersPrefix + "/" + _from;
@@ -27,7 +27,6 @@
 #include "Agency/Job.h"
 
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
 
 FailedLeader::FailedLeader(Node const& snapshot, Agent* agent,
                            std::string const& jobId, std::string const& creator,
@@ -58,7 +58,7 @@ bool FailedServer::start() {
       << "Start FailedServer job " + _jobId + " for server " + _server;
 
   // Copy todo to pending
-  velocypack::Builder todo, pending;
+  Builder todo, pending;
 
   // Get todo entry
   todo.openArray();
@@ -210,7 +210,7 @@ bool FailedServer::create() {
 
   std::string path = _agencyPrefix + toDoPrefix + _jobId;
 
-  _jb = std::make_shared<velocypack::Builder>();
+  _jb = std::make_shared<Builder>();
   _jb->openArray();
   _jb->openObject();
 
@@ -271,7 +271,7 @@ JOB_STATUS FailedServer::status() {
     // mop: ohhh...server is healthy again!
     bool serverHealthy = serverHealth == Supervision::HEALTH_STATUS_GOOD;
 
-    std::shared_ptr<velocypack::Builder> deleteTodos;
+    std::shared_ptr<Builder> deleteTodos;
 
     Node::Children const todos = _snapshot(toDoPrefix).children();
     Node::Children const pends = _snapshot(pendingPrefix).children();
@@ -281,7 +281,7 @@ JOB_STATUS FailedServer::status() {
       if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
        if (serverHealthy) {
          if (!deleteTodos) {
-            deleteTodos.reset(new velocypack::Builder());
+            deleteTodos.reset(new Builder());
            deleteTodos->openArray();
            deleteTodos->openObject();
          }
@@ -66,7 +66,7 @@ void Inception::gossip() {
   auto const version = config.version();
 
   // Build gossip message
-  auto out = std::make_shared<velocypack::Builder>();
+  auto out = std::make_shared<Builder>();
   out->openObject();
   out->add("endpoint", VPackValue(config.endpoint()));
   out->add("id", VPackValue(config.id()));
@@ -169,7 +169,7 @@ bool Inception::restartingActiveAgent() {
   auto const& clientEp = myConfig.endpoint();
   auto const majority = (myConfig.size()+1)/2;
 
-  velocypack::Builder greeting;
+  Builder greeting;
   {
     VPackObjectBuilder b(&greeting);
     greeting.add(clientId, VPackValue(clientEp));
@@ -259,7 +259,7 @@ bool Inception::restartingActiveAgent() {
       }
     }
 
-    auto agency = std::make_shared<velocypack::Builder>();
+    auto agency = std::make_shared<Builder>();
     agency->openObject();
     agency->add("term", theirConfig.get("term"));
     agency->add("id", VPackValue(theirLeaderId));
@@ -435,7 +435,7 @@ bool Inception::estimateRAFTInterval() {
     LOG_TOPIC(DEBUG, Logger::AGENCY)
       << "mean(" << mean << ") stdev(" << stdev << ")";
 
-    velocypack::Builder measurement;
+    Builder measurement;
     measurement.openObject();
     measurement.add("mean", VPackValue(mean));
     measurement.add("stdev", VPackValue(stdev));
@@ -541,8 +541,10 @@ void Inception::run() {
     LOG_TOPIC(INFO, Logger::AGENCY) << "Activating agent.";
     _agent->ready(true);
   } else {
+    if (!this->isStopping()) {
       LOG_TOPIC(FATAL, Logger::AGENCY)
         << "Unable to restart with persisted pool. Fatal exit.";
+    }
     FATAL_ERROR_EXIT();
     // FATAL ERROR
   }
@@ -25,7 +25,7 @@
 
 using namespace arangodb::consensus;
 
-bool arangodb::consensus::compareServerLists(velocypack::Slice plan, velocypack::Slice current) {
+bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
   if (!plan.isArray() || !current.isArray()) {
     return false;
   }
@@ -80,7 +80,7 @@ JOB_STATUS Job::exists() const {
 bool Job::finish(std::string const& type, bool success,
                  std::string const& reason) const {
 
-  velocypack::Builder pending, finished;
+  Builder pending, finished;
 
   // Get todo entry
   pending.openArray();
@@ -28,7 +28,6 @@
 #include "Node.h"
 #include "Supervision.h"
 
-#include <velocypack/Builder.h>
 #include <velocypack/Iterator.h>
 #include <velocypack/Slice.h>
 #include <velocypack/velocypack-aliases.h>
@@ -42,7 +41,7 @@ namespace consensus {
 // and all others followers. Both arguments must be arrays. Returns true,
 // if the first items in both slice are equal and if both arrays contain
 // the same set of strings.
-bool compareServerLists(velocypack::Slice plan, velocypack::Slice current);
+bool compareServerLists(Slice plan, Slice current);
 
 enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
 const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",
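Per the comment above, compareServerLists treats the first entry of each list
as the leader, which must match exactly, while the remaining entries count as
a set of followers. A self-contained sketch of that contract, using
std::string vectors instead of velocypack slices; the helper name is
illustrative:

    #include <algorithm>
    #include <string>
    #include <vector>

    static bool sameServerLists(std::vector<std::string> plan,
                                std::vector<std::string> current) {
      if (plan.empty() || current.empty() || plan.size() != current.size()) {
        return false;
      }
      if (plan.front() != current.front()) {
        return false;  // leader differs
      }
      // Followers are compared as a set: order does not matter.
      std::sort(plan.begin() + 1, plan.end());
      std::sort(current.begin() + 1, current.end());
      return plan == current;
    }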
@@ -64,9 +63,9 @@ static std::string const plannedServers = "/Plan/DBServers";
 static std::string const healthPrefix = "/Supervision/Health/";
 
 inline arangodb::consensus::write_ret_t transact(Agent* _agent,
-                                                 velocypack::Builder const& transaction,
+                                                 Builder const& transaction,
                                                  bool waitForCommit = true) {
-  query_t envelope = std::make_shared<velocypack::Builder>();
+  query_t envelope = std::make_shared<Builder>();
 
   try {
     envelope->openArray();
@@ -138,7 +137,7 @@ struct Job {
   std::string _creator;
   std::string _agencyPrefix;
 
-  std::shared_ptr<velocypack::Builder> _jb;
+  std::shared_ptr<Builder> _jb;
 
 };
 
@@ -29,7 +29,6 @@
 static std::string const DBServer = "DBServer";
 
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
 
 MoveShard::MoveShard(Node const& snapshot, Agent* agent,
                      std::string const& jobId, std::string const& creator,
@@ -33,9 +33,8 @@
 #include <deque>
 #include <regex>
 
-using namespace arangodb;
-using namespace arangodb::basics;
 using namespace arangodb::consensus;
+using namespace arangodb::basics;
 
 struct NotEmpty {
   bool operator()(const std::string& s) { return !s.empty(); }
@@ -89,16 +88,16 @@ Node::Node(std::string const& name, Store* store)
 Node::~Node() {}
 
 /// Get slice to value buffer
-velocypack::Slice Node::slice() const {
+Slice Node::slice() const {
   // Some array
   if (_isArray) {
     rebuildVecBuf();
-    return velocypack::Slice(_vecBuf.data());
+    return Slice(_vecBuf.data());
   }
 
   // Some value
   if (!_value.empty()) {
-    return velocypack::Slice(_value.front().data());
+    return Slice(_value.front().data());
   }
 
   // Empty object
@@ -107,10 +106,10 @@ velocypack::Slice Node::slice() const {
 
 void Node::rebuildVecBuf() const {
   if (_vecBufDirty) {  // Dirty vector buffer
-    velocypack::Builder tmp;
+    Builder tmp;
     tmp.openArray();
     for (auto const& i : _value) {
-      tmp.add(velocypack::Slice(i.data()));
+      tmp.add(Slice(i.data()));
     }
     tmp.close();
     _vecBuf = *tmp.steal();
@@ -324,7 +323,7 @@ Store& Node::store() { return *(root()._store); }
 Store const& Node::store() const { return *(root()._store); }
 
 // velocypack value type of this node
-velocypack::ValueType Node::valueType() const { return slice().type(); }
+ValueType Node::valueType() const { return slice().type(); }
 
 // file time to live entry for this node to now + millis
 bool Node::addTimeToLive(long millis) {
@@ -359,7 +358,7 @@ namespace consensus {
 /// Set value
 template <>
 bool Node::handle<SET>(VPackSlice const& slice) {
-  VPackSlice val = slice.get("new");
+  Slice val = slice.get("new");
 
   if (val.isObject()) {
     if (val.hasKey("op")) {  // No longer a keyword but a regular key "op"
@@ -394,12 +393,12 @@ bool Node::handle<SET>(VPackSlice const& slice) {
 /// Increment integer value or set 1
 template <>
 bool Node::handle<INCREMENT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openObject();
   try {
-    tmp.add("tmp", velocypack::Value(this->slice().getInt() + 1));
+    tmp.add("tmp", Value(this->slice().getInt() + 1));
   } catch (std::exception const&) {
-    tmp.add("tmp", velocypack::Value(1));
+    tmp.add("tmp", Value(1));
   }
   tmp.close();
   *this = tmp.slice().get("tmp");
@@ -409,12 +408,12 @@ bool Node::handle<INCREMENT>(VPackSlice const& slice) {
 /// Decrement integer value or set -1
 template <>
 bool Node::handle<DECREMENT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openObject();
   try {
-    tmp.add("tmp", velocypack::Value(this->slice().getInt() - 1));
+    tmp.add("tmp", Value(this->slice().getInt() - 1));
   } catch (std::exception const&) {
-    tmp.add("tmp", velocypack::Value(-1));
+    tmp.add("tmp", Value(-1));
  }
   tmp.close();
   *this = tmp.slice().get("tmp");
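handle<INCREMENT> and handle<DECREMENT> are reached when an agency write
carries an operation object under a key (see the hasKey("op") check in
handle<SET> above). A minimal sketch of building such a document with
velocypack; the key name "myCounter" is made up, and the velocypack headers
are assumed to be on the include path:

    #include <iostream>
    #include <velocypack/Builder.h>
    #include <velocypack/velocypack-aliases.h>

    int main() {
      // Builds {"myCounter": {"op": "increment"}}, the shape of update
      // that would be dispatched to handle<INCREMENT>().
      VPackBuilder b;
      b.openObject();
      b.add("myCounter", VPackValue(VPackValueType::Object));
      b.add("op", VPackValue("increment"));
      b.close();  // inner object
      b.close();  // outer object
      std::cout << b.slice().toJson() << std::endl;
      return 0;
    }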
@@ -429,7 +428,7 @@ bool Node::handle<PUSH>(VPackSlice const& slice) {
       << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     for (auto const& old : VPackArrayIterator(this->slice())) tmp.add(old);
@@ -448,7 +447,7 @@ bool Node::handle<ERASE>(VPackSlice const& slice) {
       << "Operator erase without value to be erased: " << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     for (auto const& old : VPackArrayIterator(this->slice())) {
@@ -475,7 +474,7 @@ bool Node::handle<REPLACE>(VPackSlice const& slice) {
       << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     for (auto const& old : VPackArrayIterator(this->slice())) {
@@ -494,7 +493,7 @@ bool Node::handle<REPLACE>(VPackSlice const& slice) {
 /// Remove element from end of array.
 template <>
 bool Node::handle<POP>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {
     VPackArrayIterator it(this->slice());
@@ -519,7 +518,7 @@ bool Node::handle<PREPEND>(VPackSlice const& slice) {
       << slice.toJson();
     return false;
   }
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   tmp.add(slice.get("new"));
   if (this->slice().isArray()) {
@@ -533,7 +532,7 @@ bool Node::handle<PREPEND>(VPackSlice const& slice) {
 /// Remove element from front of array
 template <>
 bool Node::handle<SHIFT>(VPackSlice const& slice) {
-  velocypack::Builder tmp;
+  Builder tmp;
   tmp.openArray();
   if (this->slice().isArray()) {  // If a
     VPackArrayIterator it(this->slice());
@@ -678,7 +677,7 @@ bool Node::applies(VPackSlice const& slice) {
   return true;
 }
 
-void Node::toBuilder(velocypack::Builder& builder, bool showHidden) const {
+void Node::toBuilder(Builder& builder, bool showHidden) const {
   try {
     if (type() == NODE) {
       VPackObjectBuilder guard(&builder);
@@ -729,7 +728,7 @@ Node::Children& Node::children() { return _children; }
 Node::Children const& Node::children() const { return _children; }
 
 std::string Node::toJson() const {
-  velocypack::Builder builder;
+  Builder builder;
   builder.openArray();
   toBuilder(builder);
   builder.close();
@@ -796,7 +795,7 @@ std::string Node::getString() const {
   return slice().copyString();
 }
 
-velocypack::Slice Node::getArray() const {
+Slice Node::getArray() const {
   if (type() == NODE) {
     throw StoreException("Must not convert NODE type to array");
   }
@@ -804,6 +803,6 @@ velocypack::Slice Node::getArray() const {
     throw StoreException("Not an array type");
   }
   rebuildVecBuf();
-  return velocypack::Slice(_vecBuf.data());
+  return Slice(_vecBuf.data());
 }
 
@@ -50,6 +50,8 @@ enum Operation {
   REPLACE
 };
 
+using namespace arangodb::velocypack;
+
 class StoreException : public std::exception {
  public:
   explicit StoreException(std::string const& message) : _message(message) {}
@@ -159,7 +161,7 @@ class Node {
   bool handle(arangodb::velocypack::Slice const&);
 
   /// @brief Create Builder representing this store
-  void toBuilder(velocypack::Builder&, bool showHidden = false) const;
+  void toBuilder(Builder&, bool showHidden = false) const;
 
   /// @brief Access children
   Children& children();
@@ -168,10 +170,10 @@ class Node {
   Children const& children() const;
 
   /// @brief Create slice from value
-  velocypack::Slice slice() const;
+  Slice slice() const;
 
   /// @brief Get value type
-  velocypack::ValueType valueType() const;
+  ValueType valueType() const;
 
   /// @brief Add observer for this node
   bool addObserver(std::string const&);
@ -216,7 +218,7 @@ class Node {
|
||||||
std::string getString() const;
|
std::string getString() const;
|
||||||
|
|
||||||
/// @brief Get array value
|
/// @brief Get array value
|
||||||
velocypack::Slice getArray() const;
|
Slice getArray() const;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
/// @brief Add time to live entry
|
/// @brief Add time to live entry
|
||||||
|
@ -232,8 +234,8 @@ class Node {
|
||||||
Store* _store; ///< @brief Store
|
Store* _store; ///< @brief Store
|
||||||
Children _children; ///< @brief child nodes
|
Children _children; ///< @brief child nodes
|
||||||
TimePoint _ttl; ///< @brief my expiry
|
TimePoint _ttl; ///< @brief my expiry
|
||||||
std::vector<velocypack::Buffer<uint8_t>> _value; ///< @brief my value
|
std::vector<Buffer<uint8_t>> _value; ///< @brief my value
|
||||||
mutable velocypack::Buffer<uint8_t> _vecBuf;
|
mutable Buffer<uint8_t> _vecBuf;
|
||||||
mutable bool _vecBufDirty;
|
mutable bool _vecBufDirty;
|
||||||
bool _isArray;
|
bool _isArray;
|
||||||
};
|
};
|
||||||
|
|
|
@@ -27,7 +27,6 @@
 #include "Agency/Job.h"

 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 RemoveServer::RemoveServer(Node const& snapshot, Agent* agent,
                            std::string const& jobId, std::string const& creator,
@@ -35,10 +35,10 @@
 #include "Rest/Version.h"

 using namespace arangodb;

 using namespace arangodb::basics;
-using namespace arangodb::consensus;
 using namespace arangodb::rest;
-using namespace arangodb::velocypack;
+using namespace arangodb::consensus;

 ////////////////////////////////////////////////////////////////////////////////
 /// @brief ArangoDB server
@@ -184,7 +184,7 @@ class State {
   size_t _cur;

   /// @brief Operation options
-  OperationOptions _options;
+  arangodb::OperationOptions _options;

   /// @brief Empty log entry;
   static log_t emptyLog;
@@ -40,9 +40,8 @@
 #include <iomanip>
 #include <regex>

-using namespace arangodb::basics;
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
+using namespace arangodb::basics;

 /// Non-Emptyness of string
 struct NotEmpty {
@@ -28,9 +28,6 @@
 #include "Basics/Thread.h"
 #include "Node.h"

-#include <velocypack/Builder.h>
-#include <velocypack/Slice.h>
-
 namespace arangodb {
 namespace consensus {

@@ -61,10 +58,10 @@ class Store : public arangodb::Thread {
   std::vector<bool> apply(query_t const& query, bool verbose = false);

   /// @brief Apply single entry in query
-  bool apply(velocypack::Slice const& query, bool verbose = false);
+  bool apply(Slice const& query, bool verbose = false);

   /// @brief Apply entry in query
-  std::vector<bool> apply(std::vector<velocypack::Slice> const& query,
+  std::vector<bool> apply(std::vector<Slice> const& query,
                           index_t lastCommitIndex, term_t term,
                           bool inform = true);

@@ -82,7 +79,7 @@ class Store : public arangodb::Thread {
   bool start();

   /// @brief Dump everything to builder
-  void dumpToBuilder(velocypack::Builder&) const;
+  void dumpToBuilder(Builder&) const;

   /// @brief Notify observers
   void notifyObservers() const;

@@ -93,7 +90,7 @@ class Store : public arangodb::Thread {
   Store& operator=(VPackSlice const& slice);

   /// @brief Create Builder representing this store
-  void toBuilder(velocypack::Builder&, bool showHidden = false) const;
+  void toBuilder(Builder&, bool showHidden = false) const;

   /// @brief Copy out a node
   Node get(std::string const& path) const;
@@ -41,9 +41,9 @@
 #include "Basics/MutexLocker.h"

 using namespace arangodb;
-using namespace arangodb::application_features;
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;
+using namespace arangodb::application_features;

 std::string Supervision::_agencyPrefix = "/arango";
@@ -27,7 +27,6 @@
 #include "Agency/Job.h"

 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 UnassumedLeadership::UnassumedLeadership(
     Node const& snapshot, Agent* agent, std::string const& jobId,
@@ -39,7 +39,6 @@ using namespace arangodb;
 using namespace arangodb::application_features;
 using namespace arangodb::basics;
 using namespace arangodb::consensus;
-using namespace arangodb::velocypack;

 static void JS_EnabledAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_BEGIN(isolate);
@@ -418,7 +418,6 @@ struct AstNode {
   bool isAttributeAccessForVariable(Variable const* variable, bool allowIndexedAccess) const {
     auto node = getAttributeAccessForVariable(allowIndexedAccess);
-

     if (node == nullptr) {
       return false;
     }
@@ -35,7 +35,7 @@ class AqlItemBlock;

 class ExecutionEngine;

-class SingletonBlock : public ExecutionBlock {
+class SingletonBlock final : public ExecutionBlock {
  public:
   SingletonBlock(ExecutionEngine* engine, SingletonNode const* ep)
       : ExecutionBlock(engine, ep), _inputRegisterValues(nullptr), _whitelistBuilt(false) {}

@@ -75,7 +75,7 @@ class SingletonBlock : public ExecutionBlock {
   bool _whitelistBuilt;
 };

-class FilterBlock : public ExecutionBlock {
+class FilterBlock final : public ExecutionBlock {
  public:
   FilterBlock(ExecutionEngine*, FilterNode const*);

@@ -112,7 +112,7 @@ class FilterBlock : public ExecutionBlock {
   BlockCollector _collector;
 };

-class LimitBlock : public ExecutionBlock {
+class LimitBlock final : public ExecutionBlock {
  public:
   LimitBlock(ExecutionEngine* engine, LimitNode const* ep)
       : ExecutionBlock(engine, ep),

@@ -145,7 +145,7 @@ class LimitBlock : public ExecutionBlock {
   bool const _fullCount;
 };

-class ReturnBlock : public ExecutionBlock {
+class ReturnBlock final : public ExecutionBlock {
  public:
   ReturnBlock(ExecutionEngine* engine, ReturnNode const* ep)
       : ExecutionBlock(engine, ep), _returnInheritedResults(false) {}

@@ -168,7 +168,7 @@ class ReturnBlock : public ExecutionBlock {
   bool _returnInheritedResults;
 };

-class NoResultsBlock : public ExecutionBlock {
+class NoResultsBlock final : public ExecutionBlock {
  public:
   NoResultsBlock(ExecutionEngine* engine, NoResultsNode const* ep)
       : ExecutionBlock(engine, ep) {}

@@ -34,7 +34,7 @@ class AqlItemBlock;

 class ExecutionEngine;

-class CalculationBlock : public ExecutionBlock {
+class CalculationBlock final : public ExecutionBlock {
  public:
   CalculationBlock(ExecutionEngine*, CalculationNode const*);
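Note on the `final` qualifiers added throughout these execution block headers: sealing the concrete block classes documents that nothing derives from them and allows the compiler to devirtualize calls made through pointers of the concrete type. A minimal sketch of the pattern (the class and method names below are illustrative, not taken from the ArangoDB sources):

    #include <memory>

    struct ExecutionBlockBase {
      virtual ~ExecutionBlockBase() = default;
      virtual int getSome(int atMost) = 0;
    };

    // 'final' on the class: nothing may override getSome() any further,
    // so calls through a LimitBlockSketch* are candidates for direct,
    // non-virtual dispatch.
    struct LimitBlockSketch final : ExecutionBlockBase {
      int getSome(int atMost) override { return atMost < 10 ? atMost : 10; }
    };

    int main() {
      LimitBlockSketch block;
      return block.getSome(42);  // devirtualizable call
    }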
@@ -330,6 +330,16 @@ AqlItemBlock* GatherBlock::getSome(size_t atLeast, size_t atMost) {
       delete cur;
       _gatherBlockBuffer.at(val.first).pop_front();
       _gatherBlockPos.at(val.first) = std::make_pair(val.first, 0);
+
+      if (_gatherBlockBuffer.at(val.first).empty()) {
+        // if we pulled everything from the buffer, we need to fetch
+        // more data for the shard for which we have no more local
+        // values.
+        getBlock(val.first, atLeast, atMost);
+        // note that if getBlock() returns false here, this is not
+        // a problem, because the sort function used takes care of
+        // this
+      }
     }
   }
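This hunk implements the v3.1.11 fix for the cluster sort issue with an empty local GatherNode sort buffer: when popping the last value drains a shard's buffer, the merge loop now refetches via getBlock() before continuing, so the k-way merge never runs against a silently empty input. A stand-alone sketch of that refill-before-merge idea, assuming a simple deque-per-shard layout purely for illustration:

    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <vector>

    // One buffer per upstream shard; refill() stands in for getBlock().
    static bool refill(std::deque<int>& buf, int& next) {
      if (next > 3) return false;  // upstream exhausted
      buf.push_back(next++ * 10);
      return true;
    }

    int main() {
      std::vector<std::deque<int>> buffers(2);
      std::vector<int> cursor{0, 0};
      for (std::size_t i = 0; i < buffers.size(); ++i) refill(buffers[i], cursor[i]);

      // after consuming a value, refill an emptied buffer immediately,
      // mirroring the GatherBlock fix: never merge with a drained input
      for (int round = 0; round < 6; ++round) {
        for (std::size_t i = 0; i < buffers.size(); ++i) {
          if (buffers[i].empty()) continue;
          std::cout << buffers[i].front() << '\n';
          buffers[i].pop_front();
          if (buffers[i].empty()) {
            refill(buffers[i], cursor[i]);  // analogous to getBlock(...)
          }
        }
      }
    }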
@@ -43,7 +43,7 @@ class ExecutionEngine;

 typedef std::vector<Aggregator*> AggregateValuesType;

-class SortedCollectBlock : public ExecutionBlock {
+class SortedCollectBlock final : public ExecutionBlock {
  private:
   typedef std::vector<Aggregator*> AggregateValuesType;

@@ -41,7 +41,7 @@ class AqlItemBlock;
 struct Collection;
 class ExecutionEngine;

-class EnumerateCollectionBlock : public ExecutionBlock {
+class EnumerateCollectionBlock final : public ExecutionBlock {
  public:
   EnumerateCollectionBlock(ExecutionEngine* engine,
                            EnumerateCollectionNode const* ep);
@@ -1139,7 +1139,7 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
         regsToClear.emplace(r);
       }
     }
-    en->setRegsToClear(regsToClear);
+    en->setRegsToClear(std::move(regsToClear));
   }
 }

@@ -586,8 +586,8 @@ class ExecutionNode {
   void toVelocyPackHelperGeneric(arangodb::velocypack::Builder&, bool) const;

   /// @brief set regs to be deleted
-  void setRegsToClear(std::unordered_set<RegisterId> const& toClear) {
-    _regsToClear = toClear;
+  void setRegsToClear(std::unordered_set<RegisterId>&& toClear) {
+    _regsToClear = std::move(toClear);
   }

  protected:
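The two hunks above switch setRegsToClear() from a copying const-reference parameter to a sink parameter: the caller's freshly built set is moved into the node instead of being rehashed and copied. A minimal sketch of the idiom (names illustrative):

    #include <unordered_set>
    #include <utility>

    using RegisterId = unsigned int;

    struct NodeSketch {
      std::unordered_set<RegisterId> _regsToClear;

      // after the patch: rvalue-reference sink, buckets are stolen, no copy
      void setRegsToClear(std::unordered_set<RegisterId>&& toClear) {
        _regsToClear = std::move(toClear);
      }
    };

    int main() {
      NodeSketch n;
      std::unordered_set<RegisterId> regs{1, 2, 3};
      n.setRegsToClear(std::move(regs));  // caller opts in explicitly
      return static_cast<int>(n._regsToClear.size());
    }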
@@ -60,7 +60,7 @@ struct NonConstExpression {
   ~NonConstExpression() { delete expression; }
 };

-class IndexBlock : public ExecutionBlock {
+class IndexBlock final : public ExecutionBlock {
  public:
   IndexBlock(ExecutionEngine* engine, IndexNode const* ep);
@@ -4100,47 +4100,54 @@ MMFilesGeoIndexInfo iterativePreorderWithCondition(EN::NodeType type, AstNode* r
   return MMFilesGeoIndexInfo{};
 }

-MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode*,AstNode*> const& pair, ExecutionPlan* plan, MMFilesGeoIndexInfo info){
-  using SV = std::vector<std::string>;
+MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode const*, AstNode const*> const& pair,
+                                                ExecutionPlan* plan, MMFilesGeoIndexInfo info){
+  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess1;
+  std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess2;
+
   // first and second should be based on the same document - need to provide the document
   // in order to see which collection is bound to it and if that collections supports geo-index
-  if( !pair.first->isAttributeAccessForVariable() || !pair.second->isAttributeAccessForVariable()){
+  if (!pair.first->isAttributeAccessForVariable(attributeAccess1) ||
+      !pair.second->isAttributeAccessForVariable(attributeAccess2)) {
     info.invalidate();
     return info;
   }

+  TRI_ASSERT(attributeAccess1.first != nullptr);
+  TRI_ASSERT(attributeAccess2.first != nullptr);
+
   // expect access of the for doc.attribute
-  // TODO: more complex access path have to be added: loop until REFERENCE TYPE IS FOUND
-  auto setter1 = plan->getVarSetBy(static_cast<Variable const*>(pair.first->getMember(0)->getData())->id);
-  auto setter2 = plan->getVarSetBy(static_cast<Variable const*>(pair.second->getMember(0)->getData())->id);
-  SV accessPath1{pair.first->getString()};
-  SV accessPath2{pair.second->getString()};
+  auto setter1 = plan->getVarSetBy(attributeAccess1.first->id);
+  auto setter2 = plan->getVarSetBy(attributeAccess2.first->id);

-  if(setter1 == setter2){
-    if(setter1->getType() == EN::ENUMERATE_COLLECTION){
-      auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
-
-      auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
-      auto lcoll = coll->getCollection();
-      // TODO - check collection for suitable geo-indexes
-      for(auto indexShardPtr : lcoll->getIndexes()){
-        // get real index
-        arangodb::Index& index = *indexShardPtr.get();
-
-        // check if current index is a geo-index
-        if( index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
-            && index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX){
-          continue;
-        }
-
-        //check access paths of attributes in ast and those in index match
-        if( index.fieldNames()[0] == accessPath1 && index.fieldNames()[1] == accessPath2 ){
-          info.collectionNode = collNode;
-          info.index = indexShardPtr;
-          info.longitude = std::move(accessPath1);
-          info.latitude = std::move(accessPath2);
-          return info;
-        }
+  if (setter1 != nullptr &&
+      setter2 != nullptr &&
+      setter1 == setter2 &&
+      setter1->getType() == EN::ENUMERATE_COLLECTION) {
+    auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
+    auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
+    auto lcoll = coll->getCollection();
+    // TODO - check collection for suitable geo-indexes
+    for(auto indexShardPtr : lcoll->getIndexes()){
+      // get real index
+      arangodb::Index& index = *indexShardPtr.get();
+
+      // check if current index is a geo-index
+      if( index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
+          && index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX) {
+        continue;
+      }
+
+      TRI_ASSERT(index.fields().size() == 2);
+
+      //check access paths of attributes in ast and those in index match
+      if (index.fields()[0] == attributeAccess1.second &&
+          index.fields()[1] == attributeAccess2.second) {
+        info.collectionNode = collNode;
+        info.index = indexShardPtr;
+        TRI_AttributeNamesJoinNested(attributeAccess1.second, info.longitude, true);
+        TRI_AttributeNamesJoinNested(attributeAccess2.second, info.latitude, true);
+        return info;
       }
     }
   }
@@ -37,7 +37,7 @@ class AqlItemBlock;

 class ExecutionEngine;

-class SortBlock : public ExecutionBlock {
+class SortBlock final : public ExecutionBlock {
  public:
   SortBlock(ExecutionEngine*, SortNode const*);

@@ -37,7 +37,7 @@ class ManagedDocumentResult;

 namespace aql {

-class TraversalBlock : public ExecutionBlock {
+class TraversalBlock final : public ExecutionBlock {
  public:
   TraversalBlock(ExecutionEngine* engine, TraversalNode const* ep);
@@ -286,11 +286,9 @@ void ClusterFeature::prepare() {
     ServerState::instance()->setId(_myId);
   }

-  if (_requestedRole != ServerState::RoleEnum::ROLE_UNDEFINED) {
-    if (!ServerState::instance()->registerWithRole(_requestedRole, _myAddress)) {
-      LOG(FATAL) << "Couldn't register at agency.";
-      FATAL_ERROR_EXIT();
-    }
+  if (!ServerState::instance()->registerWithRole(_requestedRole, _myAddress)) {
+    LOG(FATAL) << "Couldn't register at agency.";
+    FATAL_ERROR_EXIT();
   }

   auto role = ServerState::instance()->getRole();
@@ -54,6 +54,8 @@ void DBServerAgencySync::work() {
 DBServerAgencySyncResult DBServerAgencySync::execute() {
   // default to system database

+  double startTime = TRI_microtime();
+
   LOG_TOPIC(DEBUG, Logger::HEARTBEAT) << "DBServerAgencySync::execute starting";
   DatabaseFeature* database =
       ApplicationServer::getFeature<DatabaseFeature>("Database");

@@ -80,6 +82,11 @@ DBServerAgencySyncResult DBServerAgencySync::execute() {
     return result;
   }

+  double now = TRI_microtime();
+  if (now - startTime > 5.0) {
+    LOG(INFO) << "DBServerAgencySync::execute took more than 5s to get free V8 context, starting handle-plan-change now";
+  }
+
   TRI_DEFER(V8DealerFeature::DEALER->exitContext(context));

   auto isolate = context->_isolate;
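The added TRI_microtime() bracket only logs when obtaining a free V8 context stalled for more than five seconds. The same wall-clock-delta pattern in portable C++, with std::chrono standing in for ArangoDB's TRI_microtime helper:

    #include <chrono>
    #include <iostream>

    int main() {
      auto start = std::chrono::steady_clock::now();

      // ... wait for some shared resource (a V8 context in the patch) ...

      std::chrono::duration<double> elapsed =
          std::chrono::steady_clock::now() - start;
      if (elapsed.count() > 5.0) {  // only report pathological waits
        std::cout << "acquiring the resource took " << elapsed.count() << "s\n";
      }
    }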
@@ -251,51 +251,70 @@ bool ServerState::unregister() {
 bool ServerState::registerWithRole(ServerState::RoleEnum role,
                                    std::string const& myAddress) {

-  if (!getId().empty()) {
-    LOG_TOPIC(INFO, Logger::CLUSTER)
-      << "Registering with role and localinfo. Supplied id is being ignored";
-    return false;
-  }
-
   AgencyComm comm;
   AgencyCommResult result;
   std::string localInfoEncoded = StringUtils::replace(
     StringUtils::urlEncode(getLocalInfo()),"%2E",".");
-  result = comm.getValues("Target/MapLocalToID/" + localInfoEncoded);
+
+  std::string locinf = "Target/MapLocalToID/" +
+    (localInfoEncoded.empty() ? "bogus_hass_hund" : localInfoEncoded);
+  std::string dbidinf = "Plan/DBServers/" +
+    (_id.empty() ? "bogus_hass_hund" : _id);
+  std::string coidinf = "Plan/Coordinators/" +
+    (_id.empty() ? "bogus_hass_hund" : _id);
+
+  typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
+  AgencyGeneralTransaction reg;
+  reg.operations.push_back( // my-local-info
+    operationType(AgencyOperation(locinf), AgencyPrecondition()));
+  reg.operations.push_back( // db my-id
+    operationType(AgencyOperation(dbidinf), AgencyPrecondition()));
+  reg.operations.push_back( // cooord my-id
+    operationType(AgencyOperation(coidinf), AgencyPrecondition()));
+  result = comm.sendTransactionWithFailover(reg, 0.0);

   std::string id;
-  bool found = true;
-
-  if (!result.successful()) {
-    found = false;
-  } else {
-    VPackSlice idSlice = result.slice()[0].get(
-      std::vector<std::string>({AgencyCommManager::path(), "Target",
-                                "MapLocalToID", localInfoEncoded}));
-    if (!idSlice.isString()) {
-      found = false;
-    } else {
-      id = idSlice.copyString();
-      LOG(WARN) << "Have ID: " + id;
+  if (result.slice().isArray()) {
+
+    VPackSlice targetSlice, planSlice;
+    if (!_id.empty()) {
+      try {
+        if (
+          result.slice()[1].get(
+            std::vector<std::string>({AgencyCommManager::path(), "Plan",
+                                      "DBServers", _id})).isString()) {
+          id = _id;
+          if (role == ServerState::ROLE_UNDEFINED) {
+            role = ServerState::ROLE_PRIMARY;
+          }
+        } else if (
+          result.slice()[2].get(
+            std::vector<std::string>({AgencyCommManager::path(), "Plan",
+                                      "Coordinators", _id})).isString()) {
+          id = _id;
+          if (role == ServerState::ROLE_UNDEFINED) {
+            role = ServerState::ROLE_COORDINATOR;
+          }
+        }
+      } catch (...) {}
+    } else if (!localInfoEncoded.empty()) {
+      try {
+        id = result.slice()[0].get(
+          std::vector<std::string>({AgencyCommManager::path(), "Target",
+                                    "MapLocalToID", localInfoEncoded})).copyString();
+      } catch (...) {}
     }
   }
-  createIdForRole(comm, role, id);
-  if (found) {

-  } else {
-    LOG_TOPIC(DEBUG, Logger::CLUSTER)
-      << "Determining id from localinfo failed."
-      << "Continuing with registering ourselves for the first time";
-    id = createIdForRole(comm, role);
-  }
+  id = createIdForRole(comm, role, id);

   const std::string agencyKey = roleToAgencyKey(role);
   const std::string planKey = "Plan/" + agencyKey + "/" + id;
   const std::string currentKey = "Current/" + agencyKey + "/" + id;

   auto builder = std::make_shared<VPackBuilder>();
   result = comm.getValues(planKey);
-  found = true;
+  bool found = true;
   if (!result.successful()) {
     found = false;
   } else {
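registerWithRole() now issues its three agency reads (the local-info mapping plus the DBServer and coordinator plan entries, with "bogus_hass_hund" as a placeholder key when a component is empty) as one batched transaction and inspects the results positionally. A reduced sketch of that batch-then-inspect shape, using plain std containers rather than the real agency types:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for sendTransactionWithFailover: evaluate every query in one
    // round trip and return the results positionally, as the patch does.
    std::vector<std::string> sendBatch(
        std::vector<std::function<std::string()>> const& queries) {
      std::vector<std::string> out;
      for (auto const& q : queries) out.push_back(q());
      return out;
    }

    int main() {
      auto results = sendBatch({
          [] { return std::string(""); },             // Target/MapLocalToID/...
          [] { return std::string("DBServer0001"); }, // Plan/DBServers/<id>
          [] { return std::string(""); }              // Plan/Coordinators/<id>
      });

      // positional inspection, mirroring result.slice()[1] / [2] in the patch
      if (!results[1].empty()) {
        std::cout << "registered as DB server: " << results[1] << '\n';
      } else if (!results[2].empty()) {
        std::cout << "registered as coordinator: " << results[2] << '\n';
      }
    }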
@@ -379,6 +398,9 @@ std::string ServerState::createIdForRole(AgencyComm comm,

   typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
   std::string const agencyKey = roleToAgencyKey(role);
+  std::string roleName = ((role == ROLE_COORDINATOR) ? "Coordinator":"DBServer");
+
+  size_t shortNum(0);

   VPackBuilder builder;
   builder.add(VPackValue("none"));

@@ -392,11 +414,22 @@ std::string ServerState::createIdForRole(AgencyComm comm,
   auto filePath = dbpath->directory() + "/UUID";
   std::ifstream ifs(filePath);

+  if (!id.empty()) {
+    if (id.compare(0, roleName.size(), roleName) == 0) {
+      try {
+        shortNum = std::stoul(id.substr(roleName.size(),3));
+      } catch(...) {
+        LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
+          "Old id cannot be parsed for number.";
+      }
+    }
+  }
+
   if (ifs.is_open()) {
     std::getline(ifs, id);
     ifs.close();
-    LOG_TOPIC(INFO, Logger::CLUSTER)
+    LOG_TOPIC(DEBUG, Logger::CLUSTER)
       << "Restarting with persisted UUID " << id;
   } else {
     mkdir (dbpath->directory());

@@ -451,7 +484,7 @@ std::string ServerState::createIdForRole(AgencyComm comm,
   reg.operations.push_back( // Get shortID
     operationType(AgencyOperation(targetIdStr), AgencyPrecondition()));
   result = comm.sendTransactionWithFailover(reg, 0.0);

   VPackSlice latestId = result.slice()[2].get(
     std::vector<std::string>(
       {AgencyCommManager::path(), "Target",

@@ -464,7 +497,8 @@ std::string ServerState::createIdForRole(AgencyComm comm,
     localIdBuilder.add("TransactionID", latestId);
     std::stringstream ss; // ShortName
     ss << ((role == ROLE_COORDINATOR) ? "Coordinator" : "DBServer")
-       << std::setw(4) << std::setfill('0') << latestId.getNumber<uint32_t>();
+       << std::setw(4) << std::setfill('0')
+       << (shortNum ==0 ? latestId.getNumber<uint32_t>() : shortNum);
     std::string shortName = ss.str();
     localIdBuilder.add("ShortName", VPackValue(shortName));
   }
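createIdForRole() now tries to recover the numeric suffix from a previously assigned id such as Coordinator0421, so the short name stays stable across restarts; parse failures are swallowed deliberately and simply leave shortNum at 0 (in which case a fresh number is taken from the agency). The extraction logic in isolation:

    #include <iostream>
    #include <string>

    int main() {
      std::string roleName = "Coordinator";
      std::string id = "Coordinator0421";

      size_t shortNum = 0;
      if (id.compare(0, roleName.size(), roleName) == 0) {
        try {
          // take at most 3 digits after the role prefix, as in the patch
          shortNum = std::stoul(id.substr(roleName.size(), 3));
        } catch (...) {
          // old id cannot be parsed for a number; keep shortNum == 0
        }
      }
      std::cout << "recovered short number: " << shortNum << '\n';
    }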
@@ -754,7 +754,11 @@ static void JS_GetCollectionInfoClusterInfo(
     uint32_t pos = 0;
     for (auto const& s : p.second) {
       try{
-        shorts->Set(pos, TRI_V8_STD_STRING(serverAliases.at(s)));
+        std::string t = s;
+        if (s.at(0) == '_') {
+          t = s.substr(1);
+        }
+        shorts->Set(pos, TRI_V8_STD_STRING(serverAliases.at(t)));
       } catch (...) {}
       list->Set(pos++, TRI_V8_STD_STRING(s));
     }

@@ -985,11 +989,23 @@ static void JS_GetDBServers(v8::FunctionCallbackInfo<v8::Value> const& args) {
   auto serverAliases = ClusterInfo::instance()->getServerAliases();

   v8::Handle<v8::Array> l = v8::Array::New(isolate);

   for (size_t i = 0; i < DBServers.size(); ++i) {
     v8::Handle<v8::Object> result = v8::Object::New(isolate);
-    result->Set(TRI_V8_ASCII_STRING("serverId"), TRI_V8_STD_STRING(DBServers[i]));
-    result->Set(TRI_V8_ASCII_STRING("serverName"),
-                TRI_V8_STD_STRING(serverAliases.at(DBServers[i])));
+    auto id = DBServers[i];
+
+    result->Set(TRI_V8_ASCII_STRING("serverId"), TRI_V8_STD_STRING(id));
+
+    auto itr = serverAliases.find(id);
+
+    if (itr != serverAliases.end()) {
+      result->Set(TRI_V8_ASCII_STRING("serverName"),
+                  TRI_V8_STD_STRING(itr->second));
+    } else {
+      result->Set(TRI_V8_ASCII_STRING("serverName"),
+                  TRI_V8_STD_STRING(id));
+    }

     l->Set((uint32_t)i, result);
   }
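The JS_GetDBServers() change replaces serverAliases.at(id), which throws std::out_of_range whenever a DB server has no alias yet, with a find() probe that falls back to the raw id. The general lookup pattern:

    #include <iostream>
    #include <string>
    #include <unordered_map>

    int main() {
      std::unordered_map<std::string, std::string> aliases{
          {"PRMR-1234", "DBServer0001"}};

      for (std::string const& id :
           {std::string("PRMR-1234"), std::string("PRMR-9999")}) {
        // find() never throws; at() would raise std::out_of_range for PRMR-9999
        auto itr = aliases.find(id);
        std::cout << (itr != aliases.end() ? itr->second : id) << '\n';
      }
    }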
@@ -91,8 +91,6 @@ class GeneralCommTask : public SocketTask {

   virtual arangodb::Endpoint::TransportType transportType() = 0;

-  void setStatistics(uint64_t, RequestStatistics*);
-
  protected:
   virtual std::unique_ptr<GeneralResponse> createResponse(
       rest::ResponseCode, uint64_t messageId) = 0;

@@ -111,6 +109,7 @@ class GeneralCommTask : public SocketTask {
                                  std::string const& errorMessage,
                                  uint64_t messageId) = 0;

+  void setStatistics(uint64_t, RequestStatistics*);
   RequestStatistics* acquireStatistics(uint64_t);
   RequestStatistics* statistics(uint64_t);
   RequestStatistics* stealStatistics(uint64_t);
@@ -303,9 +303,17 @@ void DatabaseFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
       &_check30Revisions,
       std::unordered_set<std::string>{"true", "false", "fail"}));

+  // the following option was removed in 3.2
+  // index-creation is now automatically parallelized via the Boost ASIO thread pool
   options->addObsoleteOption(
       "--database.index-threads",
       "threads to start for parallel background index creation", true);
+
+  // the following options were removed in 3.2
+  options->addObsoleteOption("--database.revision-cache-chunk-size",
+                             "chunk size (in bytes) for the document revisions cache", true);
+  options->addObsoleteOption("--database.revision-cache-target-size",
+                             "total target size (in bytes) for the document revisions cache", true);
 }

 void DatabaseFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
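addObsoleteOption() keeps a removed startup flag accepted-but-ignored, so existing configuration files do not break an upgrade. A toy parser showing the accept-and-warn behaviour (hypothetical stand-alone code, not the ProgramOptions implementation):

    #include <iostream>
    #include <set>
    #include <string>

    int main(int argc, char** argv) {
      // flags that are still accepted but deliberately ignored,
      // mirroring options->addObsoleteOption(...) in the patch
      std::set<std::string> obsolete{"--database.index-threads",
                                     "--database.revision-cache-chunk-size",
                                     "--database.revision-cache-target-size"};
      for (int i = 1; i < argc; ++i) {
        std::string arg = argv[i];
        std::string name = arg.substr(0, arg.find('='));
        if (obsolete.count(name)) {
          std::cerr << "ignoring obsolete option " << name << '\n';
          continue;
        }
        // ... real option handling would happen here ...
      }
    }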
@@ -49,7 +49,7 @@ SocketTask::SocketTask(arangodb::EventLoop loop,
                        double keepAliveTimeout, bool skipInit = false)
     : Task(loop, "SocketTask"),
       _connectionStatistics(nullptr),
-      _connectionInfo(connectionInfo),
+      _connectionInfo(std::move(connectionInfo)),
       _readBuffer(TRI_UNKNOWN_MEM_ZONE, READ_BLOCK_SIZE + 1, false),
       _writeBuffer(nullptr, nullptr),
       _peer(std::move(socket)),
@@ -1777,16 +1777,6 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
   TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName);
   LogicalCollection* collection = documentCollection(trxCollection(cid));

-  // First see whether or not we have to do synchronous replication:
-  std::shared_ptr<std::vector<ServerID> const> followers;
-  bool doingSynchronousReplication = false;
-  if (ServerState::isDBServer(_serverRole)) {
-    // Now replicate the same operation on all followers:
-    auto const& followerInfo = collection->followers();
-    followers = followerInfo->get();
-    doingSynchronousReplication = followers->size() > 0;
-  }
-
   if (options.returnNew) {
     orderDitch(cid);  // will throw when it fails
   }

@@ -1817,11 +1807,6 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
       return res;
     }

-    if (options.silent && !doingSynchronousReplication) {
-      // no need to construct the result object
-      return TRI_ERROR_NO_ERROR;
-    }
-
     uint8_t const* vpack = result.vpack();
     TRI_ASSERT(vpack != nullptr);

@@ -1864,6 +1849,15 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
     MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
   }

+  // Now see whether or not we have to do synchronous replication:
+  std::shared_ptr<std::vector<ServerID> const> followers;
+  bool doingSynchronousReplication = false;
+  if (ServerState::isDBServer(_serverRole)) {
+    // Now replicate the same operation on all followers:
+    auto const& followerInfo = collection->followers();
+    followers = followerInfo->get();
+    doingSynchronousReplication = followers->size() > 0;
+  }
+
   if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
     // In the multi babies case res is always TRI_ERROR_NO_ERROR if we

@@ -1950,7 +1944,7 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
     }
   }

-  if (doingSynchronousReplication && options.silent) {
+  if (options.silent) {
     // We needed the results, but do not want to report:
     resultBuilder.clear();
   }

@@ -2078,16 +2072,6 @@ OperationResult Transaction::modifyLocal(
     orderDitch(cid);  // will throw when it fails
   }

-  // First see whether or not we have to do synchronous replication:
-  std::shared_ptr<std::vector<ServerID> const> followers;
-  bool doingSynchronousReplication = false;
-  if (ServerState::isDBServer(_serverRole)) {
-    // Now replicate the same operation on all followers:
-    auto const& followerInfo = collection->followers();
-    followers = followerInfo->get();
-    doingSynchronousReplication = followers->size() > 0;
-  }
-
   // Update/replace are a read and a write, let's get the write lock already
   // for the read operation:
   int res = lock(trxCollection(cid), AccessMode::Type::WRITE);

@@ -2125,7 +2109,7 @@ OperationResult Transaction::modifyLocal(

     if (res == TRI_ERROR_ARANGO_CONFLICT) {
       // still return
-      if ((!options.silent || doingSynchronousReplication) && !isBabies) {
+      if (!isBabies) {
         StringRef key(newVal.get(StaticStrings::KeyString));
         buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
                               options.returnOld ? previous.vpack() : nullptr, nullptr);

@@ -2138,13 +2122,11 @@ OperationResult Transaction::modifyLocal(
     uint8_t const* vpack = result.vpack();
     TRI_ASSERT(vpack != nullptr);

-    if (!options.silent || doingSynchronousReplication) {
-      StringRef key(newVal.get(StaticStrings::KeyString));
-      buildDocumentIdentity(collection, resultBuilder, cid, key,
-                            TRI_ExtractRevisionId(VPackSlice(vpack)), actualRevision,
-                            options.returnOld ? previous.vpack() : nullptr ,
-                            options.returnNew ? vpack : nullptr);
-    }
+    StringRef key(newVal.get(StaticStrings::KeyString));
+    buildDocumentIdentity(collection, resultBuilder, cid, key,
+                          TRI_ExtractRevisionId(VPackSlice(vpack)), actualRevision,
+                          options.returnOld ? previous.vpack() : nullptr ,
+                          options.returnNew ? vpack : nullptr);

     return TRI_ERROR_NO_ERROR;
   };

@@ -2173,6 +2155,16 @@ OperationResult Transaction::modifyLocal(
     MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
   }

+  // Now see whether or not we have to do synchronous replication:
+  std::shared_ptr<std::vector<ServerID> const> followers;
+  bool doingSynchronousReplication = false;
+  if (ServerState::isDBServer(_serverRole)) {
+    // Now replicate the same operation on all followers:
+    auto const& followerInfo = collection->followers();
+    followers = followerInfo->get();
+    doingSynchronousReplication = followers->size() > 0;
+  }
+
   if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
     // In the multi babies case res is always TRI_ERROR_NO_ERROR if we
     // get here, in the single document case, we do not try to replicate

@@ -2262,7 +2254,7 @@ OperationResult Transaction::modifyLocal(
     }
   }

-  if (doingSynchronousReplication && options.silent) {
+  if (options.silent) {
     // We needed the results, but do not want to report:
     resultBuilder.clear();
   }

@@ -2332,16 +2324,6 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
     orderDitch(cid);  // will throw when it fails
   }

-  // First see whether or not we have to do synchronous replication:
-  std::shared_ptr<std::vector<ServerID> const> followers;
-  bool doingSynchronousReplication = false;
-  if (ServerState::isDBServer(_serverRole)) {
-    // Now replicate the same operation on all followers:
-    auto const& followerInfo = collection->followers();
-    followers = followerInfo->get();
-    doingSynchronousReplication = followers->size() > 0;
-  }
-
   VPackBuilder resultBuilder;
   TRI_voc_tick_t maxTick = 0;

@@ -2380,7 +2362,6 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,

     if (res != TRI_ERROR_NO_ERROR) {
       if (res == TRI_ERROR_ARANGO_CONFLICT &&
-          (!options.silent || doingSynchronousReplication) &&
           !isBabies) {
         buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
                               options.returnOld ? previous.vpack() : nullptr, nullptr);

@@ -2388,10 +2369,8 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
       return res;
     }

-    if (!options.silent || doingSynchronousReplication) {
-      buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
-                            options.returnOld ? previous.vpack() : nullptr, nullptr);
-    }
+    buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
+                          options.returnOld ? previous.vpack() : nullptr, nullptr);

     return TRI_ERROR_NO_ERROR;
   };

@@ -2418,6 +2397,16 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
     MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
   }

+  // Now see whether or not we have to do synchronous replication:
+  std::shared_ptr<std::vector<ServerID> const> followers;
+  bool doingSynchronousReplication = false;
+  if (ServerState::isDBServer(_serverRole)) {
+    // Now replicate the same operation on all followers:
+    auto const& followerInfo = collection->followers();
+    followers = followerInfo->get();
+    doingSynchronousReplication = followers->size() > 0;
+  }
+
   if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
     // In the multi babies case res is always TRI_ERROR_NO_ERROR if we
     // get here, in the single document case, we do not try to replicate

@@ -2505,7 +2494,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
     }
   }

-  if (doingSynchronousReplication && options.silent) {
+  if (options.silent) {
     // We needed the results, but do not want to report:
     resultBuilder.clear();
   }
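Across insertLocal, modifyLocal and removeLocal the follower lookup moves from before the local write to after it, and the identity result is now always built; silent mode merely clears the builder at the very end, once replication has had a chance to use it. A compressed, pseudocode-level sketch of the new control flow (not the actual transaction API):

    #include <iostream>
    #include <string>
    #include <vector>

    struct Result { std::string body; };

    int main() {
      bool silent = true;
      Result resultBuilder;

      // 1. perform the local write and always record the identity data
      resultBuilder.body = "{\"_key\":\"abc\",\"_rev\":\"123\"}";

      // 2. only now decide whether followers must see the same operation
      std::vector<std::string> followers = {"follower-1"};
      bool doingSynchronousReplication = !followers.empty();
      if (doingSynchronousReplication) {
        std::cout << "replicating " << resultBuilder.body << '\n';
      }

      // 3. silent mode drops the payload last, after replication
      if (silent) {
        resultBuilder.body.clear();
      }
      return static_cast<int>(resultBuilder.body.size());
    }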
@@ -838,7 +838,7 @@ static TRI_action_result_t ExecuteActionVocbase(
   // copy suffix, which comes from the action:
   std::string path = request->prefix();
   v8::Handle<v8::Array> suffixArray = v8::Array::New(isolate);
-  std::vector<std::string> const& suffixes = request->suffixes();
+  std::vector<std::string> const& suffixes = request->suffixes();  // TODO: does this need to be decodedSuffixes()??

   uint32_t index = 0;
   char const* sep = "";
@@ -1,17 +1,12 @@
 # -*- mode: CMAKE; -*-
 # these are the install targets for the client package.
 # we can't use RUNTIME DESTINATION here.
+# include(/tmp/dump_vars.cmake)
+message( "CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}/ CMAKE_INSTALL_SBINDIR ${CMAKE_INSTALL_SBINDIR}")

-set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
-execute_process(COMMAND mkdir -p ${STRIP_DIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_SBINDIR})
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_SBINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
+)
@@ -2,63 +2,30 @@
 # these are the install targets for the client package.
 # we can't use RUNTIME DESTINATION here.

-set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
-execute_process(COMMAND mkdir -p ${STRIP_DIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
-
-set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
-set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
-if (NOT MSVC AND CMAKE_STRIP)
-  execute_process(COMMAND "rm" -f ${STRIP_FILE})
-  execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
-  set(FILE ${STRIP_FILE})
-endif()
-install(
-  PROGRAMS ${FILE}
-  DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}")
+
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}")
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}")
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}")
+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}")
@@ -19,9 +19,9 @@ endif()
 # debug info directory:
 if (${CMAKE_INSTALL_LIBDIR} STREQUAL "usr/lib64")
   # some systems have weird places for usr/lib:
-  set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/${CMAKE_PROJECT_NAME}")
+  set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/")
 else ()
-  set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_PROJECT_NAME}")
+  set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/")
 endif ()

 set(CMAKE_INSTALL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_SYSCONFDIR}/${CMAKE_PROJECT_NAME}")
@@ -157,3 +157,39 @@ macro(to_native_path sourceVarName)
   endif()
   set("INC_${sourceVarName}" ${myVar})
 endmacro()
+
+macro(install_debinfo
+    STRIP_DIR
+    USER_SUB_DEBINFO_DIR
+    USER_FILE
+    USER_STRIP_FILE)
+
+  set(SUB_DEBINFO_DIR ${USER_SUB_DEBINFO_DIR})
+  set(FILE ${USER_FILE})
+  set(STRIP_FILE ${USER_STRIP_FILE})
+  execute_process(COMMAND mkdir -p ${STRIP_DIR})
+  if (NOT MSVC AND CMAKE_STRIP)
+    execute_process(COMMAND "rm" -f ${STRIP_FILE})
+
+    execute_process(
+      COMMAND ${FILE_EXECUTABLE} ${FILE}
+      OUTPUT_VARIABLE FILE_RESULT)
+
+    string(REGEX
+      REPLACE ".*=([a-z0-9]*),.*" "\\1"
+      FILE_CHECKSUM
+      ${FILE_RESULT}
+      )
+
+    if (NOT ${FILE_CHECKSUM} STREQUAL "")
+      string(SUBSTRING ${FILE_CHECKSUM} 0 2 SUB_DIR)
+      string(SUBSTRING ${FILE_CHECKSUM} 2 -1 STRIP_FILE)
+      set(SUB_DEBINFO_DIR .build-id/${SUB_DIR})
+    endif()
+    execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
+    set(FILE ${STRIP_FILE})
+  endif()
+  install(
+    PROGRAMS ${FILE}
+    DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${SUB_DEBINFO_DIR})
+endmacro()
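The install_debinfo macro extracts the build-id from the output of the `file` utility and installs the stripped debug info under .build-id/<first two hex chars>/<rest>, which matches the layout debuggers such as GDB probe for separate debug files (my reading of the intent; the exact lookup rules vary by tool). Splitting a build-id that way looks like this:

    #include <iostream>
    #include <string>

    int main() {
      // GDB-style layout: .build-id/<2-char dir>/<remaining hex>.debug
      // (illustrative build-id value, not taken from a real binary)
      std::string buildId = "b5381a457906d279073822a5ceb24c4bfef94ddb";
      std::string dir = buildId.substr(0, 2);
      std::string file = buildId.substr(2);
      std::cout << ".build-id/" << dir << "/" << file << '\n';
    }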
@@ -1,7 +1,7 @@
 ################################################################################
 # the client package is a complete cmake sub package.
 ################################################################################
-project(PACKAGE-DBG)
+project(@CMAKE_PROJECT_NAME@)
 cmake_minimum_required(VERSION 2.8)

 ################################################################################

@@ -15,6 +15,9 @@ set(CROSS_COMPILING @CROSS_COMPILING@)
 set(CMAKE_INSTALL_BINDIR @CMAKE_INSTALL_BINDIR@)
 set(CMAKE_INSTALL_FULL_BINDIR @CMAKE_INSTALL_FULL_BINDIR@)

+set(CMAKE_INSTALL_SBINDIR @CMAKE_INSTALL_SBINDIR@)
+set(CMAKE_INSTALL_FULL_SBINDIR @CMAKE_INSTALL_FULL_SBINDIR@)
+
 set(CMAKE_INSTALL_DATAROOTDIR @CMAKE_INSTALL_DATAROOTDIR@)
 set(CMAKE_INSTALL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_DATAROOTDIR_ARANGO@)
 set(CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO@)
@@ -69,6 +69,10 @@
     return shortName;
   },

+  getDatabaseShortName: function (id) {
+    return this.getCoordinatorShortName(id);
+  },
+
   getDatabaseServerId: function (shortname) {
     var id;
     if (window.clusterHealth) {
@@ -186,14 +186,15 @@
         async: true,
         success: function (data) {
           if (data.id) {
-            arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
+            console.log(toServer);
+            arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
             window.setTimeout(function () {
               window.App.shardsView.render();
             }, 3000);
           }
         },
         error: function () {
-          arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
+          arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
         }
       });
 
@@ -498,13 +498,20 @@ router.get("/coordshort", function(req, res) {
   if (Array.isArray(coordinators)) {
     var coordinatorStats = coordinators.map(coordinator => {
       var endpoint = global.ArangoClusterInfo.getServerEndpoint(coordinator);
-      var response = download(endpoint.replace(/^tcp/, "http") + "/_db/_system/_admin/aardvark/statistics/short?count=" + coordinators.length, '', {headers: {}});
-      try {
-        return JSON.parse(response.body);
-      } catch (e) {
-        console.error("Couldn't read statistics response:", response.body);
-        throw e;
+      if (endpoint !== "") {
+        var response = download(endpoint.replace(/^tcp/, "http") + "/_db/_system/_admin/aardvark/statistics/short?count=" + coordinators.length, '', {headers: {}});
+        if (response.body === undefined) {
+          console.warn("cannot contact coordinator " + coordinator + " on endpoint " + endpoint);
+        } else {
+          try {
+            return JSON.parse(response.body);
+          } catch (e) {
+            console.error("Couldn't read statistics response:", response.body);
+            throw e;
+          }
+        }
       }
+      return {};
     });
 
     mergeHistory(coordinatorStats);
@@ -179,7 +179,7 @@ const optionsDefaults = {
   'skipBoost': false,
   'skipEndpoints': false,
   'skipGeo': false,
-  'skipLogAnalysis': false,
+  'skipLogAnalysis': true,
   'skipMemoryIntense': false,
   'skipNightly': true,
   'skipNondeterministic': false,
@@ -455,6 +455,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
   // synchronize this shard from the leader
   // this function will throw if anything goes wrong
 
+  var startTime = new Date();
   var isStopping = require('internal').isStopping;
   var ourselves = global.ArangoServerState.id();
 
@@ -487,8 +488,9 @@ function synchronizeOneShard (database, shard, planId, leader) {
       planned[0] !== leader) {
     // Things have changed again, simply terminate:
     terminateAndStartOther();
-    console.debug('synchronizeOneShard: cancelled, %s/%s, %s/%s',
-                  database, shard, database, planId);
+    let endTime = new Date();
+    console.debug('synchronizeOneShard: cancelled, %s/%s, %s/%s, started %s, ended %s',
+                  database, shard, database, planId, startTime.toString(), endTime.toString());
     return;
   }
   var current = [];
@@ -502,8 +504,9 @@ function synchronizeOneShard (database, shard, planId, leader) {
     }
     // We are already there, this is rather strange, but never mind:
     terminateAndStartOther();
-    console.debug('synchronizeOneShard: already done, %s/%s, %s/%s',
-                  database, shard, database, planId);
+    let endTime = new Date();
+    console.debug('synchronizeOneShard: already done, %s/%s, %s/%s, started %s, ended %s',
+                  database, shard, database, planId, startTime.toString(), endTime.toString());
     return;
   }
   console.debug('synchronizeOneShard: waiting for leader, %s/%s, %s/%s',
@@ -524,9 +527,16 @@ function synchronizeOneShard (database, shard, planId, leader) {
       if (isStopping()) {
         throw 'server is shutting down';
       }
+      let startTime = new Date();
       sy = rep.syncCollection(shard,
                               { endpoint: ep, incremental: true,
                                 keepBarrier: true, useCollectionId: false });
+      let endTime = new Date();
+      let longSync = false;
+      if (endTime - startTime > 5000) {
+        console.error('synchronizeOneShard: long call to syncCollection for shard', shard, JSON.stringify(sy), "start time: ", startTime.toString(), "end time: ", endTime.toString());
+        longSync = true;
+      }
       if (sy.error) {
        console.error('synchronizeOneShard: could not initially synchronize',
                      'shard ', shard, sy);
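The instrumentation above brackets the syncCollection call with wall-clock timestamps and flags anything over five seconds. A minimal C++ sketch of the same pattern using std::chrono; the sleep is only a stand-in for the monitored call:

    #include <chrono>
    #include <iostream>
    #include <thread>

    int main() {
      auto start = std::chrono::steady_clock::now();
      // stand-in for the long-running operation being monitored
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
      auto end = std::chrono::steady_clock::now();
      auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
      bool longSync = ms.count() > 5000;  // same 5 s threshold as above
      if (longSync) {
        std::cerr << "long call: " << ms.count() << " ms\n";
      }
      return 0;
    }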
@@ -534,7 +544,15 @@ function synchronizeOneShard (database, shard, planId, leader) {
       } else {
         if (sy.collections.length === 0 ||
             sy.collections[0].name !== shard) {
+          if (longSync) {
+            console.error('synchronizeOneShard: long sync, before cancelBarrier',
+                          new Date().toString());
+          }
           cancelBarrier(ep, database, sy.barrierId);
+          if (longSync) {
+            console.error('synchronizeOneShard: long sync, after cancelBarrier',
+                          new Date().toString());
+          }
           throw 'Shard ' + shard + ' seems to be gone from leader!';
         } else {
           // Now start a read transaction to stop writes:
@@ -594,14 +612,17 @@ function synchronizeOneShard (database, shard, planId, leader) {
     } else if (err2 && err2.errorNum === 1402 && err2.errorMessage.match(/HTTP 404/)) {
       logLevel = 'debug';
     }
-    console[logLevel]("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s",
-                      database, shard, database, planId, JSON.stringify(err2));
+    let endTime = new Date();
+    console[logLevel]("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s, started: %s, ended: %s",
+                      database, shard, database, planId, JSON.stringify(err2),
+                      startTime.toString(), endTime.toString());
     }
   }
   // Tell others that we are done:
   terminateAndStartOther();
-  console.debug('synchronizeOneShard: done, %s/%s, %s/%s',
-                database, shard, database, planId);
+  let endTime = new Date();
+  console.debug('synchronizeOneShard: done, %s/%s, %s/%s, started: %s, ended: %s',
+                database, shard, database, planId, startTime.toString(), endTime.toString());
 }
 
 // /////////////////////////////////////////////////////////////////////////////
@@ -1789,8 +1810,7 @@ function shardDistribution () {
   var result = {};
   for (var i = 0; i < colls.length; ++i) {
     var collName = colls[i].name();
-    var collInfo = global.ArangoClusterInfo.getCollectionInfo(dbName,
-                                                              collName);
+    var collInfo = global.ArangoClusterInfo.getCollectionInfo(dbName, collName);
     var shards = collInfo.shards;
     var collInfoCurrent = {};
     var shardNames = Object.keys(shards);
@@ -152,7 +152,11 @@ exports.manage = function () {
   });
 
   // switch back into previous database
-  db._useDatabase(initialDatabase);
+  try {
+    db._useDatabase(initialDatabase);
+  } catch (err) {
+    db._useDatabase('_system');
+  }
 };
 
 exports.run = function () {
@@ -55,6 +55,7 @@ function optimizerRuleTestSuite() {
 
   var ruleName = "geoindex";
   var colName = "UnitTestsAqlOptimizer" + ruleName.replace(/-/g, "_");
+  var colName2 = colName + "2";
 
   var geocol;
   var sortArray = function (l, r) {
@@ -124,11 +125,21 @@ function optimizerRuleTestSuite() {
       internal.db._drop(colName);
       geocol = internal.db._create(colName);
       geocol.ensureIndex({type:"geo", fields:["lat","lon"]});
-      for (var lat=-40; lat <=40 ; ++lat){
-        for (var lon=-40; lon <= 40; ++lon){
+      var lat, lon;
+      for (lat=-40; lat <=40 ; ++lat) {
+        for (lon=-40; lon <= 40; ++lon) {
           geocol.insert({lat,lon});
         }
       }
+
+      internal.db._drop(colName2);
+      geocol = internal.db._create(colName2);
+      geocol.ensureIndex({type:"geo", fields:["loca.tion.lat","loca.tion.lon"]});
+      for (lat=-40; lat <=40 ; ++lat) {
+        for (lon=-40; lon <= 40; ++lon) {
+          geocol.insert({ loca : { tion : { lat , lon } } });
+        }
+      }
     },
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -137,6 +148,7 @@ function optimizerRuleTestSuite() {
 
     tearDown : function () {
       internal.db._drop(colName);
+      internal.db._drop(colName2);
       geocol = null;
     },
 
@@ -145,7 +157,13 @@ function optimizerRuleTestSuite() {
       geocol.ensureIndex({ type: "hash", fields: [ "y", "z" ], unique: false });
 
       var queries = [
-        { string : "FOR d IN " + colName + " SORT distance(d.lat,d.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
+        { string : "FOR d IN " + colName + " SORT distance(d.lat, d.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
+          cluster : false,
+          sort : false,
+          filter : false,
+          index : true
+        },
+        { string : "FOR d IN " + colName2 + " SORT distance(d.loca.tion.lat, d.loca.tion.lon, 0 ,0 ) ASC LIMIT 1 RETURN d",
           cluster : false,
           sort : false,
           filter : false,
@@ -213,11 +231,15 @@ function optimizerRuleTestSuite() {
     testRuleRemoveNodes : function () {
       if(enabled.removeNodes){
         var queries = [
          [ "FOR d IN " + colName + " SORT distance(d.lat,d.lon, 0 ,0 ) ASC LIMIT 5 RETURN d", false, false, false ],
          [ "FOR d IN " + colName + " SORT distance(0, 0, d.lat,d.lon ) ASC LIMIT 5 RETURN d", false, false, false ],
          [ "FOR d IN " + colName + " FILTER distance(0, 0, d.lat,d.lon ) < 111200 RETURN d", false, false, false ],
          // [ "FOR i IN 1..2 FOR d IN geocol SORT distance(i,2,d.lat,d.lon) ASC LIMIT 5 RETURN d", false, false, false ],
         ];
 
+        var queries2 = [
+          [ "FOR d IN " + colName2 + " SORT distance(d.loca.tion.lat,d.loca.tion.lon, 0 ,0 ) ASC LIMIT 5 RETURN d", false, false, false ]
+        ];
+
         var expected = [
           [[0,0], [-1,0], [0,1], [1,0], [0,-1]],
@@ -234,6 +256,16 @@ function optimizerRuleTestSuite() {
           assertEqual(expected[qindex].sort(),pairs.sort());
           //expect(expected[qindex].sort()).to.be.equal(result.json.sort())
         });
+
+        queries2.forEach(function(query, qindex) {
+          var result = AQL_EXECUTE(query[0]);
+          expect(expected[qindex].length).to.be.equal(result.json.length);
+          var pairs = result.json.map(function(res){
+            return [res.loca.tion.lat,res.loca.tion.lon];
+          });
+          assertEqual(expected[qindex].sort(),pairs.sort());
+          //expect(expected[qindex].sort()).to.be.equal(result.json.sort())
+        });
       }
     }, // testRuleSort
 
@@ -0,0 +1,92 @@
+/*jshint globalstrict:false, strict:false, maxlen: 500 */
+/*global assertTrue, assertFalse, assertEqual, AQL_EXECUTE */
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief tests for index usage
+///
+/// @file
+///
+/// DISCLAIMER
+///
+/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+/// http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is triAGENS GmbH, Cologne, Germany
+///
+/// @author Jan Steemann
+/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
+////////////////////////////////////////////////////////////////////////////////
+
+var jsunity = require("jsunity");
+var db = require("@arangodb").db;
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test suite
+////////////////////////////////////////////////////////////////////////////////
+
+function sortTestSuite () {
+  var c;
+
+  return {
+    setUp : function () {
+      db._drop("UnitTestsCollection");
+      c = db._create("UnitTestsCollection", { numberOfShards: 8 });
+
+      for (var i = 0; i < 11111; ++i) {
+        c.save({ _key: "test" + i, value: i });
+      }
+    },
+
+    tearDown : function () {
+      db._drop("UnitTestsCollection");
+    },
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test without index
+////////////////////////////////////////////////////////////////////////////////
+
+    testSortNoIndex : function () {
+      var result = AQL_EXECUTE("FOR doc IN " + c.name() + " SORT doc.value RETURN doc.value").json;
+      assertEqual(11111, result.length);
+
+      var last = -1;
+      for (var i = 0; i < result.length; ++i) {
+        assertTrue(result[i] > last);
+        last = result[i];
+      }
+    },
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test with index
+////////////////////////////////////////////////////////////////////////////////
+
+    testSortSkiplist : function () {
+      c.ensureIndex({ type: "skiplist", fields: [ "value" ]});
+      var result = AQL_EXECUTE("FOR doc IN " + c.name() + " SORT doc.value RETURN doc.value").json;
+      assertEqual(11111, result.length);
+
+      var last = -1;
+      for (var i = 0; i < result.length; ++i) {
+        assertTrue(result[i] > last);
+        last = result[i];
+      }
+    }
+
+  };
+}
+
+jsunity.run(sortTestSuite);
+
+return jsunity.done();
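Both tests above assert a strictly ascending sequence, each value greater than its predecessor. An equivalent check in C++, sketched with std::adjacent_find (the sample data is illustrative):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    // A pair (a, b) with a >= b would violate strict ascending order;
    // adjacent_find returns end() when no such pair exists.
    int main() {
      std::vector<int> result = {0, 1, 2, 3, 4};
      bool strictlyAscending =
          std::adjacent_find(result.begin(), result.end(),
                             [](int a, int b) { return a >= b; }) == result.end();
      std::cout << (strictlyAscending ? "sorted" : "not sorted") << "\n";
    }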
@@ -1141,6 +1141,52 @@ bool isSuffix(std::string const& str, std::string const& postfix) {
 }
 }
 
+std::string urlDecodePath(std::string const& str) {
+  std::string result;
+  // reserve enough room so we do not need to re-alloc
+  result.reserve(str.size() + 16);
+
+  char const* src = str.c_str();
+  char const* end = src + str.size();
+
+  while (src < end) {
+    if (*src == '%') {
+      if (src + 2 < end) {
+        int h1 = hex2int(src[1], -1);
+        int h2 = hex2int(src[2], -1);
+
+        if (h1 == -1) {
+          ++src;
+        } else {
+          if (h2 == -1) {
+            result.push_back(h1);
+            src += 2;
+          } else {
+            result.push_back(h1 << 4 | h2);
+            src += 3;
+          }
+        }
+      } else if (src + 1 < end) {
+        int h1 = hex2int(src[1], -1);
+
+        if (h1 == -1) {
+          ++src;
+        } else {
+          result.push_back(h1);
+          src += 2;
+        }
+      } else {
+        ++src;
+      }
+    } else {
+      result.push_back(*src);
+      ++src;
+    }
+  }
+
+  return result;
+}
+
 std::string urlDecode(std::string const& str) {
   std::string result;
   // reserve enough room so we do not need to re-alloc
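Judging from the body above, urlDecodePath resolves only %XX escapes and copies every other byte verbatim, so a literal '+' in a path segment is preserved, unlike typical form decoding where '+' becomes a space. A self-contained sketch of that behavior; hex2int is re-implemented here because the original helper lives elsewhere in the sources:

    #include <iostream>
    #include <string>

    static int hex2int(char ch, int errorValue) {
      if (ch >= '0' && ch <= '9') return ch - '0';
      if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
      if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
      return errorValue;
    }

    int main() {
      // "%2F" decodes to '/', but '+' stays a literal plus in a path segment
      std::string in = "a%2Fb+c";
      std::string out;
      for (size_t i = 0; i < in.size(); ++i) {
        if (in[i] == '%' && i + 2 < in.size() &&
            hex2int(in[i + 1], -1) != -1 && hex2int(in[i + 2], -1) != -1) {
          out.push_back(static_cast<char>(
              hex2int(in[i + 1], -1) << 4 | hex2int(in[i + 2], -1)));
          i += 2;
        } else {
          out.push_back(in[i]);
        }
      }
      std::cout << out << "\n";  // prints "a/b+c"
    }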
@@ -299,6 +299,7 @@ bool isSuffix(std::string const& str, std::string const& postfix);
 /// @brief url decodes the string
 ////////////////////////////////////////////////////////////////////////////////
 
+std::string urlDecodePath(std::string const& str);
 std::string urlDecode(std::string const& str);
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -280,7 +280,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
 
     // hostname and port (e.g. [address]:port)
     if (found != std::string::npos && found > 2 && found + 2 < copy.size()) {
-      uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 2));
+      int64_t value = StringUtils::int64(copy.substr(found + 2));
+      // check port over-/underrun
+      if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
+        LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
+        return nullptr;
+      }
+      uint16_t port = static_cast<uint16_t>(value);
       std::string host = copy.substr(1, found - 1);
 
       return new EndpointIpV6(type, protocol, encryption, listenBacklog,
@@ -306,7 +312,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
 
   // hostname and port
   if (found != std::string::npos && found + 1 < copy.size()) {
-    uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 1));
+    int64_t value = StringUtils::int64(copy.substr(found + 1));
+    // check port over-/underrun
+    if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
+      LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
+      return nullptr;
+    }
+    uint16_t port = static_cast<uint16_t>(value);
    std::string host = copy.substr(0, found);
 
    return new EndpointIpV4(type, protocol, encryption, listenBacklog,
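Both hunks apply the same pattern for the "fix potential port number over-/underruns" entry from the CHANGELOG: parse the port into a wide signed integer first, range-check it against the uint16_t limits, and only then narrow. The old direct cast would silently wrap values such as 70000 or -1. A standalone sketch, with std::stoll standing in for StringUtils::int64:

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <string>

    // Parse into a wide signed type, range-check, then narrow.
    bool parsePort(std::string const& s, uint16_t& port) {
      int64_t value;
      try {
        value = std::stoll(s);
      } catch (...) {
        return false;  // not a number at all
      }
      if (value < std::numeric_limits<uint16_t>::min() ||
          value > std::numeric_limits<uint16_t>::max()) {
        return false;  // over-/underrun, e.g. 70000 or -1
      }
      port = static_cast<uint16_t>(value);
      return true;
    }

    int main() {
      uint16_t port;
      std::cout << parsePort("8529", port) << " "
                << parsePort("70000", port) << "\n";  // prints "1 0"
    }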
@@ -88,8 +88,9 @@ class LoggerStream {
     size_t i = 0;
     size_t const n = obj.size();
     for (auto const& it : obj) {
+      _out << it;
       if (++i < n) {
-        _out << it << ", ";
+        _out << ", ";
       }
     }
     _out << ']';
@@ -102,8 +103,9 @@ class LoggerStream {
     size_t i = 0;
     size_t const n = obj.size();
     for (auto const& it : obj) {
+      _out << it;
       if (++i < n) {
-        _out << it << ", ";
+        _out << ", ";
      }
     }
     _out << '}';
@@ -116,8 +118,9 @@ class LoggerStream {
     size_t i = 0;
     size_t n = obj.size();
     for (auto const& it : obj) {
+      _out << it;
       if (++i < n) {
-        _out << it << ", ";
+        _out << ", ";
       }
       _out << it.first << " => " << it.second;
     }
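In these LoggerStream overloads the old code only wrote the element inside the `if (++i < n)` branch, so the final element of a container was never printed. The fix writes the element unconditionally and keeps only the separator conditional. A minimal sketch of the corrected loop:

    #include <iostream>
    #include <vector>

    // Print every element; emit ", " only between elements, so the
    // last element is no longer swallowed.
    int main() {
      std::vector<int> obj = {1, 2, 3};
      size_t i = 0;
      size_t const n = obj.size();
      std::cout << '[';
      for (auto const& it : obj) {
        std::cout << it;       // always print the element
        if (++i < n) {
          std::cout << ", ";   // separator only between elements
        }
      }
      std::cout << "]\n";      // prints [1, 2, 3]
    }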
@@ -217,7 +217,7 @@ std::vector<std::string> GeneralRequest::decodedSuffixes() const {
   result.reserve(_suffixes.size());
 
   for (auto const& it : _suffixes) {
-    result.emplace_back(StringUtils::urlDecode(it));
+    result.emplace_back(StringUtils::urlDecodePath(it));
   }
   return result;
 }
@@ -27,14 +27,14 @@
 
 #include "Basics/Common.h"
 
+#include "Endpoint/ConnectionInfo.h"
+#include "Rest/CommonDefines.h"
+
 #include <velocypack/Builder.h>
 #include <velocypack/Dumper.h>
 #include <velocypack/Options.h>
 #include <velocypack/velocypack-aliases.h>
 
-#include "Endpoint/ConnectionInfo.h"
-#include "Rest/CommonDefines.h"
-
 namespace arangodb {
 namespace velocypack {
 class Builder;
@@ -95,9 +95,6 @@ class GeneralRequest {
   void setProtocol(char const* protocol) { _protocol = protocol; }
 
   ConnectionInfo const& connectionInfo() const { return _connectionInfo; }
-  void setConnectionInfo(ConnectionInfo const& connectionInfo) {
-    _connectionInfo = connectionInfo;
-  }
 
   uint64_t clientTaskId() const { return _clientTaskId; }
   void setClientTaskId(uint64_t clientTaskId) { _clientTaskId = clientTaskId; }
@@ -126,6 +123,12 @@ class GeneralRequest {
   void setRequestPath(std::string const& requestPath) {
     _requestPath = requestPath;
   }
+  void setRequestPath(char const* begin) {
+    _requestPath = std::string(begin);
+  }
+  void setRequestPath(char const* begin, char const* end) {
+    _requestPath = std::string(begin, end - begin);
+  }
 
   // The request path consists of the URL without the host and without any
   // parameters. The request path is split into two parts: the prefix, namely
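The new two-pointer overload builds the path from a half-open range, so a substring of the raw header buffer can be taken without a terminating NUL and without an intermediate copy; the parseHeader change below uses exactly this. A small sketch of the range construction (the buffer contents are illustrative):

    #include <iostream>
    #include <string>

    int main() {
      // Constructing a string from a pointer range, as the new
      // setRequestPath(char const*, char const*) overload does.
      char buffer[] = "GET /_api/version?details=true HTTP/1.1";
      char const* pathBegin = buffer + 4;       // points at '/'
      char const* pathEnd = pathBegin + 13;     // one past "/_api/version"
      std::string requestPath(pathBegin, pathEnd - pathBegin);
      std::cout << requestPath << "\n";         // prints "/_api/version"
    }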
@@ -296,7 +296,7 @@ void HttpRequest::parseHeader(size_t length) {
   }
 
   if (pathBegin < pathEnd) {
-    setRequestPath(pathBegin);
+    setRequestPath(pathBegin, pathEnd);
   }
 
   if (paramBegin < paramEnd) {
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+##python3-setuptools
+##
+##python setup.py install
+##
+##node npm
+##
+##https://github.com/GitbookIO/gitbook
+## npm install gitbook-cli -g
+##
+## http://calibre-ebook.com/download
+
+test_tools(){
+    if ! type easy_install3 >> /dev/null; then
+        echo "you are missing setuptools"
+        echo "apt-get install python-setuptools"
+        exit 1
+    fi
+
+    if ! type node >> /dev/null; then
+        echo "you are missing node"
+        echo "apt-get install nodejs nodejs-legacy"
+        exit 1
+    fi
+
+    if ! type npm >> /dev/null; then
+        echo "you are missing npm"
+        echo "apt-get install npm"
+        exit 1
+    fi
+
+    if ! type calibre >> /dev/null; then
+        echo "you are missing calibre"
+        echo "apt-get install calibre-bin"
+        exit 1
+    fi
+}
+
+install_tools(){
+    (
+    if ! [[ -d markdown-pp ]]; then
+        git clone https://github.com/arangodb-helper/markdown-pp/
+    fi
+    cd markdown-pp
+    python2 setup.py install --user
+    )
+    npm install gitbook-cli
+}
+
+main(){
+    #test for basic tools
+    test_tools
+
+    #cd into target dir
+    mkdir -p "$1"
+    cd "$1" || { echo "unable to change into $1"; exit 1; }
+
+    install_tools
+}
+
+main "$@"