
Merge branch 'devel' of github.com:arangodb/ArangoDB into v8_subsubmodule

commit 0932d51904
Author: Wilfried Goesgens
Date:   2017-02-07 13:48:57 +01:00
173 changed files with 2563 additions and 1776 deletions


@ -1,13 +1,39 @@
devel
-----
* removed IndexThreadFeature, made --database.index-threads option obsolete
v3.2.alpha1 (2017-02-05)
------------------------
* added figure `httpRequests` to AQL query statistics
* removed revisions cache intermediate layer implementation
* obsoleted startup options `--database.revision-cache-chunk-size` and
`--database.revision-cache-target-size`
* fixed potential port number over-/underruns
* added startup option `--log.shorten-filenames` for controlling whether filenames
  in log messages should be shortened to just the filename, without the absolute path
* removed IndexThreadFeature, made `--database.index-threads` option obsolete
* changed index filling to be more parallel, dispatching tasks to boost::asio (see
  the sketch after this list)
* more detailed stacktraces in Foxx apps
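
The boost::asio dispatch mentioned in the index-filling entry above follows the usual post-then-run pattern. A minimal sketch of parallel task dispatch with boost::asio (illustrative only, not the actual index-filling code):

```
#include <boost/asio/io_service.hpp>
#include <thread>
#include <vector>

int main() {
  boost::asio::io_service io;

  // queue the work units first, e.g. one handler per index chunk
  for (int task = 0; task < 8; ++task) {
    io.post([task] { /* fill chunk `task` of the index */ });
  }

  // drain the queue on a small thread pool; io.run() returns
  // once all posted handlers have completed
  std::vector<std::thread> pool;
  for (int i = 0; i < 4; ++i) {
    pool.emplace_back([&io] { io.run(); });
  }
  for (auto& t : pool) {
    t.join();
  }
  return 0;
}
```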
v3.1.11 (XXXX-XX-XX)
--------------------
* fixed sort issue in cluster, occurring when one of the local sort buffers of a
GatherNode was empty
* reduced the number of HTTP requests made for certain kinds of join queries in the
  cluster, leading to a speedup of some join queries
v3.1.10 (2017-XX-XX)
--------------------


@ -82,11 +82,11 @@ set(ARANGODB_VERSION
# for the packages
set(ARANGODB_PACKAGE_VENDOR "ArangoDB GmbH")
set(ARANGODB_PACKAGE_CONTACT "info@arangodb.com")
set(ARANGODB_DISPLAY_NAME "ArangoDB")
set(ARANGODB_URL_INFO_ABOUT "https://www.arangodb.com")
set(ARANGODB_HELP_LINK "https://docs.arangodb.com/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/")
set(ARANGODB_CONTACT "hackers@arangodb.com")
set(ARANGODB_FRIENDLY_STRING "ArangoDB - the multi-model database")
set(ARANGODB_DISPLAY_NAME "ArangoDB")
set(ARANGODB_URL_INFO_ABOUT "https://www.arangodb.com")
set(ARANGODB_HELP_LINK "https://docs.arangodb.com/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/")
set(ARANGODB_CONTACT "hackers@arangodb.com")
set(ARANGODB_FRIENDLY_STRING "ArangoDB - the multi-model database")
# MSVC
set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program")
@ -489,6 +489,8 @@ if (USE_MAINTAINER_MODE)
find_program(AWK_EXECUTABLE awk)
endif ()
find_program(FILE_EXECUTABLE file)
################################################################################
## FAILURE TESTS
################################################################################


@ -18,34 +18,34 @@ then the commands you have to use are (you can use host names if they can be res
On 192.168.1.1:
```
arangod --server.endpoint tcp://0.0.0.0:5001 --server.authentication false --agency.activate true --agency.size 3 --agency.supervision true --database.directory agency1 &
arangod --server.endpoint tcp://0.0.0.0:5001 --agency.my-address tcp://192.168.1.1:5001 --server.authentication false --agency.activate true --agency.size 3 --agency.supervision true --database.directory agency
```
On 192.168.1.2:
```
arangod --server.endpoint tcp://0.0.0.0:5002 --server.authentication false --agency.activate true --agency.size 3 --agency.supervision true --database.directory agency2 &
arangod --server.endpoint tcp://0.0.0.0:5001 --agency.my-address tcp://192.168.1.2:5001 --server.authentication false --agency.activate true --agency.size 3 --agency.supervision true --database.directory agency
```
On 192.168.1.3:
```
arangod --server.endpoint tcp://0.0.0.0:5003 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://192.168.1.1:5001 --agency.endpoint tcp://192.168.1.2:5002 --agency.endpoint tcp://192.168.1.3:5003 --agency.supervision true --database.directory agency3 &
arangod --server.endpoint tcp://0.0.0.0:5001 --agency.my-address tcp://192.168.1.3:5001 --server.authentication false --agency.activate true --agency.size 3 --agency.endpoint tcp://192.168.1.1:5001 --agency.endpoint tcp://192.168.1.2:5001 --agency.endpoint tcp://192.168.1.3:5001 --agency.supervision true --database.directory agency
```
On 192.168.1.1:
```
arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8529 --cluster.my-address tcp://192.168.1.1:8529 --cluster.my-local-info db1 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 --database.directory primary1 &
arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8529 --cluster.my-address tcp://192.168.1.1:8529 --cluster.my-local-info db1 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5001 --cluster.agency-endpoint tcp://192.168.1.3:5001 --database.directory primary1 &
```
On 192.168.1.2:
```
arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8530 --cluster.my-address tcp://192.168.1.2:8530 --cluster.my-local-info db2 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 --database.directory primary2 &
arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8530 --cluster.my-address tcp://192.168.1.2:8530 --cluster.my-local-info db2 --cluster.my-role PRIMARY --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5001 --cluster.agency-endpoint tcp://192.168.1.3:5001 --database.directory primary2 &
```
On 192.168.1.3:
```
arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8531 --cluster.my-address tcp://192.168.1.3:8531 --cluster.my-local-info coord1 --cluster.my-role COORDINATOR --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 --database.directory coordinator &
arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8531 --cluster.my-address tcp://192.168.1.3:8531 --cluster.my-local-info coord1 --cluster.my-role COORDINATOR --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5001 --cluster.agency-endpoint tcp://192.168.1.3:5001 --database.directory coordinator &
```
Obviously, it would no longer be necessary to use different port numbers on different servers. We have kept the same port numbers as in the local setup to minimize the necessary changes.
@ -54,11 +54,11 @@ If you want to setup secondaries, the following commands will do the job:
On 192.168.1.2:
curl -f -X PUT --data '{"primary": "DBServer001", "oldSecondary": "none", "newSecondary": "Secondary001"}' -H "Content-Type: application/json" http://192.168.1.3:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8629 --cluster.my-id Secondary001 --cluster.my-address tcp://192.168.1.2:8629 --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 --database.directory secondary1 &
curl -f -X PUT --data '{"primary": "DBServer001", "oldSecondary": "none", "newSecondary": "Secondary001"}' -H "Content-Type: application/json" http://192.168.1.3:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8629 --cluster.my-id Secondary001 --cluster.my-address tcp://192.168.1.2:8629 --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5001 --cluster.agency-endpoint tcp://192.168.1.3:5001 --database.directory secondary1 &
On 192.168.1.1:
curl -f -X PUT --data '{"primary": "DBServer002", "oldSecondary": "none", "newSecondary": "Secondary002"}' -H "Content-Type: application/json" http://localhost:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8630 --cluster.my-id Secondary002 --cluster.my-address tcp://192.168.1.1:8630 --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5002 --cluster.agency-endpoint tcp://192.168.1.3:5003 --database.directory secondary2 &
curl -f -X PUT --data '{"primary": "DBServer002", "oldSecondary": "none", "newSecondary": "Secondary002"}' -H "Content-Type: application/json" http://localhost:8531/_admin/cluster/replaceSecondary && arangod --server.authentication=false --server.endpoint tcp://0.0.0.0:8630 --cluster.my-id Secondary002 --cluster.my-address tcp://192.168.1.1:8630 --cluster.agency-endpoint tcp://192.168.1.1:5001 --cluster.agency-endpoint tcp://192.168.1.2:5001 --cluster.agency-endpoint tcp://192.168.1.3:5001 --database.directory secondary2 &
Note that we have started `Secondary002` on the same machine as `DBServer001`, and `Secondary001` on the same machine as `DBServer002`, so that a complete pair is not lost when a single machine fails. Furthermore, note that ArangoDB does not yet perform automatic failover to the secondary if a primary fails; this only works in the Apache Mesos setting. For synchronous replication, automatic failover always works, and you do not need to set up secondaries for this.


@ -90,6 +90,42 @@ Create a geo index for a hash array attribute:
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock geoIndexCreateForArrayAttribute2
Use GeoIndex with AQL SORT statement:
@startDocuBlockInline geoIndexSortOptimization
@EXAMPLE_ARANGOSH_OUTPUT{geoIndexSortOptimization}
~db._create("geoSort")
db.geoSort.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
| for (i = -90; i <= 90; i += 10) {
| for (j = -180; j <= 180; j += 10) {
| db.geoSort.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
| }
}
var query = "FOR doc in geoSort SORT distance(doc.latitude, doc.longitude, 0, 0) LIMIT 5 RETURN doc"
db._explain(query, {}, {colors: false});
db._query(query);
~db._drop("geoSort")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock geoIndexSortOptimization
Use GeoIndex with AQL FILTER statement:
@startDocuBlockInline geoIndexFilterOptimization
@EXAMPLE_ARANGOSH_OUTPUT{geoIndexFilterOptimization}
~db._create("geoFilter")
db.geoFilter.ensureIndex({ type: "geo", fields: [ "latitude", "longitude" ] });
| for (i = -90; i <= 90; i += 10) {
| for (j = -180; j <= 180; j += 10) {
| db.geoFilter.save({ name : "Name/" + i + "/" + j, latitude : i, longitude : j });
| }
}
var query = "FOR doc in geoFilter FILTER distance(doc.latitude, doc.longitude, 0, 0) < 2000 RETURN doc"
db._explain(query, {}, {colors: false});
db._query(query);
~db._drop("geoFilter")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock geoIndexFilterOptimization
<!-- js/common/modules/@arangodb/arango-collection-common.js-->
@startDocuBlock collectionGeo


@ -273,8 +273,10 @@ The geo index provides operations to find documents with coordinates nearest to
comparison coordinate, and to find documents with coordinates that are within a specifiable
radius around a comparison coordinate.
The geo index is used via dedicated functions in AQL or the simple queries functions,
but will not be used for other types of queries or conditions.
The geo index is used via dedicated functions in AQL and the simple queries
functions, and it is applied implicitly when an AQL SORT or FILTER uses the
distance function. It will not be used for other types of queries or
conditions.
### Fulltext Index


@ -71,7 +71,10 @@ different usage scenarios:
{ "coords": [ 50.9406645, 6.9599115 ] }
Geo indexes will only be invoked via special functions.
Geo indexes will be invoked via special functions or AQL optimization. The
optimization can be triggered when a collection with a geo index is enumerated
and a SORT or FILTER statement is used in conjunction with the distance
function.
- fulltext index: a fulltext index can be used to index all words contained in
a specific attribute of all documents in a collection. Only words with a


@ -110,6 +110,8 @@ Creates a new collection with a given name. The request must contain an
object with the following attributes.
@RESTRETURNCODES
@RESTRETURNCODE{400}
If the *collection-name* is missing, then an *HTTP 400* is
returned.


@ -120,11 +120,13 @@ fi
VERSION_MAJOR=`echo $VERSION | awk -F. '{print $1}'`
VERSION_MINOR=`echo $VERSION | awk -F. '{print $2}'`
VERSION_REVISION=`echo $VERSION | awk -F. '{print $3}'`
VERSION_PACKAGE="1"
cat CMakeLists.txt \
| sed -e "s~set(ARANGODB_VERSION_MAJOR.*~set(ARANGODB_VERSION_MAJOR \"$VERSION_MAJOR\")~" \
| sed -e "s~set(ARANGODB_VERSION_MINOR.*~set(ARANGODB_VERSION_MINOR \"$VERSION_MINOR\")~" \
| sed -e "s~set(ARANGODB_VERSION_REVISION.*~set(ARANGODB_VERSION_REVISION \"$VERSION_REVISION\")~" \
| sed -e "s~set(ARANGODB_PACKAGE_REVISION.*~set(ARANGODB_PACKAGE_REVISION \"$VERSION_PACKAGE\")~" \
> CMakeLists.txt.tmp
mv CMakeLists.txt.tmp CMakeLists.txt


@ -190,7 +190,7 @@ mkdir -p %{buildroot}%{_piddir}
%{_datadir}/arangodb3/js/node
%files debuginfo
/usr/lib*/debug/*
/usr/lib*/debug/.build-id/*
## -----------------------------------------------------------------------------
## --SECTION-- post


@ -425,6 +425,9 @@ Dependencies to build documentation:
npm install gitbook-cli -g
- [ditaa (DIagrams Through Ascii Art)](http://ditaa.sourceforge.net/) to build the
ascii art diagrams (optional)
- Calibre2 (optional, only required if you want to build the e-book version)
http://calibre-ebook.com/download
@ -807,9 +810,9 @@ Deploying a locally changed version
Create local docker images using the following repositories:
https://github.com/arangodb/arangodb-docker
https://github.com/arangodb/arangodb-mesos-docker
https://github.com/arangodb/arangodb-mesos-framework
- https://github.com/arangodb/arangodb-docker
- https://github.com/arangodb/arangodb-mesos-docker
- https://github.com/arangodb/arangodb-mesos-framework
Then adjust the docker images in the config (`arangodb3.json`) and redeploy it using the curl command above.


@ -49,8 +49,6 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
// --SECTION-- macros
// -----------------------------------------------------------------------------
#define DELETE_ENDPOINT(e) if (e != 0) delete e;
#define FACTORY_NAME(name) name ## Factory
#define FACTORY(name, specification) arangodb::Endpoint::FACTORY_NAME(name)(specification)
@ -58,12 +56,12 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
#define CHECK_ENDPOINT_FEATURE(type, specification, feature, expected) \
e = FACTORY(type, specification); \
BOOST_CHECK_EQUAL((expected), (e->feature())); \
DELETE_ENDPOINT(e);
delete e;
#define CHECK_ENDPOINT_SERVER_FEATURE(type, specification, feature, expected) \
e = arangodb::Endpoint::serverFactory(specification, 1, true); \
BOOST_CHECK_EQUAL((expected), (e->feature())); \
DELETE_ENDPOINT(e);
delete e;
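
The removal of the `DELETE_ENDPOINT` macro relies on `delete` being a no-op for null pointers in C++, which made the macro's null check redundant. A trivial standalone illustration:

```
int main() {
  int* e = nullptr;
  delete e;  // well-defined: deleting a null pointer does nothing
  return 0;
}
```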
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
@ -118,6 +116,11 @@ BOOST_AUTO_TEST_CASE (EndpointInvalid) {
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("ssl@tcp://127.0.0.1:8529"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:8529"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65536"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65537"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:-1"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:6555555555"));
}
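
The new assertions reject ports outside the 16-bit range, matching the "port number over-/underruns" fix in the CHANGELOG. A minimal sketch of the kind of range check this implies, as a hypothetical standalone helper rather than ArangoDB's actual endpoint parser:

```
#include <cstddef>
#include <exception>
#include <string>

// returns true if s is a decimal number in the valid TCP port range;
// rejects "-1", "65536", "6555555555" and trailing garbage
bool isValidPort(std::string const& s) {
  try {
    std::size_t pos = 0;
    long long value = std::stoll(s, &pos);
    return pos == s.size() && value >= 1 && value <= 65535;
  } catch (std::exception const&) {
    return false;
  }
}
```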
////////////////////////////////////////////////////////////////////////////////
@ -491,7 +494,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer1) {
e = arangodb::Endpoint::serverFactory("tcp://127.0.0.1", 1, true);
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -503,7 +506,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer2) {
e = arangodb::Endpoint::serverFactory("ssl://127.0.0.1", 1, true);
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -516,7 +519,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer3) {
e = arangodb::Endpoint::serverFactory("unix:///tmp/socket", 1, true);
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
#endif
@ -529,7 +532,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient1) {
e = arangodb::Endpoint::clientFactory("tcp://127.0.0.1");
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -541,7 +544,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient2) {
e = arangodb::Endpoint::clientFactory("ssl://127.0.0.1");
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -554,7 +557,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient3) {
e = arangodb::Endpoint::clientFactory("unix:///tmp/socket");
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
#endif
@ -575,7 +578,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerTcpIpv4WithPort) {
BOOST_CHECK_EQUAL(667, e->port());
BOOST_CHECK_EQUAL("127.0.0.1:667", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -596,7 +599,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerUnix) {
BOOST_CHECK_EQUAL(0, e->port());
BOOST_CHECK_EQUAL("localhost", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
#endif
@ -617,7 +620,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientSslIpV6WithPortHttp) {
BOOST_CHECK_EQUAL(43425, e->port());
BOOST_CHECK_EQUAL("[0001:0002:0003:0004:0005:0006:0007:0008]:43425", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -637,7 +640,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientTcpIpv6WithoutPort) {
BOOST_CHECK_EQUAL(8529, e->port());
BOOST_CHECK_EQUAL("[::]:8529", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
BOOST_AUTO_TEST_SUITE_END()


@ -1,4 +1,4 @@
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// @brief test suite for files.c
///
/// @file
@ -38,6 +38,7 @@
using namespace arangodb::basics;
static bool Initialized = false;
static uint64_t counter = 0;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
@ -73,11 +74,10 @@ struct CFilesSetup {
}
StringBuffer* writeFile (const char* blob) {
static uint64_t counter = 0;
StringBuffer* filename = new StringBuffer(TRI_UNKNOWN_MEM_ZONE);
filename->appendText(_directory);
filename->appendText("/tmp-");
filename->appendChar(TRI_DIR_SEPARATOR_CHAR);
filename->appendText("tmp-");
filename->appendInteger(++counter);
filename->appendInteger(arangodb::RandomGenerator::interval(UINT32_MAX));
@ -108,6 +108,71 @@ struct CFilesSetup {
BOOST_FIXTURE_TEST_SUITE(CFilesTest, CFilesSetup)
BOOST_AUTO_TEST_CASE (tst_createdirectory) {
std::ostringstream out;
out << _directory.c_str() << TRI_DIR_SEPARATOR_CHAR << "tmp-" << ++counter << "-dir";
std::string filename = out.str();
long unused1;
std::string unused2;
int res = TRI_CreateDirectory(filename.c_str(), unused1, unused2);
BOOST_CHECK_EQUAL(0, res);
BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename.c_str()));
BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename.c_str()));
res = TRI_RemoveDirectory(filename.c_str());
BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename.c_str()));
BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename.c_str()));
}
BOOST_AUTO_TEST_CASE (tst_createdirectoryrecursive) {
std::ostringstream out;
out << _directory.c_str() << TRI_DIR_SEPARATOR_CHAR << "tmp-" << ++counter << "-dir";
std::string filename1 = out.str();
out << TRI_DIR_SEPARATOR_CHAR << "abc";
std::string filename2 = out.str();
long unused1;
std::string unused2;
int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
BOOST_CHECK_EQUAL(0, res);
BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
res = TRI_RemoveDirectory(filename1.c_str());
BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
}
BOOST_AUTO_TEST_CASE (tst_removedirectorydeterministic) {
std::ostringstream out;
out << _directory.c_str() << TRI_DIR_SEPARATOR_CHAR << "tmp-" << ++counter << "-dir";
std::string filename1 = out.str();
out << TRI_DIR_SEPARATOR_CHAR << "abc";
std::string filename2 = out.str();
long unused1;
std::string unused2;
int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
BOOST_CHECK_EQUAL(0, res);
BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));
res = TRI_RemoveDirectoryDeterministic(filename1.c_str());
BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test file exists
////////////////////////////////////////////////////////////////////////////////
@ -116,6 +181,7 @@ BOOST_AUTO_TEST_CASE (tst_existsfile) {
StringBuffer* filename = writeFile("");
BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename->c_str()));
TRI_UnlinkFile(filename->c_str());
BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename->c_str()));
delete filename;
}


@ -1,4 +1,4 @@
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
@ -449,7 +449,7 @@ std::string AgencyCommManager::path(std::string const& p1) {
return "";
}
return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/");
return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/");
}
std::string AgencyCommManager::path(std::string const& p1,
@ -458,8 +458,8 @@ std::string AgencyCommManager::path(std::string const& p1,
return "";
}
return MANAGER->_prefix + "/" + StringUtils::trim(p1, "/") + "/" +
StringUtils::trim(p2, "/");
return MANAGER->_prefix + "/" + basics::StringUtils::trim(p1, "/") + "/" +
basics::StringUtils::trim(p2, "/");
}
std::string AgencyCommManager::generateStamp() {
@ -673,7 +673,7 @@ void AgencyCommManager::removeEndpoint(std::string const& endpoint) {
}
std::string AgencyCommManager::endpointsString() const {
return StringUtils::join(endpoints(), ", ");
return basics::StringUtils::join(endpoints(), ", ");
}
std::vector<std::string> AgencyCommManager::endpoints() const {
@ -1279,7 +1279,7 @@ void AgencyComm::updateEndpoints(arangodb::velocypack::Slice const& current) {
for (const auto& i : VPackObjectIterator(current)) {
auto const endpoint = Endpoint::unifiedForm(i.value.copyString());
if (std::find(stored.begin(), stored.end(), endpoint) == stored.end()) {
LOG_TOPIC(INFO, Logger::CLUSTER)
LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "Adding endpoint " << endpoint << " to agent pool";
AgencyCommManager::MANAGER->addEndpoint(endpoint);
}
@ -1390,7 +1390,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
b.add(VPackValue(clientId));
}
LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
"Failed agency comm (" << result._statusCode << ")! " <<
"Inquiring about clientId " << clientId << ".";
@ -1409,25 +1409,25 @@ AgencyCommResult AgencyComm::sendWithFailover(
for (auto const& i : VPackArrayIterator(inner)) {
if (i.isUInt()) {
if (i.getUInt() == 0) {
LOG_TOPIC(INFO, Logger::AGENCYCOMM)
LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
<< body << " failed: " << outer.toJson();
return result;
} else {
success = true;
}
} else {
LOG_TOPIC(INFO, Logger::AGENCYCOMM)
LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
<< body << " failed with " << outer.toJson();
}
}
}
}
if (success) {
LOG_TOPIC(INFO, Logger::AGENCYCOMM)
LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
<< body << " succeeded (" << outer.toJson() << ")";
return inq;
} else {
LOG_TOPIC(INFO, Logger::AGENCYCOMM)
LOG_TOPIC(DEBUG, Logger::AGENCYCOMM)
<< body << " failed (" << outer.toJson() << ")";
return result;
}
@ -1436,7 +1436,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
}
return inq;
} else {
LOG_TOPIC(INFO, Logger::AGENCYCOMM) <<
LOG_TOPIC(DEBUG, Logger::AGENCYCOMM) <<
"Inquiry failed (" << inq._statusCode << "). Keep trying ...";
continue;
}


@ -45,7 +45,7 @@ AgencyFeature::AgencyFeature(application_features::ApplicationServer* server)
_supervision(false),
_waitForSync(true),
_supervisionFrequency(5.0),
_compactionStepSize(2000),
_compactionStepSize(200000),
_compactionKeepSize(500),
_supervisionGracePeriod(15.0),
_cmdLineTimings(false)
@ -232,7 +232,7 @@ void AgencyFeature::start() {
_agent.reset(new consensus::Agent(consensus::config_t(
_size, _poolSize, _minElectionTimeout, _maxElectionTimeout, endpoint,
_agencyEndpoints, _supervision, _waitForSync, _supervisionFrequency,
_agencyEndpoints, _supervision, false, _supervisionFrequency,
_compactionStepSize, _compactionKeepSize, _supervisionGracePeriod,
_cmdLineTimings)));


@ -257,7 +257,7 @@ bool Agent::recvAppendEntriesRPC(
term_t term, std::string const& leaderId, index_t prevIndex, term_t prevTerm,
index_t leaderCommitIndex, query_t const& queries) {
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Got AppendEntriesRPC from "
LOG_TOPIC(TRACE, Logger::AGENCY) << "Got AppendEntriesRPC from "
<< leaderId << " with term " << term;
// Update commit index
@ -276,40 +276,34 @@ bool Agent::recvAppendEntriesRPC(
size_t nqs = queries->slice().length();
// State machine, _lastCommitIndex to advance atomically
MUTEX_LOCKER(mutexLocker, _ioLock);
if (nqs > 0) {
MUTEX_LOCKER(mutexLocker, _ioLock);
size_t ndups = _state.removeConflicts(queries);
if (nqs > ndups) {
LOG_TOPIC(TRACE, Logger::AGENCY)
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Appending " << nqs - ndups << " entries to state machine. ("
<< nqs << ", " << ndups << ")";
<< nqs << ", " << ndups << "): " << queries->slice().toJson() ;
try {
_state.log(queries, ndups);
_lastCommitIndex = _state.log(queries, ndups);
if (_lastCommitIndex >= _nextCompationAfter) {
_state.compact(_lastCommitIndex);
_nextCompationAfter += _config.compactionStepSize();
}
} catch (std::exception const&) {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Malformed query: " << __FILE__ << __LINE__;
}
}
}
_spearhead.apply(
_state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
_constituent.term());
_readDB.apply(
_state.slices(_lastCommitIndex + 1, leaderCommitIndex), _lastCommitIndex,
_constituent.term());
_lastCommitIndex = leaderCommitIndex;
if (_lastCommitIndex >= _nextCompationAfter) {
_state.compact(_lastCommitIndex);
_nextCompationAfter += _config.compactionStepSize();
}
return true;
}
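
With the step size raised from 2000 to 200000 elsewhere in this commit, compaction now runs much less often; the trigger above fires whenever the commit index crosses the next compaction threshold. A small standalone model of that schedule (illustrative values only):

```
#include <cstdint>
#include <iostream>

int main() {
  uint64_t const stepSize = 200000;  // new default from this commit
  uint64_t nextCompactionAfter = stepSize;

  // simulate the commit index advancing in batches
  for (uint64_t lastCommitIndex = 0; lastCommitIndex <= 600000;
       lastCommitIndex += 50000) {
    if (lastCommitIndex >= nextCompactionAfter) {
      std::cout << "compact log up to index " << lastCommitIndex << "\n";
      nextCompactionAfter += stepSize;
    }
  }
  return 0;
}
```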
@ -348,7 +342,7 @@ void Agent::sendAppendEntriesRPC() {
duration<double> m = system_clock::now() - _lastSent[followerId];
if (highest == _lastHighest[followerId] &&
m.count() < 0.5 * _config.minPing()) {
m.count() < 0.25 * _config.minPing()) {
continue;
}
@ -1122,10 +1116,10 @@ bool Agent::rebuildDBs() {
MUTEX_LOCKER(mutexLocker, _ioLock);
_spearhead.apply(_state.slices(_lastCommitIndex + 1), _lastCommitIndex,
_constituent.term());
_readDB.apply(_state.slices(_lastCommitIndex + 1), _lastCommitIndex,
_constituent.term());
_spearhead.apply(
_state.slices(0, _lastCommitIndex), _lastCommitIndex, _constituent.term());
_readDB.apply(
_state.slices(0, _lastCommitIndex), _lastCommitIndex, _constituent.term());
return true;
}


@ -37,7 +37,7 @@ config_t::config_t()
_supervision(false),
_waitForSync(true),
_supervisionFrequency(5.0),
_compactionStepSize(2000),
_compactionStepSize(200000),
_compactionKeepSize(500),
_supervisionGracePeriod(15.0),
_cmdLineTimings(false),
@ -706,7 +706,7 @@ bool config_t::merge(VPackSlice const& conf) {
_compactionStepSize = conf.get(compactionStepSizeStr).getUInt();
ss << _compactionStepSize << " (persisted)";
} else {
_compactionStepSize = 2000;
_compactionStepSize = 200000;
ss << _compactionStepSize << " (default)";
}
} else {


@ -468,11 +468,13 @@ void Constituent::callElection() {
void Constituent::update(std::string const& leaderID, term_t t) {
MUTEX_LOCKER(guard, _castLock);
_term = t;
if (_leaderID != leaderID) {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Constituent::update: setting _leaderID to " << leaderID
<< " in term " << _term;
_leaderID = leaderID;
_role = FOLLOWER;
}
}
@ -546,6 +548,11 @@ void Constituent::run() {
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Set _leaderID to " << _leaderID
<< " in term " << _term;
} else {
{
MUTEX_LOCKER(guard, _castLock);
_role = FOLLOWER;
}
while (!this->isStopping()) {
if (_role == FOLLOWER) {
static double const M = 1.0e6;


@ -541,8 +541,10 @@ void Inception::run() {
LOG_TOPIC(INFO, Logger::AGENCY) << "Activating agent.";
_agent->ready(true);
} else {
if (!this->isStopping()) {
LOG_TOPIC(FATAL, Logger::AGENCY)
<< "Unable to restart with persisted pool. Fatal exit.";
}
FATAL_ERROR_EXIT();
// FATAL ERROR
}


@ -32,7 +32,6 @@
#include "Basics/StaticStrings.h"
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
using namespace arangodb;


@ -32,7 +32,6 @@
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
using namespace arangodb;


@ -184,7 +184,7 @@ class State {
size_t _cur;
/// @brief Operation options
OperationOptions _options;
arangodb::OperationOptions _options;
/// @brief Empty log entry;
static log_t emptyLog;


@ -418,7 +418,6 @@ struct AstNode {
bool isAttributeAccessForVariable(Variable const* variable, bool allowIndexedAccess) const {
auto node = getAttributeAccessForVariable(allowIndexedAccess);
if (node == nullptr) {
return false;
}


@ -44,9 +44,14 @@ Variable const* BaseExpressionContext::getVariable(size_t i) const {
AqlValue BaseExpressionContext::getVariableValue(Variable const* variable, bool doCopy, bool& mustDestroy) const {
mustDestroy = false;
TRI_ASSERT(_vars != nullptr);
TRI_ASSERT(_regs != nullptr);
TRI_ASSERT(_argv != nullptr);
size_t i = 0;
for (auto it = (*_vars).begin(); it != (*_vars).end(); ++it, ++i) {
if ((*it)->id == variable->id) {
TRI_ASSERT(i < _regs->size());
if (doCopy) {
mustDestroy = true; // as we are copying
return _argv->getValueReference(_startPos, (*_regs)[i]).clone();


@ -35,7 +35,7 @@ class AqlItemBlock;
class ExecutionEngine;
class SingletonBlock : public ExecutionBlock {
class SingletonBlock final : public ExecutionBlock {
public:
SingletonBlock(ExecutionEngine* engine, SingletonNode const* ep)
: ExecutionBlock(engine, ep), _inputRegisterValues(nullptr), _whitelistBuilt(false) {}
@ -75,7 +75,7 @@ class SingletonBlock : public ExecutionBlock {
bool _whitelistBuilt;
};
class FilterBlock : public ExecutionBlock {
class FilterBlock final : public ExecutionBlock {
public:
FilterBlock(ExecutionEngine*, FilterNode const*);
@ -112,7 +112,7 @@ class FilterBlock : public ExecutionBlock {
BlockCollector _collector;
};
class LimitBlock : public ExecutionBlock {
class LimitBlock final : public ExecutionBlock {
public:
LimitBlock(ExecutionEngine* engine, LimitNode const* ep)
: ExecutionBlock(engine, ep),
@ -145,7 +145,7 @@ class LimitBlock : public ExecutionBlock {
bool const _fullCount;
};
class ReturnBlock : public ExecutionBlock {
class ReturnBlock final : public ExecutionBlock {
public:
ReturnBlock(ExecutionEngine* engine, ReturnNode const* ep)
: ExecutionBlock(engine, ep), _returnInheritedResults(false) {}
@ -168,7 +168,7 @@ class ReturnBlock : public ExecutionBlock {
bool _returnInheritedResults;
};
class NoResultsBlock : public ExecutionBlock {
class NoResultsBlock final : public ExecutionBlock {
public:
NoResultsBlock(ExecutionEngine* engine, NoResultsNode const* ep)
: ExecutionBlock(engine, ep) {}


@ -30,7 +30,7 @@
using namespace arangodb::aql;
BlockCollector::BlockCollector() : _totalSize(0) {}
BlockCollector::BlockCollector() : _blocks{_arena}, _totalSize(0) {}
BlockCollector::~BlockCollector() { clear(); }


@ -25,6 +25,7 @@
#define ARANGOD_AQL_BLOCK_COLLECTOR_H 1
#include "Basics/Common.h"
#include "Basics/SmallVector.h"
#include "Aql/types.h"
namespace arangodb {
@ -53,7 +54,8 @@ class BlockCollector {
AqlItemBlock* steal(ResourceMonitor*);
private:
std::vector<AqlItemBlock*> _blocks;
SmallVector<AqlItemBlock*>::allocator_type::arena_type _arena;
SmallVector<AqlItemBlock*> _blocks;
size_t _totalSize;
};


@ -34,7 +34,7 @@ class AqlItemBlock;
class ExecutionEngine;
class CalculationBlock : public ExecutionBlock {
class CalculationBlock final : public ExecutionBlock {
public:
CalculationBlock(ExecutionEngine*, CalculationNode const*);


@ -33,6 +33,7 @@
#include "Aql/AqlValue.h"
#include "Aql/BlockCollector.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionStats.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringBuffer.h"
@ -329,6 +330,16 @@ AqlItemBlock* GatherBlock::getSome(size_t atLeast, size_t atMost) {
delete cur;
_gatherBlockBuffer.at(val.first).pop_front();
_gatherBlockPos.at(val.first) = std::make_pair(val.first, 0);
if (_gatherBlockBuffer.at(val.first).empty()) {
// if we pulled everything from the buffer, we need to fetch
// more data for the shard for which we have no more local
// values.
getBlock(val.first, atLeast, atMost);
// note that if getBlock() returns false here, this is not
// a problem, because the sort function used takes care of
// this
}
}
}
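
This refetch is what fixes the cluster sort issue noted in the CHANGELOG: in a k-way merge, a shard whose local buffer has run dry must be refilled before the comparator runs again, or its remaining values would be skipped. A minimal model of that invariant (hypothetical types, not the actual GatherBlock):

```
#include <cstddef>
#include <deque>
#include <functional>
#include <vector>

using Buffer = std::deque<int>;
using Refill = std::function<void(std::size_t, Buffer&)>;

// pops the smallest front value across all sources, refilling any
// empty source first; returns false once every source is exhausted
bool popSmallest(std::vector<Buffer>& sources, Refill const& refill, int& out) {
  int best = -1;
  for (std::size_t i = 0; i < sources.size(); ++i) {
    if (sources[i].empty()) {
      refill(i, sources[i]);  // may leave it empty if truly exhausted
    }
    if (!sources[i].empty() &&
        (best < 0 || sources[i].front() < sources[best].front())) {
      best = static_cast<int>(i);
    }
  }
  if (best < 0) {
    return false;
  }
  out = sources[best].front();
  sources[best].pop_front();
  return true;
}
```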
@ -1221,7 +1232,7 @@ std::unique_ptr<ClusterCommResult> RemoteBlock::sendRequest(
arangodb::rest::RequestType type, std::string const& urlPart,
std::string const& body) const {
DEBUG_BEGIN_BLOCK();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// Later, we probably want to set these sensibly:
ClientTransactionID const clientTransactionId = "AQL";
@ -1231,6 +1242,7 @@ std::unique_ptr<ClusterCommResult> RemoteBlock::sendRequest(
headers.emplace("Shard-Id", _ownName);
}
++_engine->_stats.httpRequests;
{
JobGuard guard(SchedulerFeature::SCHEDULER);
guard.block();
@ -1327,7 +1339,7 @@ int RemoteBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
responseBodyBuf.c_str(), responseBodyBuf.length());
VPackSlice slice = builder->slice();
if (slice.hasKey("code")) {
return slice.get("code").getNumericValue<int>();
}
@ -1361,9 +1373,14 @@ int RemoteBlock::shutdown(int errorCode) {
std::shared_ptr<VPackBuilder> builder =
VPackParser::fromJson(responseBodyBuf.c_str(), responseBodyBuf.length());
VPackSlice slice = builder->slice();
// read "warnings" attribute if present and add it to our query
if (slice.isObject()) {
if (slice.hasKey("stats")) {
ExecutionStats newStats(slice.get("stats"));
_engine->_stats.add(newStats);
}
// read "warnings" attribute if present and add it to our query
VPackSlice warnings = slice.get("warnings");
if (warnings.isArray()) {
auto query = _engine->getQuery();
@ -1414,19 +1431,14 @@ AqlItemBlock* RemoteBlock::getSome(size_t atLeast, size_t atMost) {
res->result->getBodyVelocyPack();
VPackSlice responseBody = responseBodyBuilder->slice();
ExecutionStats newStats(responseBody.get("stats"));
_engine->_stats.addDelta(_deltaStats, newStats);
_deltaStats = newStats;
if (VelocyPackHelper::getBooleanValue(responseBody, "exhausted", true)) {
traceGetSomeEnd(nullptr);
return nullptr;
}
auto r = new arangodb::aql::AqlItemBlock(_engine->getQuery()->resourceMonitor(), responseBody);
traceGetSomeEnd(r);
return r;
auto r = std::make_unique<AqlItemBlock>(_engine->getQuery()->resourceMonitor(), responseBody);
traceGetSomeEnd(r.get());
return r.release();
// cppcheck-suppress style
DEBUG_END_BLOCK();


@ -28,7 +28,6 @@
#include "Aql/ClusterNodes.h"
#include "Aql/ExecutionBlock.h"
#include "Aql/ExecutionNode.h"
#include "Aql/ExecutionStats.h"
#include "Rest/GeneralRequest.h"
namespace arangodb {
@ -339,9 +338,6 @@ class RemoteBlock : public ExecutionBlock {
/// @brief the ID of the query on the server as a string
std::string _queryId;
/// @brief the ID of the query on the server as a string
ExecutionStats _deltaStats;
/// @brief whether or not this block will forward initialize,
/// initializeCursor or shutDown requests
bool const _isResponsibleForInitializeCursor;


@ -43,7 +43,7 @@ class ExecutionEngine;
typedef std::vector<Aggregator*> AggregateValuesType;
class SortedCollectBlock : public ExecutionBlock {
class SortedCollectBlock final : public ExecutionBlock {
private:
typedef std::vector<Aggregator*> AggregateValuesType;


@ -41,7 +41,7 @@ struct Collection;
class CollectionScanner;
class ExecutionEngine;
class EnumerateCollectionBlock : public ExecutionBlock {
class EnumerateCollectionBlock final : public ExecutionBlock {
public:
EnumerateCollectionBlock(ExecutionEngine* engine,
EnumerateCollectionNode const* ep);


@ -529,6 +529,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
VPackBuilder tmp;
query->ast()->variables()->toVelocyPack(tmp);
result.add("initialize", VPackValue(false));
result.add("variables", tmp.slice());
result.add("collections", VPackValue(VPackValueType::Array));
@ -594,7 +595,8 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
}
/// @brief aggregateQueryIds, get answers for all shards in a Scatter/Gather
void aggregateQueryIds(EngineInfo* info, arangodb::ClusterComm*& cc,
void aggregateQueryIds(EngineInfo* info,
std::shared_ptr<arangodb::ClusterComm>& cc,
arangodb::CoordTransactionID& coordTransactionID,
Collection* collection) {
// pick up the remote query ids
@ -1133,7 +1135,7 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
bool const isCoordinator =
arangodb::ServerState::instance()->isCoordinator(role);
bool const isDBServer = arangodb::ServerState::instance()->isDBServer(role);
TRI_ASSERT(queryRegistry != nullptr);
ExecutionEngine* engine = nullptr;
@ -1354,8 +1356,11 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
}
engine->_root = root;
root->initialize();
root->initializeCursor(nullptr, 0);
if (plan->isResponsibleForInitialize()) {
root->initialize();
root->initializeCursor(nullptr, 0);
}
return engine;
} catch (...) {


@ -1139,7 +1139,7 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
regsToClear.emplace(r);
}
}
en->setRegsToClear(regsToClear);
en->setRegsToClear(std::move(regsToClear));
}
}


@ -586,8 +586,8 @@ class ExecutionNode {
void toVelocyPackHelperGeneric(arangodb::velocypack::Builder&, bool) const;
/// @brief set regs to be deleted
void setRegsToClear(std::unordered_set<RegisterId> const& toClear) {
_regsToClear = toClear;
void setRegsToClear(std::unordered_set<RegisterId>&& toClear) {
_regsToClear = std::move(toClear);
}
protected:


@ -177,10 +177,12 @@ ExecutionPlan::ExecutionPlan(Ast* ast)
: _ids(),
_root(nullptr),
_varUsageComputed(false),
_isResponsibleForInitialize(true),
_nextId(0),
_ast(ast),
_lastLimitNode(nullptr),
_subqueries() {}
_subqueries() {
}
/// @brief destroy the plan, frees all assigned nodes
ExecutionPlan::~ExecutionPlan() {
@ -280,6 +282,7 @@ ExecutionPlan* ExecutionPlan::clone() {
plan->_root = _root->clone(plan.get(), true, false);
plan->_nextId = _nextId;
plan->_appliedRules = _appliedRules;
plan->_isResponsibleForInitialize = _isResponsibleForInitialize;
CloneNodeAdder adder(plan.get());
plan->_root->walk(&adder);
@ -348,6 +351,7 @@ void ExecutionPlan::toVelocyPack(VPackBuilder& builder, Ast* ast, bool verbose)
size_t nrItems = 0;
builder.add("estimatedCost", VPackValue(_root->getCost(nrItems)));
builder.add("estimatedNrItems", VPackValue(nrItems));
builder.add("initialize", VPackValue(_isResponsibleForInitialize));
builder.close();
}
@ -1882,17 +1886,22 @@ void ExecutionPlan::insertDependency(ExecutionNode* oldNode,
/// @brief create a plan from VPack
ExecutionNode* ExecutionPlan::fromSlice(VPackSlice const& slice) {
ExecutionNode* ret = nullptr;
if (!slice.isObject()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "plan slice is not an object");
}
if (slice.hasKey("initialize")) {
// whether or not this plan (or fragment) is responsible for calling initialize
_isResponsibleForInitialize = slice.get("initialize").getBoolean();
}
VPackSlice nodes = slice.get("nodes");
if (!nodes.isArray()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "plan \"nodes\" attribute is not an array");
}
ExecutionNode* ret = nullptr;
// first, re-create all nodes from the Slice, using the node ids
// no dependency links will be set up in this step
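
Together with the coordinator writing `result.add("initialize", VPackValue(false))` earlier in this commit, this flag makes exactly one side responsible for calling initialize/initializeCursor. A sketch of the round-trip using basic velocypack calls (hypothetical standalone context):

```
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

int main() {
  // sender: serialize the plan fragment with initialize = false
  VPackBuilder b;
  b.openObject();
  b.add("initialize", VPackValue(false));
  b.close();

  // receiver: default to true, override only if the attribute is present
  bool isResponsibleForInitialize = true;
  VPackSlice slice = b.slice();
  if (slice.hasKey("initialize")) {
    isResponsibleForInitialize = slice.get("initialize").getBoolean();
  }
  return isResponsibleForInitialize ? 0 : 1;
}
```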


@ -75,6 +75,8 @@ class ExecutionPlan {
/// @brief check if the plan is empty
inline bool empty() const { return (_root == nullptr); }
bool isResponsibleForInitialize() const { return _isResponsibleForInitialize; }
/// @brief note that an optimizer rule was applied
inline void addAppliedRule(int level) { _appliedRules.emplace_back(level); }
@ -299,6 +301,8 @@ class ExecutionPlan {
/// @brief flag to indicate whether the variable usage is computed
bool _varUsageComputed;
bool _isResponsibleForInitialize;
/// @brief auto-increment sequence for node ids
size_t _nextId;


@ -38,6 +38,7 @@ void ExecutionStats::toVelocyPack(VPackBuilder& builder) const {
builder.add("scannedFull", VPackValue(scannedFull));
builder.add("scannedIndex", VPackValue(scannedIndex));
builder.add("filtered", VPackValue(filtered));
builder.add("httpRequests", VPackValue(httpRequests));
if (fullCount > -1) {
// fullCount is exceptional. it has a default value of -1 and is
@ -56,6 +57,7 @@ void ExecutionStats::toVelocyPackStatic(VPackBuilder& builder) {
builder.add("scannedFull", VPackValue(0));
builder.add("scannedIndex", VPackValue(0));
builder.add("filtered", VPackValue(0));
builder.add("httpRequests", VPackValue(0));
builder.add("fullCount", VPackValue(-1));
builder.add("executionTime", VPackValue(0.0));
builder.close();
@ -67,12 +69,12 @@ ExecutionStats::ExecutionStats()
scannedFull(0),
scannedIndex(0),
filtered(0),
httpRequests(0),
fullCount(-1),
executionTime(0.0) {}
ExecutionStats::ExecutionStats(VPackSlice const& slice)
: fullCount(-1),
executionTime(0.0) {
: ExecutionStats() {
if (!slice.isObject()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"stats is not an object");
@ -83,6 +85,10 @@ ExecutionStats::ExecutionStats(VPackSlice const& slice)
scannedFull = slice.get("scannedFull").getNumber<int64_t>();
scannedIndex = slice.get("scannedIndex").getNumber<int64_t>();
filtered = slice.get("filtered").getNumber<int64_t>();
if (slice.hasKey("httpRequests")) {
httpRequests = slice.get("httpRequests").getNumber<int64_t>();
}
// note: fullCount is an optional attribute!
if (slice.hasKey("fullCount")) {


@ -56,21 +56,24 @@ struct ExecutionStats {
writesIgnored += summand.writesIgnored;
scannedFull += summand.scannedFull;
scannedIndex += summand.scannedIndex;
fullCount += summand.fullCount;
filtered += summand.filtered;
httpRequests += summand.httpRequests;
if (summand.fullCount > 0) {
// fullCount may be negative, don't add it then
fullCount += summand.fullCount;
}
// intentionally no modification of executionTime
}
/// @brief sumarize the delta of two other sets of ExecutionStats to us
void addDelta(ExecutionStats const& lastStats,
ExecutionStats const& newStats) {
writesExecuted += newStats.writesExecuted - lastStats.writesExecuted;
writesIgnored += newStats.writesIgnored - lastStats.writesIgnored;
scannedFull += newStats.scannedFull - lastStats.scannedFull;
scannedIndex += newStats.scannedIndex - lastStats.scannedIndex;
fullCount += newStats.fullCount - lastStats.fullCount;
filtered += newStats.filtered - lastStats.filtered;
// intentionally no modification of executionTime
void clear() {
writesExecuted = 0;
writesIgnored = 0;
scannedFull = 0;
scannedIndex = 0;
filtered = 0;
httpRequests = 0;
fullCount = -1;
executionTime = 0.0;
}
/// @brief number of successfully executed write operations
@ -87,6 +90,9 @@ struct ExecutionStats {
/// @brief number of documents filtered away
int64_t filtered;
/// @brief total number of HTTP requests made
int64_t httpRequests;
/// @brief total number of results, before applying last limit
int64_t fullCount;


@ -59,7 +59,7 @@ struct NonConstExpression {
~NonConstExpression() { delete expression; }
};
class IndexBlock : public ExecutionBlock {
class IndexBlock final : public ExecutionBlock {
public:
IndexBlock(ExecutionEngine* engine, IndexNode const* ep);


@ -47,18 +47,20 @@ Optimizer::Optimizer(size_t maxNumberOfPlans)
setupRules();
}
}
size_t Optimizer::hasEnoughPlans(size_t extraPlans) const {
return (_newPlans.size() + extraPlans >= _maxNumberOfPlans);
}
// @brief add a plan to the optimizer
bool Optimizer::addPlan(ExecutionPlan* plan, Rule const* rule, bool wasModified,
void Optimizer::addPlan(std::unique_ptr<ExecutionPlan> plan, Rule const* rule, bool wasModified,
int newLevel) {
TRI_ASSERT(plan != nullptr);
if (newLevel > 0) {
// use user-specified new level
_newPlans.push_back(plan, newLevel);
} else {
if (newLevel <= 0) {
// use rule's level
_newPlans.push_back(plan, rule->level);
newLevel = rule->level;
// else use user-specified new level
}
if (wasModified) {
@ -72,31 +74,31 @@ bool Optimizer::addPlan(ExecutionPlan* plan, Rule const* rule, bool wasModified,
plan->invalidateCost();
plan->findVarUsage();
}
if (_newPlans.size() >= _maxNumberOfPlans) {
return false;
}
return true;
// hand over ownership
_newPlans.push_back(plan.get(), newLevel);
plan.release();
}
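
The `push_back(plan.get())` followed by `plan.release()` is the core of the new ownership contract: the `unique_ptr` keeps owning the plan until the container has stored the raw pointer, so a throwing `push_back` cannot leak it. A condensed sketch of the idiom (hypothetical `Plan` type):

```
#include <memory>
#include <vector>

struct Plan { int level = 0; };

std::vector<Plan*> storage;  // logically owns its raw pointers

void addPlan(std::unique_ptr<Plan> plan, int level) {
  plan->level = level;
  storage.push_back(plan.get());  // may throw; unique_ptr still owns the plan
  plan.release();                 // no-throw: ownership now with the container
}
```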
// @brief the actual optimization
int Optimizer::createPlans(ExecutionPlan* plan,
std::vector<std::string> const& rulesSpecification,
bool inspectSimplePlans) {
// _plans contains the previous optimization result
_plans.clear();
try {
_plans.push_back(plan, 0);
} catch (...) {
delete plan;
throw;
}
if (!inspectSimplePlans &&
!arangodb::ServerState::instance()->isCoordinator() &&
plan->isDeadSimple()) {
// the plan is so simple that any further optimizations would probably cost
// more than simply executing the plan
_plans.clear();
try {
_plans.push_back(plan, 0);
} catch (...) {
delete plan;
throw;
}
estimatePlans();
return TRI_ERROR_NO_ERROR;
@ -111,15 +113,6 @@ int Optimizer::createPlans(ExecutionPlan* plan,
// which optimizer rules are disabled?
std::unordered_set<int> disabledIds(getDisabledRuleIds(rulesSpecification));
// _plans contains the previous optimization result
_plans.clear();
try {
_plans.push_back(plan, 0);
} catch (...) {
delete plan;
throw;
}
_newPlans.clear();
while (leastDoneLevel < maxRuleLevel) {
@ -134,11 +127,12 @@ int Optimizer::createPlans(ExecutionPlan* plan,
// For all current plans:
while (_plans.size() > 0) {
int level;
auto p = _plans.pop_front(level);
std::unique_ptr<ExecutionPlan> p(_plans.pop_front(level));
if (level >= maxRuleLevel) {
_newPlans.push_back(p, level); // nothing to do, just keep it
} else { // find next rule
_newPlans.push_back(p.get(), level); // nothing to do, just keep it
p.release();
} else { // find next rule
auto it = _rules.upper_bound(level);
TRI_ASSERT(it != _rules.end());
@ -158,7 +152,8 @@ int Optimizer::createPlans(ExecutionPlan* plan,
// we picked a disabled rule or we have reached the max number of
// plans and just skip this rule
_newPlans.push_back(p, level); // nothing to do, just keep it
_newPlans.push_back(p.get(), level); // nothing to do, just keep it
p.release();
if (!rule.isHidden) {
++_stats.rulesSkipped;
@ -167,35 +162,27 @@ int Optimizer::createPlans(ExecutionPlan* plan,
continue;
}
try {
TRI_IF_FAILURE("Optimizer::createPlansOom") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
TRI_IF_FAILURE("Optimizer::createPlansOom") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
if (!p->varUsageComputed()) {
p->findVarUsage();
}
if (!p->varUsageComputed()) {
p->findVarUsage();
}
// all optimizer rule functions must obey the following guidelines:
// - the original plan passed to the rule function must be deleted if
// and only
// if it has not been added (back) to the optimizer (using addPlan).
// - if the rule throws, then the original plan will be deleted by the
// optimizer.
// thus the rule must not have deleted the plan itself or add it
// back to the
// optimizer
rule.func(this, p, &rule);
// all optimizer rule functions must obey the following guidelines:
// - the original plan passed to the rule function must be deleted if
// and only
// if it has not been added (back) to the optimizer (using addPlan).
// - if the rule throws, then the original plan will be deleted by the
// optimizer.
// thus the rule must not have deleted the plan itself or add it
// back to the
// optimizer
rule.func(this, std::move(p), &rule);
if (!rule.isHidden) {
++_stats.rulesExecuted;
}
} catch (...) {
if (!_newPlans.isContained(p)) {
// only delete the plan if not yet contained in _newPlans
delete p;
}
throw;
if (!rule.isHidden) {
++_stats.rulesExecuted;
}
}


@ -27,12 +27,11 @@
#include "Basics/Common.h"
#include "Aql/ExecutionPlan.h"
#include "Basics/MutexLocker.h"
#include "Basics/RollingVector.h"
#include <velocypack/Builder.h>
#include <velocypack/velocypack-aliases.h>
#include <deque>
namespace arangodb {
namespace aql {
@ -216,7 +215,7 @@ class Optimizer {
/// set the level of the appended plan to the largest level of rule
/// that ought to be considered as done to indicate which rule is to be
/// applied next.
typedef std::function<void(Optimizer*, ExecutionPlan*, Rule const*)>
typedef std::function<void(Optimizer*, std::unique_ptr<ExecutionPlan>, Rule const*)>
RuleFunction;
/// @brief type of an optimizer rule
@ -243,10 +242,13 @@ class Optimizer {
/// @brief the following struct keeps a list (deque) of ExecutionPlan*
/// and has some automatic convenience functions.
struct PlanList {
std::deque<ExecutionPlan*> list;
std::deque<int> levelDone;
RollingVector<ExecutionPlan*> list;
RollingVector<int> levelDone;
PlanList() {}
PlanList() {
list.reserve(8);
levelDone.reserve(8);
}
/// @brief constructor with a plan
PlanList(ExecutionPlan* p, int level) { push_back(p, level); }
@ -296,13 +298,8 @@ class Optimizer {
/// @brief steals all the plans in b and clears b at the same time
void steal(PlanList& b) {
list.swap(b.list);
levelDone.swap(b.levelDone);
for (auto& p : b.list) {
delete p;
}
b.list.clear();
b.levelDone.clear();
list = std::move(b.list);
levelDone = std::move(b.levelDone);
}
/// @brief appends all the plans to the target and clears *this at the same
@ -351,9 +348,10 @@ class Optimizer {
/// stealPlans.
int createPlans(ExecutionPlan* p, std::vector<std::string> const&, bool);
size_t hasEnoughPlans(size_t extraPlans) const;
/// @brief add a plan to the optimizer
/// returns false if there are already enough plans, true otherwise
bool addPlan(ExecutionPlan*, Rule const*, bool, int newLevel = 0);
void addPlan(std::unique_ptr<ExecutionPlan>, Rule const*, bool, int newLevel = 0);
/// @brief getBest, ownership of the plan remains with the optimizer
ExecutionPlan* getBest() {
@ -364,7 +362,7 @@ class Optimizer {
}
/// @brief getPlans, ownership of the plans remains with the optimizer
std::deque<ExecutionPlan*>& getPlans() { return _plans.list; }
RollingVector<ExecutionPlan*>& getPlans() { return _plans.list; }
/// @brief stealBest, ownership of the plan is handed over to the caller,
/// all other plans are deleted
@ -389,9 +387,8 @@ class Optimizer {
/// @brief stealPlans, ownership of the plans is handed over to the caller,
/// the optimizer will forget about them!
std::deque<ExecutionPlan*> stealPlans() {
std::deque<ExecutionPlan*> res;
res.swap(_plans.list);
RollingVector<ExecutionPlan*> stealPlans() {
RollingVector<ExecutionPlan*> res(std::move(_plans.list));
_plans.levelDone.clear();
return res;
}

File diff suppressed because it is too large


@ -32,85 +32,85 @@ namespace arangodb {
namespace aql {
/// @brief adds a SORT operation for IN right-hand side operands
void sortInValuesRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void sortInValuesRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief remove redundant sorts
/// this rule modifies the plan in place:
/// - sorts that are covered by earlier sorts will be removed
void removeRedundantSortsRule(Optimizer*, ExecutionPlan*,
void removeRedundantSortsRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief remove all unnecessary filters
/// this rule modifies the plan in place:
/// - filters that are always true are removed completely
/// - filters that are always false will be replaced by a NoResults node
void removeUnnecessaryFiltersRule(Optimizer*, ExecutionPlan*,
void removeUnnecessaryFiltersRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief remove unused INTO variable from COLLECT, or unused aggregates
void removeCollectVariablesRule(Optimizer*, ExecutionPlan*,
void removeCollectVariablesRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief propagate constant attributes in FILTERs
void propagateConstantAttributesRule(Optimizer*, ExecutionPlan*,
void propagateConstantAttributesRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief remove SORT RAND() if appropriate
void removeSortRandRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void removeSortRandRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief move calculations up in the plan
/// this rule modifies the plan in place
/// it aims to move up calculations as far up in the plan as possible, to
/// avoid redundant calculations in inner loops
void moveCalculationsUpRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void moveCalculationsUpRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief move calculations down in the plan
/// this rule modifies the plan in place
/// it aims to move down calculations as far down in the plan as possible,
/// beyond FILTER and LIMIT statements
void moveCalculationsDownRule(Optimizer*, ExecutionPlan*,
void moveCalculationsDownRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief determine the "right" type of CollectNode and
/// add a sort node for each COLLECT (may be removed later)
/// this rule cannot be turned off (otherwise, the query result might be wrong!)
void specializeCollectRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void specializeCollectRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief split and-combined filters and break them into smaller parts
void splitFiltersRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void splitFiltersRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief move filters up in the plan
/// this rule modifies the plan in place
/// filters are moved as far up in the plan as possible to make result sets
/// as small as possible as early as possible
/// filters are not pushed beyond limits
void moveFiltersUpRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void moveFiltersUpRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief remove redundant CalculationNodes
void removeRedundantCalculationsRule(Optimizer*, ExecutionPlan*,
void removeRedundantCalculationsRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief remove CalculationNodes and SubqueryNodes that are never needed
void removeUnnecessaryCalculationsRule(Optimizer*, ExecutionPlan*,
void removeUnnecessaryCalculationsRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief useIndex, try to use an index for filtering
void useIndexesRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void useIndexesRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief try to use the index for sorting
void useIndexForSortRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void useIndexForSortRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief try to remove filters which are covered by indexes
void removeFiltersCoveredByIndexRule(Optimizer*, ExecutionPlan*,
void removeFiltersCoveredByIndexRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief interchange adjacent EnumerateCollectionNodes in all possible ways
void interchangeAdjacentEnumerationsRule(Optimizer*, ExecutionPlan*,
void interchangeAdjacentEnumerationsRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief scatter operations in cluster - send all incoming rows to all remote
/// clients
void scatterInClusterRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void scatterInClusterRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief distribute operations in cluster - send each incoming row to every
/// remote client precisely once. This happens in queries like:
@ -119,26 +119,26 @@ void scatterInClusterRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
///
/// where coll2 is sharded by _key, but not if it is sharded by anything else.
/// The collections coll1 and coll2 do not have to be distinct for this.
void distributeInClusterRule(Optimizer*, ExecutionPlan*,
void distributeInClusterRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
#ifdef USE_ENTERPRISE
void distributeInClusterRuleSmartEdgeCollection(Optimizer*, ExecutionPlan*,
void distributeInClusterRuleSmartEdgeCollection(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief remove scatter/gather and remote nodes for satellite collections
void removeSatelliteJoinsRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void removeSatelliteJoinsRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
#endif
void distributeFilternCalcToClusterRule(Optimizer*, ExecutionPlan*,
void distributeFilternCalcToClusterRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
void distributeSortToClusterRule(Optimizer*, ExecutionPlan*,
void distributeSortToClusterRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief try to get rid of a RemoteNode->ScatterNode combination which has
/// only a SingletonNode and possibly some CalculationNodes as dependencies
void removeUnnecessaryRemoteScatterRule(Optimizer*, ExecutionPlan*,
void removeUnnecessaryRemoteScatterRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief this rule removes Remote-Gather-Scatter/Distribute-Remote nodes from
@ -166,7 +166,7 @@ void removeUnnecessaryRemoteScatterRule(Optimizer*, ExecutionPlan*,
///
/// where f is some function.
///
void undistributeRemoveAfterEnumCollRule(Optimizer*, ExecutionPlan*,
void undistributeRemoveAfterEnumCollRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief this rule replaces expressions of the type:
@ -175,33 +175,33 @@ void undistributeRemoveAfterEnumCollRule(Optimizer*, ExecutionPlan*,
// x.val IN [1,2,3]
// when the OR conditions are present in the same FILTER node, and refer to the
// same (single) attribute.
void replaceOrWithInRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void replaceOrWithInRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
void removeRedundantOrRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void removeRedundantOrRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
/// @brief remove $OLD and $NEW variables from data-modification statements
/// if not required
void removeDataModificationOutVariablesRule(Optimizer*, ExecutionPlan*,
void removeDataModificationOutVariablesRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief patch UPDATE statement on single collection that iterates over the
/// entire collection to operate in batches
void patchUpdateStatementsRule(Optimizer*, ExecutionPlan*,
void patchUpdateStatementsRule(Optimizer*, std::unique_ptr<ExecutionPlan>,
Optimizer::Rule const*);
/// @brief optimizes away unused traversal output variables and
/// merges filter nodes into graph traversal nodes
void optimizeTraversalsRule(Optimizer* opt, ExecutionPlan* plan,
void optimizeTraversalsRule(Optimizer* opt, std::unique_ptr<ExecutionPlan> plan,
Optimizer::Rule const* rule);
/// @brief prepares traversals for execution (hidden rule)
void prepareTraversalsRule(Optimizer* opt, ExecutionPlan* plan,
void prepareTraversalsRule(Optimizer* opt, std::unique_ptr<ExecutionPlan> plan,
Optimizer::Rule const* rule);
/// @brief moves simple subqueries one level higher
void inlineSubqueriesRule(Optimizer*, ExecutionPlan*, Optimizer::Rule const*);
void inlineSubqueriesRule(Optimizer*, std::unique_ptr<ExecutionPlan>, Optimizer::Rule const*);
void geoIndexRule(Optimizer* opt, ExecutionPlan* plan, Optimizer::Rule const* rule);
void geoIndexRule(Optimizer* opt, std::unique_ptr<ExecutionPlan> plan, Optimizer::Rule const* rule);
} // namespace aql
} // namespace arangodb
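The rule signatures above switch from a raw `ExecutionPlan*` to `std::unique_ptr<ExecutionPlan>`, so each rule now owns the plan it transforms. A minimal, compilable sketch of that ownership convention, using stand-in Plan/Rule types rather than the real ArangoDB classes:

```
#include <iostream>
#include <memory>
#include <utility>

// stand-ins; the real ExecutionPlan and Optimizer::Rule are far richer
struct Plan { bool hasTrivialFilter = true; };
struct Rule {};

// a rule that owns the plan for the duration of the call, mirroring the
// std::unique_ptr<ExecutionPlan> signatures introduced above
void removeUnnecessaryFiltersSketch(std::unique_ptr<Plan> plan, Rule const*) {
  if (plan->hasTrivialFilter) {
    plan->hasTrivialFilter = false;  // "remove" the always-true filter
  }
  // the plan is destroyed here unless it is moved onward; ownership is explicit
}

int main() {
  auto plan = std::make_unique<Plan>();
  Rule rule;
  removeUnnecessaryFiltersSketch(std::move(plan), &rule);
  // a moved-from unique_ptr is guaranteed to be null afterwards
  std::cout << (plan == nullptr ? "plan consumed by rule" : "unexpected") << "\n";
  return 0;
}
```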
View File
@ -156,7 +156,6 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_ast(nullptr),
_profile(nullptr),
_state(INVALID_STATE),
_plan(nullptr),
_parser(nullptr),
_trx(nullptr),
_engine(nullptr),
@ -224,7 +223,6 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_ast(nullptr),
_profile(nullptr),
_state(INVALID_STATE),
_plan(nullptr),
_parser(nullptr),
_trx(nullptr),
_engine(nullptr),
@ -313,7 +311,7 @@ Query* Query::clone(QueryPart part, bool withPlan) {
if (_plan != nullptr) {
if (withPlan) {
// clone the existing plan
clone->setPlan(_plan->clone(*clone));
clone->_plan.reset(_plan->clone(*clone));
}
// clone all variables
@ -326,7 +324,7 @@ Query* Query::clone(QueryPart part, bool withPlan) {
if (clone->_plan == nullptr) {
// initialize an empty plan
clone->setPlan(new ExecutionPlan(ast()));
clone->_plan.reset(new ExecutionPlan(ast()));
}
TRI_ASSERT(clone->_trx == nullptr);
@ -566,7 +564,7 @@ QueryResult Query::prepare(QueryRegistry* registry) {
// If all went well so far, then we keep _plan, _parser and _trx and
// return:
_plan = plan.release();
_plan = std::move(plan);
_parser = parser.release();
_engine = engine;
return QueryResult();
@ -731,14 +729,12 @@ QueryResult Query::execute(QueryRegistry* registry) {
}
_trx->commit();
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
_engine->_stats.toVelocyPack(*(stats.get()));
result.context = _trx->transactionContext();
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR);
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
enterState(FINALIZATION);
@ -913,18 +909,15 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
_trx->commit();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
_engine->_stats.toVelocyPack(*(stats.get()));
result.context = _trx->transactionContext();
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::executeV8: before cleanupPlanAndEngine"
<< " this: " << (uintptr_t) this;
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR);
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
enterState(FINALIZATION);
@ -1387,10 +1380,13 @@ std::string Query::getStateString() const {
}
/// @brief cleanup plan and engine for current query
void Query::cleanupPlanAndEngine(int errorCode) {
void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
if (_engine != nullptr) {
try {
_engine->shutdown(errorCode);
if (statsBuilder != nullptr) {
_engine->_stats.toVelocyPack(*statsBuilder);
}
} catch (...) {
// shutdown may fail but we must not throw here
// (we're also called from the destructor)
@ -1410,18 +1406,7 @@ void Query::cleanupPlanAndEngine(int errorCode) {
_parser = nullptr;
}
if (_plan != nullptr) {
delete _plan;
_plan = nullptr;
}
}
/// @brief set the plan for the query
void Query::setPlan(ExecutionPlan* plan) {
if (_plan != nullptr) {
delete _plan;
}
_plan = plan;
_plan.reset();
}
/// @brief create a TransactionContext
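The Query changes above replace the raw `_plan` pointer plus manual `delete` with a `std::unique_ptr`, so `setPlan()` disappears in favour of `reset()` and `std::move`. A compilable sketch of the same ownership pattern with a stand-in plan type:

```
#include <iostream>
#include <memory>
#include <utility>

struct PlanStub {};  // stand-in for arangodb::aql::ExecutionPlan

class QuerySketch {
 public:
  // replaces the old setPlan(): reset() deletes any previous plan first
  void adoptPlan(PlanStub* plan) { _plan.reset(plan); }

  // replaces "_plan = plan.release()": a move transfers ownership directly
  void adoptPlan(std::unique_ptr<PlanStub> plan) { _plan = std::move(plan); }

  PlanStub* plan() const { return _plan.get(); }

 private:
  std::unique_ptr<PlanStub> _plan;  // was a raw pointer plus manual delete
};

int main() {
  QuerySketch q;
  q.adoptPlan(std::make_unique<PlanStub>());
  std::cout << (q.plan() != nullptr ? "plan owned" : "empty") << "\n";
  return 0;
}
```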
View File
@ -268,16 +268,13 @@ class Query {
inline arangodb::Transaction* trx() { return _trx; }
/// @brief get the plan for the query
ExecutionPlan* plan() const { return _plan; }
ExecutionPlan* plan() const { return _plan.get(); }
/// @brief whether or not the query returns verbose error messages
bool verboseErrors() const {
return getBooleanOption("verboseErrors", false);
}
/// @brief set the plan for the query
void setPlan(ExecutionPlan* plan);
/// @brief enter a V8 context
void enterContext();
@ -378,7 +375,7 @@ class Query {
void enterState(ExecutionState);
/// @brief cleanup plan and engine for current query
void cleanupPlanAndEngine(int);
void cleanupPlanAndEngine(int, VPackBuilder* statsBuilder = nullptr);
/// @brief create a TransactionContext
std::shared_ptr<arangodb::TransactionContext> createTransactionContext();
@ -438,7 +435,7 @@ class Query {
ExecutionState _state;
/// @brief the ExecutionPlan object, if the query is prepared
ExecutionPlan* _plan;
std::unique_ptr<ExecutionPlan> _plan;
/// @brief the Parser object, if the query is prepared
Parser* _parser;
View File
@ -697,7 +697,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
try {
res = query->trx()->lockCollections();
} catch (...) {
LOG(ERR) << "lock lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"lock lead to an exception");
@ -726,15 +725,10 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
if (items.get() == nullptr) {
answerBuilder.add("exhausted", VPackValue(true));
answerBuilder.add("error", VPackValue(false));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} else {
try {
items->toVelocyPack(query->trx(), answerBuilder);
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} catch (...) {
LOG(ERR) << "cannot transform AqlItemBlock to VelocyPack";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"cannot transform AqlItemBlock to VelocyPack");
@ -760,7 +754,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
skipped = block->skipSomeForShard(atLeast, atMost, shardId);
}
} catch (...) {
LOG(ERR) << "skipSome lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"skipSome lead to an exception");
@ -768,8 +761,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
}
answerBuilder.add("skipped", VPackValue(static_cast<double>(skipped)));
answerBuilder.add("error", VPackValue(false));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} else if (operation == "skip") {
auto number =
VelocyPackHelper::getNumericValue<size_t>(querySlice, "number", 1);
@ -789,10 +780,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
}
answerBuilder.add("exhausted", VPackValue(exhausted));
answerBuilder.add("error", VPackValue(false));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} catch (...) {
LOG(ERR) << "skip lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"skip lead to an exception");
@ -803,7 +791,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
try {
res = query->engine()->initialize();
} catch (...) {
LOG(ERR) << "initialize lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"initialize lead to an exception");
@ -825,7 +812,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
res = query->engine()->initializeCursor(items.get(), pos);
}
} catch (...) {
LOG(ERR) << "initializeCursor lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"initializeCursor lead to an exception");
@ -833,8 +819,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
}
answerBuilder.add("error", VPackValue(res != TRI_ERROR_NO_ERROR));
answerBuilder.add("code", VPackValue(static_cast<double>(res)));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} else if (operation == "shutdown") {
int res = TRI_ERROR_INTERNAL;
int errorCode = VelocyPackHelper::getNumericValue<int>(
@ -854,7 +838,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
_queryRegistry->destroy(_vocbase, _qId, errorCode);
_qId = 0;
} catch (...) {
LOG(ERR) << "shutdown lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"shutdown lead to an exception");
@ -863,7 +846,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
answerBuilder.add("error", VPackValue(res != TRI_ERROR_NO_ERROR));
answerBuilder.add("code", VPackValue(res));
} else {
LOG(ERR) << "Unknown operation!";
generateError(rest::ResponseCode::NOT_FOUND,
TRI_ERROR_HTTP_NOT_FOUND);
return;
@ -875,7 +857,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
generateError(rest::ResponseCode::BAD, e.code());
return;
} catch (...) {
LOG(ERR) << "OUT OF MEMORY when handling query.";
generateError(rest::ResponseCode::BAD, TRI_ERROR_OUT_OF_MEMORY);
return;
}
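With the per-operation `stats` entries removed above, statistics are now reported once via the new `cleanupPlanAndEngine(int, VPackBuilder*)` overload. A rough, compilable sketch of that shape, using stand-in types instead of the real engine and VelocyPack builder:

```
#include <iostream>
#include <string>

// stand-ins: the real code uses ExecutionEngine and velocypack::Builder
struct StatsBuilderStub { std::string json; };

struct EngineStub {
  void shutdown(int /*errorCode*/) {}
  void statsToBuilder(StatsBuilderStub& b) { b.json = "{\"httpRequests\":0}"; }
};

// mirrors cleanupPlanAndEngine(int, VPackBuilder* = nullptr): statistics are
// captured once during shutdown instead of after every single operation
void cleanupSketch(EngineStub* engine, int errorCode,
                   StatsBuilderStub* stats = nullptr) {
  if (engine != nullptr) {
    engine->shutdown(errorCode);
    if (stats != nullptr) {
      engine->statsToBuilder(*stats);
    }
  }
}

int main() {
  EngineStub engine;
  StatsBuilderStub stats;
  cleanupSketch(&engine, 0, &stats);
  std::cout << stats.json << "\n";
  return 0;
}
```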
View File
@ -37,7 +37,7 @@ class AqlItemBlock;
class ExecutionEngine;
class SortBlock : public ExecutionBlock {
class SortBlock final : public ExecutionBlock {
public:
SortBlock(ExecutionEngine*, SortNode const*);
View File
@ -37,7 +37,7 @@ class ManagedDocumentResult;
namespace aql {
class TraversalBlock : public ExecutionBlock {
class TraversalBlock final : public ExecutionBlock {
public:
TraversalBlock(ExecutionEngine* engine, TraversalNode const* ep);
View File
@ -37,7 +37,25 @@
#include "Utils/Transaction.h"
#include "VocBase/ticks.h"
#include <thread>
using namespace arangodb;
using namespace arangodb::communicator;
//////////////////////////////////////////////////////////////////////////////
/// @brief the pointer to the singleton instance
//////////////////////////////////////////////////////////////////////////////
std::shared_ptr<ClusterComm> arangodb::ClusterComm::_theInstance;
//////////////////////////////////////////////////////////////////////////////
/// @brief the following atomic int is 0 in the beginning, is set to 1
/// if some thread initializes the singleton and is 2 once _theInstance
/// is set. Note that after a shutdown has happened, _theInstance can be
/// a nullptr, which means no new ClusterComm operations can be started.
//////////////////////////////////////////////////////////////////////////////
std::atomic<int> arangodb::ClusterComm::_theInstanceInit(0);
////////////////////////////////////////////////////////////////////////////////
/// @brief routine to set the destination
@ -234,9 +252,32 @@ ClusterComm::~ClusterComm() {
/// @brief getter for our singleton instance
////////////////////////////////////////////////////////////////////////////////
ClusterComm* ClusterComm::instance() {
static ClusterComm* Instance = new ClusterComm();
return Instance;
std::shared_ptr<ClusterComm> ClusterComm::instance() {
int state = _theInstanceInit;
if (state < 2) {
// Try to set from 0 to 1:
while (state == 0) {
if (_theInstanceInit.compare_exchange_weak(state, 1)) {
break;
}
}
// Now state is either 0 (in which case we have changed _theInstanceInit
// to 1), or 1 (in which case somebody else has set it to 1 and is working
// to initialize the singleton), or 2 (in which case somebody else has
// done all the work and we are done):
if (state == 0) {
// we must initialize (cannot use std::make_shared here because
// constructor is private), if we throw here, everything is broken:
ClusterComm* cc = new ClusterComm();
_theInstance = std::shared_ptr<ClusterComm>(cc);
_theInstanceInit = 2;
} else if (state == 1) {
while (_theInstanceInit < 2) {
std::this_thread::yield();
}
}
}
return _theInstance;
}
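The same 0/1/2 initialization protocol, extracted into a self-contained singleton so the control flow can be read in isolation; this is a sketch mirroring the code above, not the real ClusterComm:

```
#include <atomic>
#include <memory>
#include <thread>

// 0 = uninitialized, 1 = some thread is constructing, 2 = instance published
class Singleton {
 public:
  static std::shared_ptr<Singleton> instance() {
    int state = _init.load();
    if (state < 2) {
      while (state == 0) {
        // on failure the CAS reloads the current value into state
        if (_init.compare_exchange_weak(state, 1)) break;
      }
      if (state == 0) {
        // we won the race and must construct (make_shared is unusable
        // here because the constructor is private)
        _instance = std::shared_ptr<Singleton>(new Singleton());
        _init = 2;
      } else if (state == 1) {
        // another thread is constructing; wait until it publishes
        while (_init < 2) std::this_thread::yield();
      }
    }
    return _instance;
  }

 private:
  Singleton() = default;
  static std::shared_ptr<Singleton> _instance;
  static std::atomic<int> _init;
};

std::shared_ptr<Singleton> Singleton::_instance;
std::atomic<int> Singleton::_init(0);

int main() { return Singleton::instance() != nullptr ? 0 : 1; }
```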
////////////////////////////////////////////////////////////////////////////////
@ -244,7 +285,7 @@ ClusterComm* ClusterComm::instance() {
////////////////////////////////////////////////////////////////////////////////
void ClusterComm::initialize() {
auto* i = instance();
auto i = instance(); // this will create the static instance
i->startBackgroundThread();
}
@ -253,10 +294,8 @@ void ClusterComm::initialize() {
////////////////////////////////////////////////////////////////////////////////
void ClusterComm::cleanup() {
auto i = instance();
TRI_ASSERT(i != nullptr);
delete i;
_theInstance.reset(); // no more operations will be started, but running
// ones have their copy of the shared_ptr
}
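Why resetting the shared_ptr is safe here: operations that already obtained the instance keep their own reference, so the object outlives `cleanup()` until the last copy is gone. A tiny compilable illustration:

```
#include <iostream>
#include <memory>

struct Comm {
  void send() { std::cout << "sending\n"; }
};

int main() {
  auto global = std::make_shared<Comm>();  // like ClusterComm::_theInstance
  auto inFlight = global;                  // a running operation's copy
  global.reset();                          // cleanup(): no new ops can start
  inFlight->send();                        // the running op still has a valid instance
  return 0;
}
```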
////////////////////////////////////////////////////////////////////////////////
@ -747,7 +786,7 @@ void ClusterComm::cleanupAllQueues() {
}
ClusterCommThread::ClusterCommThread() : Thread("ClusterComm"), _cc(nullptr) {
_cc = ClusterComm::instance();
_cc = ClusterComm::instance().get();
}
ClusterCommThread::~ClusterCommThread() { shutdown(); }
@ -759,7 +798,7 @@ ClusterCommThread::~ClusterCommThread() { shutdown(); }
void ClusterCommThread::beginShutdown() {
Thread::beginShutdown();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
if (cc != nullptr) {
CONDITION_LOCKER(guard, cc->somethingToSend);
View File
@ -43,8 +43,6 @@
#include "VocBase/voc-types.h"
namespace arangodb {
using namespace communicator;
class ClusterCommThread;
////////////////////////////////////////////////////////////////////////////////
@ -63,7 +61,7 @@ typedef TRI_voc_tick_t CoordTransactionID;
/// @brief type of an operation ID
////////////////////////////////////////////////////////////////////////////////
typedef Ticket OperationID;
typedef communicator::Ticket OperationID;
////////////////////////////////////////////////////////////////////////////////
/// @brief status of an (a-)synchronous cluster operation
@ -398,10 +396,11 @@ class ClusterComm {
/// @brief get the unique instance
//////////////////////////////////////////////////////////////////////////////
static ClusterComm* instance();
static std::shared_ptr<ClusterComm> instance();
//////////////////////////////////////////////////////////////////////////////
/// @brief initialize function to call once when still single-threaded
/// @brief initialize function to call once; instance() can be called
/// beforehand, but the background thread is only started here.
//////////////////////////////////////////////////////////////////////////////
static void initialize();
@ -531,11 +530,21 @@ class ClusterComm {
std::string const& destination, arangodb::rest::RequestType reqtype,
std::string const* body,
std::unordered_map<std::string, std::string> const& headerFields);
//////////////////////////////////////////////////////////////////////////////
/// @brief the pointer to the singleton instance
//////////////////////////////////////////////////////////////////////////////
static ClusterComm* _theinstance;
static std::shared_ptr<ClusterComm> _theInstance;
//////////////////////////////////////////////////////////////////////////////
/// @brief the following atomic int is 0 in the beginning, is set to 1
/// if some thread initializes the singleton and is 2 once _theInstance
/// is set. Note that after a shutdown has happened, _theInstance can be
/// a nullptr, which means no new ClusterComm operations can be started.
//////////////////////////////////////////////////////////////////////////////
static std::atomic<int> _theInstanceInit;
//////////////////////////////////////////////////////////////////////////////
/// @brief produces an operation ID which is unique in this process
@ -561,8 +570,8 @@ class ClusterComm {
std::shared_ptr<ClusterCommResult> result;
};
typedef std::unordered_map<Ticket, AsyncResponse>::iterator ResponseIterator;
std::unordered_map<Ticket, AsyncResponse> responses;
typedef std::unordered_map<communicator::Ticket, AsyncResponse>::iterator ResponseIterator;
std::unordered_map<communicator::Ticket, AsyncResponse> responses;
// Receiving answers:
std::list<ClusterCommOperation*> received;
View File
@ -286,11 +286,9 @@ void ClusterFeature::prepare() {
ServerState::instance()->setId(_myId);
}
if (_requestedRole != ServerState::RoleEnum::ROLE_UNDEFINED) {
if (!ServerState::instance()->registerWithRole(_requestedRole, _myAddress)) {
LOG(FATAL) << "Couldn't register at agency.";
FATAL_ERROR_EXIT();
}
if (!ServerState::instance()->registerWithRole(_requestedRole, _myAddress)) {
LOG(FATAL) << "Couldn't register at agency.";
FATAL_ERROR_EXIT();
}
auto role = ServerState::instance()->getRole();
View File
@ -561,7 +561,7 @@ int revisionOnCoordinator(std::string const& dbname,
std::string const& collname, TRI_voc_rid_t& rid) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
@ -635,7 +635,7 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
std::shared_ptr<arangodb::velocypack::Builder>& result) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
@ -700,7 +700,7 @@ int countOnCoordinator(std::string const& dbname, std::string const& collname,
std::vector<std::pair<std::string, uint64_t>>& result) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
result.clear();
@ -770,7 +770,7 @@ int createDocumentOnCoordinator(
std::shared_ptr<VPackBuilder>& resultBody) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
@ -905,7 +905,7 @@ int deleteDocumentOnCoordinator(
std::shared_ptr<arangodb::velocypack::Builder>& resultBody) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
@ -1134,7 +1134,7 @@ int truncateCollectionOnCoordinator(std::string const& dbname,
std::string const& collname) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
@ -1190,7 +1190,7 @@ int getDocumentOnCoordinator(
std::shared_ptr<VPackBuilder>& resultBody) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo;
@ -1461,7 +1461,7 @@ int fetchEdgesFromEngines(
VPackBuilder& builder,
size_t& filtered,
size_t& read) {
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// TODO map id => ServerID if possible
// And go fast-path
@ -1545,7 +1545,7 @@ void fetchVerticesFromEngines(
std::unordered_map<VPackSlice, std::shared_ptr<VPackBuffer<uint8_t>>>&
result,
VPackBuilder& builder) {
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// TODO map id => ServerID if possible
// And go fast-path
@ -1635,7 +1635,7 @@ int getFilteredEdgesOnCoordinator(
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo =
@ -1754,7 +1754,7 @@ int modifyDocumentOnCoordinator(
std::shared_ptr<VPackBuilder>& resultBody) {
// Set a few variables needed for our work:
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
// First determine the collection ID from the name:
std::shared_ptr<LogicalCollection> collinfo =
@ -2004,7 +2004,7 @@ int modifyDocumentOnCoordinator(
int flushWalOnAllDBServers(bool waitForSync, bool waitForCollector) {
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
std::vector<ServerID> DBservers = ci->getCurrentDBServers();
CoordTransactionID coordTransactionID = TRI_NewTickServer();
std::string url = std::string("/_admin/wal/flush?waitForSync=") +
@ -2033,6 +2033,7 @@ int flushWalOnAllDBServers(bool waitForSync, bool waitForCollector) {
}
if (nrok != (int)DBservers.size()) {
LOG(WARN) << "could not flush WAL on all servers. confirmed: " << nrok << ", expected: " << DBservers.size();
return TRI_ERROR_INTERNAL;
}
View File
@ -54,6 +54,8 @@ void DBServerAgencySync::work() {
DBServerAgencySyncResult DBServerAgencySync::execute() {
// default to system database
double startTime = TRI_microtime();
LOG_TOPIC(DEBUG, Logger::HEARTBEAT) << "DBServerAgencySync::execute starting";
DatabaseFeature* database =
ApplicationServer::getFeature<DatabaseFeature>("Database");
@ -80,6 +82,11 @@ DBServerAgencySyncResult DBServerAgencySync::execute() {
return result;
}
double now = TRI_microtime();
if (now - startTime > 5.0) {
LOG(INFO) << "DBServerAgencySync::execute took more than 5s to get free V8 context, starting handle-plan-change now";
}
TRI_DEFER(V8DealerFeature::DEALER->exitContext(context));
auto isolate = context->_isolate;
@ -162,5 +169,10 @@ DBServerAgencySyncResult DBServerAgencySync::execute() {
} catch (...) {
}
now = TRI_microtime();
if (now - startTime > 30) {
LOG_TOPIC(WARN, Logger::HEARTBEAT) << "DBServerAgencySync::execute "
"took longer than 30s to execute handlePlanChange()";
}
return result;
}
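The elapsed-time warnings above follow a simple measure-and-log pattern. A compilable sketch using std::chrono in place of TRI_microtime(), with the 30s threshold taken from the code above:

```
#include <chrono>
#include <iostream>

int main() {
  auto start = std::chrono::steady_clock::now();
  // ... the guarded work (acquiring a V8 context, handlePlanChange, ...) ...
  std::chrono::duration<double> elapsed =
      std::chrono::steady_clock::now() - start;
  if (elapsed.count() > 30.0) {  // warn only when the work was unusually slow
    std::cerr << "execute took longer than 30s\n";
  }
  return 0;
}
```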
View File
@ -110,8 +110,7 @@ class HeartbeatBackgroundJob {
public:
explicit HeartbeatBackgroundJob(std::shared_ptr<HeartbeatThread> hbt,
double startTime)
: _heartbeatThread(hbt), _startTime(startTime) {
_schedulerInfo = SchedulerFeature::SCHEDULER->infoStatus();
: _heartbeatThread(hbt), _startTime(startTime),_schedulerInfo(SchedulerFeature::SCHEDULER->infoStatus()) {
}
void operator()() {
View File
@ -251,51 +251,70 @@ bool ServerState::unregister() {
bool ServerState::registerWithRole(ServerState::RoleEnum role,
std::string const& myAddress) {
if (!getId().empty()) {
LOG_TOPIC(INFO, Logger::CLUSTER)
<< "Registering with role and localinfo. Supplied id is being ignored";
return false;
}
AgencyComm comm;
AgencyCommResult result;
std::string localInfoEncoded = StringUtils::replace(
StringUtils::urlEncode(getLocalInfo()),"%2E",".");
result = comm.getValues("Target/MapLocalToID/" + localInfoEncoded);
std::string locinf = "Target/MapLocalToID/" +
(localInfoEncoded.empty() ? "bogus_hass_hund" : localInfoEncoded);
std::string dbidinf = "Plan/DBServers/" +
(_id.empty() ? "bogus_hass_hund" : _id);
std::string coidinf = "Plan/Coordinators/" +
(_id.empty() ? "bogus_hass_hund" : _id);
typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
AgencyGeneralTransaction reg;
reg.operations.push_back( // my-local-info
operationType(AgencyOperation(locinf), AgencyPrecondition()));
reg.operations.push_back( // db my-id
operationType(AgencyOperation(dbidinf), AgencyPrecondition()));
reg.operations.push_back( // coord my-id
operationType(AgencyOperation(coidinf), AgencyPrecondition()));
result = comm.sendTransactionWithFailover(reg, 0.0);
std::string id;
bool found = true;
if (!result.successful()) {
found = false;
} else {
VPackSlice idSlice = result.slice()[0].get(
std::vector<std::string>({AgencyCommManager::path(), "Target",
"MapLocalToID", localInfoEncoded}));
if (!idSlice.isString()) {
found = false;
} else {
id = idSlice.copyString();
LOG(WARN) << "Have ID: " + id;
if (result.slice().isArray()) {
VPackSlice targetSlice, planSlice;
if (!_id.empty()) {
try {
if (
result.slice()[1].get(
std::vector<std::string>({AgencyCommManager::path(), "Plan",
"DBServers", _id})).isString()) {
id = _id;
if (role == ServerState::ROLE_UNDEFINED) {
role = ServerState::ROLE_PRIMARY;
}
} else if (
result.slice()[2].get(
std::vector<std::string>({AgencyCommManager::path(), "Plan",
"Coordinators", _id})).isString()) {
id = _id;
if (role == ServerState::ROLE_UNDEFINED) {
role = ServerState::ROLE_COORDINATOR;
}
}
} catch (...) {}
} else if (!localInfoEncoded.empty()) {
try {
id = result.slice()[0].get(
std::vector<std::string>({AgencyCommManager::path(), "Target",
"MapLocalToID", localInfoEncoded})).copyString();
} catch (...) {}
}
}
createIdForRole(comm, role, id);
if (found) {
} else {
LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "Determining id from localinfo failed."
<< "Continuing with registering ourselves for the first time";
id = createIdForRole(comm, role);
}
id = createIdForRole(comm, role, id);
const std::string agencyKey = roleToAgencyKey(role);
const std::string planKey = "Plan/" + agencyKey + "/" + id;
const std::string currentKey = "Current/" + agencyKey + "/" + id;
auto builder = std::make_shared<VPackBuilder>();
result = comm.getValues(planKey);
found = true;
bool found = true;
if (!result.successful()) {
found = false;
} else {
@ -377,6 +396,9 @@ std::string ServerState::createIdForRole(AgencyComm comm,
typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
std::string const agencyKey = roleToAgencyKey(role);
std::string roleName = ((role == ROLE_COORDINATOR) ? "Coordinator":"DBServer");
size_t shortNum(0);
VPackBuilder builder;
builder.add(VPackValue("none"));
@ -390,11 +412,22 @@ std::string ServerState::createIdForRole(AgencyComm comm,
auto filePath = dbpath->directory() + "/UUID";
std::ifstream ifs(filePath);
if (!id.empty()) {
if (id.compare(0, roleName.size(), roleName) == 0) {
try {
shortNum = std::stoul(id.substr(roleName.size(),3));
} catch(...) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Old id cannot be parsed for number.";
}
}
}
if (ifs.is_open()) {
std::getline(ifs, id);
ifs.close();
LOG_TOPIC(INFO, Logger::CLUSTER)
LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "Restarting with persisted UUID " << id;
} else {
mkdir (dbpath->directory());
@ -449,7 +482,7 @@ std::string ServerState::createIdForRole(AgencyComm comm,
reg.operations.push_back( // Get shortID
operationType(AgencyOperation(targetIdStr), AgencyPrecondition()));
result = comm.sendTransactionWithFailover(reg, 0.0);
VPackSlice latestId = result.slice()[2].get(
std::vector<std::string>(
{AgencyCommManager::path(), "Target",
@ -462,7 +495,8 @@ std::string ServerState::createIdForRole(AgencyComm comm,
localIdBuilder.add("TransactionID", latestId);
std::stringstream ss; // ShortName
ss << ((role == ROLE_COORDINATOR) ? "Coordinator" : "DBServer")
<< std::setw(4) << std::setfill('0') << latestId.getNumber<uint32_t>();
<< std::setw(4) << std::setfill('0')
<< (shortNum ==0 ? latestId.getNumber<uint32_t>() : shortNum);
std::string shortName = ss.str();
localIdBuilder.add("ShortName", VPackValue(shortName));
}
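The short-name construction above pads a numeric suffix to four digits and prefers a number recovered from the old id over a freshly assigned one. A compilable sketch of just that formatting logic:

```
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

// reuses an existing short number when the old id carried one, otherwise
// falls back to the freshly assigned number, as in createIdForRole above
std::string shortName(bool isCoordinator, unsigned shortNum, unsigned latestId) {
  std::stringstream ss;
  ss << (isCoordinator ? "Coordinator" : "DBServer") << std::setw(4)
     << std::setfill('0') << (shortNum == 0 ? latestId : shortNum);
  return ss.str();
}

int main() {
  std::cout << shortName(true, 0, 7) << "\n";    // Coordinator0007
  std::cout << shortName(false, 12, 99) << "\n"; // DBServer0012
  return 0;
}
```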

View File

@ -754,7 +754,11 @@ static void JS_GetCollectionInfoClusterInfo(
uint32_t pos = 0;
for (auto const& s : p.second) {
try{
shorts->Set(pos, TRI_V8_STD_STRING(serverAliases.at(s)));
std::string t = s;
if (s.at(0) == '_') {
t = s.substr(1);
}
shorts->Set(pos, TRI_V8_STD_STRING(serverAliases.at(t)));
} catch (...) {}
list->Set(pos++, TRI_V8_STD_STRING(s));
}
@ -985,11 +989,23 @@ static void JS_GetDBServers(v8::FunctionCallbackInfo<v8::Value> const& args) {
auto serverAliases = ClusterInfo::instance()->getServerAliases();
v8::Handle<v8::Array> l = v8::Array::New(isolate);
for (size_t i = 0; i < DBServers.size(); ++i) {
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("serverId"), TRI_V8_STD_STRING(DBServers[i]));
result->Set(TRI_V8_ASCII_STRING("serverName"),
TRI_V8_STD_STRING(serverAliases.at(DBServers[i])));
auto id = DBServers[i];
result->Set(TRI_V8_ASCII_STRING("serverId"), TRI_V8_STD_STRING(id));
auto itr = serverAliases.find(id);
if (itr != serverAliases.end()) {
result->Set(TRI_V8_ASCII_STRING("serverName"),
TRI_V8_STD_STRING(itr->second));
} else {
result->Set(TRI_V8_ASCII_STRING("serverName"),
TRI_V8_STD_STRING(id));
}
l->Set((uint32_t)i, result);
}
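The fix above replaces a throwing `serverAliases.at(id)` with a `find()` that falls back to the raw server id when no alias exists. The same lookup-with-fallback as a small compilable function (the alias value below is invented for illustration):

```
#include <iostream>
#include <string>
#include <unordered_map>

// look up a server alias; fall back to the raw id when no alias is known,
// instead of letting map::at() throw as the old code did
std::string aliasOrId(
    std::unordered_map<std::string, std::string> const& aliases,
    std::string const& id) {
  auto it = aliases.find(id);
  return it != aliases.end() ? it->second : id;
}

int main() {
  std::unordered_map<std::string, std::string> aliases{{"PRMR-1", "DBServer0001"}};
  std::cout << aliasOrId(aliases, "PRMR-1") << "\n";  // DBServer0001
  std::cout << aliasOrId(aliases, "PRMR-2") << "\n";  // PRMR-2 (no alias)
  return 0;
}
```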
@ -1792,7 +1808,7 @@ static void JS_AsyncRequest(v8::FunctionCallbackInfo<v8::Value> const& args) {
// - singleRequest (boolean) default is false
// - initTimeout (number)
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
@ -1859,7 +1875,7 @@ static void JS_SyncRequest(v8::FunctionCallbackInfo<v8::Value> const& args) {
// role");
//}
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
@ -1911,7 +1927,7 @@ static void JS_Enquire(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_THROW_EXCEPTION_USAGE("enquire(operationID)");
}
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
@ -1948,7 +1964,7 @@ static void JS_Wait(v8::FunctionCallbackInfo<v8::Value> const& args) {
// - shardID (string)
// - timeout (number)
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
@ -2018,7 +2034,7 @@ static void JS_Drop(v8::FunctionCallbackInfo<v8::Value> const& args) {
// - operationID (number)
// - shardID (string)
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
View File
@ -22,9 +22,10 @@
#include "AuthenticationFeature.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "RestServer/QueryRegistryFeature.h"
#include "Random/RandomGenerator.h"
#include "RestServer/QueryRegistryFeature.h"
using namespace arangodb;
using namespace arangodb::options;
View File
@ -91,8 +91,6 @@ class GeneralCommTask : public SocketTask {
virtual arangodb::Endpoint::TransportType transportType() = 0;
void setStatistics(uint64_t, RequestStatistics*);
protected:
virtual std::unique_ptr<GeneralResponse> createResponse(
rest::ResponseCode, uint64_t messageId) = 0;
@ -111,6 +109,7 @@ class GeneralCommTask : public SocketTask {
std::string const& errorMessage,
uint64_t messageId) = 0;
void setStatistics(uint64_t, RequestStatistics*);
RequestStatistics* acquireStatistics(uint64_t);
RequestStatistics* statistics(uint64_t);
RequestStatistics* stealStatistics(uint64_t);
View File
@ -28,8 +28,6 @@
#include "Basics/Common.h"
#include <boost/lockfree/queue.hpp>
#include "Basics/ConditionVariable.h"
#include "Endpoint/ConnectionInfo.h"
#include "GeneralServer/GeneralDefinitions.h"
View File
@ -41,7 +41,6 @@
#include "ProgramOptions/Parameters.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "Rest/Version.h"
#include "RestHandler/RestAdminLogHandler.h"
#include "RestHandler/RestAqlFunctionsHandler.h"
#include "RestHandler/RestAuthHandler.h"
View File
@ -36,7 +36,7 @@
#include <memory>
#include <stdexcept>
using namespace arangodb;
namespace arangodb {
inline std::size_t validateAndCount(char const* vpStart,
char const* vpEnd) {
@ -346,4 +346,6 @@ inline std::vector<std::unique_ptr<basics::StringBuffer>> createChunkForNetwork(
return rv;
}
}
#endif
View File
@ -313,7 +313,7 @@ void MMFilesEngine::getDatabases(arangodb::velocypack::Builder& result) {
if (!idSlice.isString() ||
id != static_cast<TRI_voc_tick_t>(basics::StringUtils::uint64(idSlice.copyString()))) {
LOG(ERR) << "database directory '" << directory
<< "' does not contain a valid parameters file";
<< "' does not contain a valid parameters file. database id is not a string";
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE);
}
@ -1237,7 +1237,14 @@ TRI_vocbase_t* MMFilesEngine::openExistingDatabase(TRI_voc_tick_t id, std::strin
/// @brief physically erases the database directory
int MMFilesEngine::dropDatabaseDirectory(std::string const& path) {
return TRI_RemoveDirectory(path.c_str());
// first create a .tmp file in the directory that will help us recover when we crash
// before the directory deletion is completed
std::string const tmpfile(
arangodb::basics::FileUtils::buildFilename(path, ".tmp"));
// ignore errors from writing this file...
TRI_WriteFile(tmpfile.c_str(), "", 0);
return TRI_RemoveDirectoryDeterministic(path.c_str());
}
/// @brief iterate over a set of datafiles, identified by filenames
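The `.tmp` marker written in dropDatabaseDirectory() above lets recovery detect a directory whose deletion was interrupted by a crash and finish removing it. A sketch of the idea using C++17 std::filesystem in place of the engine's TRI_* file utilities:

```
#include <filesystem>
#include <fstream>
#include <iostream>

namespace fs = std::filesystem;

// write a ".tmp" file into the directory before deleting it, so a crash
// mid-deletion leaves evidence that the directory was meant to go away
bool markAndRemoveDirectory(fs::path const& dir) {
  std::ofstream marker(dir / ".tmp");  // write errors intentionally ignored
  marker.close();
  std::error_code ec;
  fs::remove_all(dir, ec);  // recursive removal, standing in for
                            // TRI_RemoveDirectoryDeterministic()
  return !ec;
}

int main() {
  fs::path dir = fs::temp_directory_path() / "dbdir-example";
  fs::create_directories(dir);
  std::cout << (markAndRemoveDirectory(dir) ? "removed" : "failed") << "\n";
  return 0;
}
```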
View File
@ -118,7 +118,7 @@ friend class MMFilesGeoIndexIterator;
bool canBeDropped() const override { return true; }
bool isSorted() const override { return false; }
bool isSorted() const override { return true; }
bool hasSelectivityEstimate() const override { return false; }
View File
@ -24,10 +24,8 @@
#include "RestDebugHandler.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
View File
@ -774,7 +774,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
}
// Set a few variables needed for our work:
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
std::unique_ptr<ClusterCommResult> res;
if (!useVpp) {
View File
@ -22,6 +22,7 @@
#include "CheckVersionFeature.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
View File
@ -23,6 +23,7 @@
#include "ConsoleFeature.h"
#include "Basics/messages.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestServer/ConsoleThread.h"
View File
@ -303,9 +303,17 @@ void DatabaseFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
&_check30Revisions,
std::unordered_set<std::string>{"true", "false", "fail"}));
// the following option was removed in 3.2
// index-creation is now automatically parallelized via the Boost ASIO thread pool
options->addObsoleteOption(
"--database.index-threads",
"threads to start for parallel background index creation", true);
// the following options were removed in 3.2
options->addObsoleteOption("--database.revision-cache-chunk-size",
"chunk size (in bytes) for the document revisions cache", true);
options->addObsoleteOption("--database.revision-cache-target-size",
"total target size (in bytes) for the document revisions cache", true);
}
void DatabaseFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
View File
@ -26,6 +26,7 @@
#include "Basics/ArangoGlobalContext.h"
#include "Basics/FileUtils.h"
#include "Basics/StringUtils.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
View File
@ -23,6 +23,7 @@
#include "EndpointFeature.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestServer/ServerFeature.h"
View File
@ -23,7 +23,7 @@
#include "InitDatabaseFeature.h"
#include "Basics/FileUtils.h"
#include "Logger/LoggerFeature.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
View File
@ -24,6 +24,7 @@
#include "Basics/Exceptions.h"
#include "Basics/FileUtils.h"
#include "Basics/files.h"
#include "Logger/Logger.h"
#include "RestServer/DatabasePathFeature.h"
using namespace arangodb;
View File
@ -23,6 +23,7 @@
#include "ScriptFeature.h"
#include "Basics/messages.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestServer/ConsoleThread.h"
View File
@ -29,8 +29,6 @@
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/VocbaseContext.h"
#include "Scheduler/SchedulerFeature.h"
View File
@ -23,6 +23,7 @@
#include "UnitTestsFeature.h"
#include "Basics/messages.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestServer/ConsoleThread.h"
View File
@ -92,114 +92,123 @@
using namespace arangodb;
static int runServer(int argc, char** argv) {
ArangoGlobalContext context(argc, argv, SBIN_DIRECTORY);
context.installSegv();
context.runStartupChecks();
try {
ArangoGlobalContext context(argc, argv, SBIN_DIRECTORY);
context.installSegv();
context.runStartupChecks();
std::string name = context.binaryName();
std::string name = context.binaryName();
auto options = std::make_shared<options::ProgramOptions>(
argv[0], "Usage: " + name + " [<options>]", "For more information use:",
SBIN_DIRECTORY);
auto options = std::make_shared<options::ProgramOptions>(
argv[0], "Usage: " + name + " [<options>]", "For more information use:",
SBIN_DIRECTORY);
application_features::ApplicationServer server(options, SBIN_DIRECTORY);
application_features::ApplicationServer server(options, SBIN_DIRECTORY);
std::vector<std::string> nonServerFeatures = {
"Action", "Affinity",
"Agency", "Authentication",
"Cluster", "Daemon",
"Dispatcher", "FoxxQueues",
"GeneralServer", "LoggerBufferFeature",
"Server", "SslServer",
"Statistics", "Supervisor"};
std::vector<std::string> nonServerFeatures = {
"Action", "Affinity",
"Agency", "Authentication",
"Cluster", "Daemon",
"Dispatcher", "FoxxQueues",
"GeneralServer", "LoggerBufferFeature",
"Server", "SslServer",
"Statistics", "Supervisor"};
int ret = EXIT_FAILURE;
int ret = EXIT_FAILURE;
server.addFeature(new ActionFeature(&server));
server.addFeature(new AgencyFeature(&server));
server.addFeature(new AuthenticationFeature(&server));
server.addFeature(new BootstrapFeature(&server));
server.addFeature(new CheckVersionFeature(&server, &ret, nonServerFeatures));
server.addFeature(new ClusterFeature(&server));
server.addFeature(new ConfigFeature(&server, name));
server.addFeature(new ConsoleFeature(&server));
server.addFeature(new DatabaseFeature(&server));
server.addFeature(new DatabasePathFeature(&server));
server.addFeature(new EndpointFeature(&server));
server.addFeature(new EngineSelectorFeature(&server));
server.addFeature(new FeatureCacheFeature(&server));
server.addFeature(new FileDescriptorsFeature(&server));
server.addFeature(new FoxxQueuesFeature(&server));
server.addFeature(new FrontendFeature(&server));
server.addFeature(new GeneralServerFeature(&server));
server.addFeature(new GreetingsFeature(&server, "arangod"));
server.addFeature(new InitDatabaseFeature(&server, nonServerFeatures));
server.addFeature(new LanguageFeature(&server));
server.addFeature(new LockfileFeature(&server));
server.addFeature(new MMFilesLogfileManager(&server));
server.addFeature(new LoggerBufferFeature(&server));
server.addFeature(new LoggerFeature(&server, true));
server.addFeature(new NonceFeature(&server));
server.addFeature(new PageSizeFeature(&server));
server.addFeature(new PrivilegeFeature(&server));
server.addFeature(new QueryRegistryFeature(&server));
server.addFeature(new TraverserEngineRegistryFeature(&server));
server.addFeature(new RandomFeature(&server));
server.addFeature(new RocksDBFeature(&server));
server.addFeature(new SchedulerFeature(&server));
server.addFeature(new ScriptFeature(&server, &ret));
server.addFeature(new ServerFeature(&server, &ret));
server.addFeature(new ServerIdFeature(&server));
server.addFeature(new ShutdownFeature(&server, {"UnitTests", "Script"}));
server.addFeature(new SslFeature(&server));
server.addFeature(new StatisticsFeature(&server));
server.addFeature(new TempFeature(&server, name));
server.addFeature(new TransactionManagerFeature(&server));
server.addFeature(new UnitTestsFeature(&server, &ret));
server.addFeature(new UpgradeFeature(&server, &ret, nonServerFeatures));
server.addFeature(new V8DealerFeature(&server));
server.addFeature(new V8PlatformFeature(&server));
server.addFeature(new VersionFeature(&server));
server.addFeature(new WorkMonitorFeature(&server));
server.addFeature(new ActionFeature(&server));
server.addFeature(new AgencyFeature(&server));
server.addFeature(new AuthenticationFeature(&server));
server.addFeature(new BootstrapFeature(&server));
server.addFeature(new CheckVersionFeature(&server, &ret, nonServerFeatures));
server.addFeature(new ClusterFeature(&server));
server.addFeature(new ConfigFeature(&server, name));
server.addFeature(new ConsoleFeature(&server));
server.addFeature(new DatabaseFeature(&server));
server.addFeature(new DatabasePathFeature(&server));
server.addFeature(new EndpointFeature(&server));
server.addFeature(new EngineSelectorFeature(&server));
server.addFeature(new FeatureCacheFeature(&server));
server.addFeature(new FileDescriptorsFeature(&server));
server.addFeature(new FoxxQueuesFeature(&server));
server.addFeature(new FrontendFeature(&server));
server.addFeature(new GeneralServerFeature(&server));
server.addFeature(new GreetingsFeature(&server, "arangod"));
server.addFeature(new InitDatabaseFeature(&server, nonServerFeatures));
server.addFeature(new LanguageFeature(&server));
server.addFeature(new LockfileFeature(&server));
server.addFeature(new MMFilesLogfileManager(&server));
server.addFeature(new LoggerBufferFeature(&server));
server.addFeature(new LoggerFeature(&server, true));
server.addFeature(new NonceFeature(&server));
server.addFeature(new PageSizeFeature(&server));
server.addFeature(new PrivilegeFeature(&server));
server.addFeature(new QueryRegistryFeature(&server));
server.addFeature(new TraverserEngineRegistryFeature(&server));
server.addFeature(new RandomFeature(&server));
server.addFeature(new RocksDBFeature(&server));
server.addFeature(new SchedulerFeature(&server));
server.addFeature(new ScriptFeature(&server, &ret));
server.addFeature(new ServerFeature(&server, &ret));
server.addFeature(new ServerIdFeature(&server));
server.addFeature(new ShutdownFeature(&server, {"UnitTests", "Script"}));
server.addFeature(new SslFeature(&server));
server.addFeature(new StatisticsFeature(&server));
server.addFeature(new TempFeature(&server, name));
server.addFeature(new TransactionManagerFeature(&server));
server.addFeature(new UnitTestsFeature(&server, &ret));
server.addFeature(new UpgradeFeature(&server, &ret, nonServerFeatures));
server.addFeature(new V8DealerFeature(&server));
server.addFeature(new V8PlatformFeature(&server));
server.addFeature(new VersionFeature(&server));
server.addFeature(new WorkMonitorFeature(&server));
#ifdef ARANGODB_HAVE_FORK
server.addFeature(new DaemonFeature(&server));
server.addFeature(new SupervisorFeature(&server));
server.addFeature(new DaemonFeature(&server));
server.addFeature(new SupervisorFeature(&server));
#endif
#ifdef _WIN32
server.addFeature(new WindowsServiceFeature(&server));
server.addFeature(new WindowsServiceFeature(&server));
#endif
#ifdef USE_ENTERPRISE
setupServerEE(&server);
setupServerEE(&server);
#else
server.addFeature(new SslServerFeature(&server));
server.addFeature(new SslServerFeature(&server));
#endif
// storage engines
server.addFeature(new MMFilesEngine(&server));
server.addFeature(new MMFilesWalRecoveryFeature(&server));
server.addFeature(new RocksDBEngine(&server));
// storage engines
server.addFeature(new MMFilesEngine(&server));
server.addFeature(new MMFilesWalRecoveryFeature(&server));
server.addFeature(new RocksDBEngine(&server));
try {
server.run(argc, argv);
if (server.helpShown()) {
// --help was displayed
ret = EXIT_SUCCESS;
try {
server.run(argc, argv);
if (server.helpShown()) {
// --help was displayed
ret = EXIT_SUCCESS;
}
} catch (std::exception const& ex) {
LOG(ERR) << "arangod terminated because of an unhandled exception: "
<< ex.what();
ret = EXIT_FAILURE;
} catch (...) {
LOG(ERR) << "arangod terminated because of an unhandled exception of "
"unknown type";
ret = EXIT_FAILURE;
}
Logger::flush();
return context.exit(ret);
} catch (std::exception const& ex) {
LOG(ERR) << "arangod terminated because of an unhandled exception: "
<< ex.what();
ret = EXIT_FAILURE;
} catch (...) {
LOG(ERR) << "arangod terminated because of an unhandled exception of "
"unknown type";
ret = EXIT_FAILURE;
}
Logger::flush();
return context.exit(ret);
exit(EXIT_FAILURE);
}
#if _WIN32
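The restructuring above moves context construction inside the try block, so an exception thrown during startup is logged rather than escaping. The guard structure in isolation, as a compilable sketch:

```
#include <cstdlib>
#include <exception>
#include <iostream>

int runEverything() {
  // ... build the global context, register features, run the server ...
  return EXIT_SUCCESS;
}

int main() {
  try {
    return runEverything();
  } catch (std::exception const& ex) {
    // matches the wording of the log messages above
    std::cerr << "terminated because of an unhandled exception: "
              << ex.what() << "\n";
  } catch (...) {
    std::cerr << "terminated because of an unhandled exception of unknown type\n";
  }
  return EXIT_FAILURE;
}
```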
View File
@ -28,8 +28,6 @@
#include "Scheduler/Socket.h"
#include "Ssl/SslServerFeature.h"
using namespace arangodb;
namespace arangodb {
class Acceptor {
public:
View File
@ -30,13 +30,13 @@ using namespace arangodb;
void AcceptorUnixDomain::open() {
std::string path(((EndpointUnixDomain*) _endpoint)->path());
if (FileUtils::exists(path)) {
if (basics::FileUtils::exists(path)) {
// socket file already exists
LOG(WARN) << "socket file '" << path << "' already exists.";
int error = 0;
// delete previously existing socket file
if (FileUtils::remove(path, &error)) {
if (basics::FileUtils::remove(path, &error)) {
LOG(WARN) << "deleted previously existing socket file '" << path << "'";
} else {
LOG(ERR) << "unable to delete previously existing socket file '" << path
@ -66,7 +66,7 @@ void AcceptorUnixDomain::close() {
_acceptor.close();
int error = 0;
std::string path = ((EndpointUnixDomain*) _endpoint)->path();
if (!FileUtils::remove(path, &error)) {
if (!basics::FileUtils::remove(path, &error)) {
LOG(TRACE) << "unable to remove socket file '" << path << "'";
}
}
View File
@ -105,10 +105,14 @@ class SchedulerThread : public Thread {
_service->run_one();
if (++counter > EVERY_LOOP) {
counter = 0;
auto now = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = now - start;
if (diff.count() > MIN_SECONDS) {
start = std::chrono::steady_clock::now();
if (_scheduler->stopThread()) {
auto n = _scheduler->decRunning();
@ -118,8 +122,6 @@ class SchedulerThread : public Thread {
break;
}
}
start = std::chrono::steady_clock::now();
}
}
}
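The loop change above resets the reference time only when the periodic check actually fires, instead of on every pass. A compilable sketch of the pattern with assumed EVERY_LOOP/MIN_SECONDS values (the real ones are defined elsewhere in the scheduler):

```
#include <chrono>
#include <iostream>

int main() {
  constexpr int EVERY_LOOP = 1000;    // assumed value, not the real constant
  constexpr double MIN_SECONDS = 5.0; // assumed value, not the real constant

  int counter = 0;
  auto start = std::chrono::steady_clock::now();

  for (int i = 0; i < 100000; ++i) {
    // _service->run_one() would go here
    if (++counter > EVERY_LOOP) {
      counter = 0;
      std::chrono::duration<double> diff =
          std::chrono::steady_clock::now() - start;
      if (diff.count() > MIN_SECONDS) {
        start = std::chrono::steady_clock::now();  // reset only when the
                                                   // check actually runs
        // stopThread() / decRunning() bookkeeping would go here
      }
    }
  }
  std::cout << "loop done\n";
  return 0;
}
```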
View File
@ -49,7 +49,7 @@ SocketTask::SocketTask(arangodb::EventLoop loop,
double keepAliveTimeout, bool skipInit = false)
: Task(loop, "SocketTask"),
_connectionStatistics(nullptr),
_connectionInfo(connectionInfo),
_connectionInfo(std::move(connectionInfo)),
_readBuffer(TRI_UNKNOWN_MEM_ZONE, READ_BLOCK_SIZE + 1, false),
_writeBuffer(nullptr, nullptr),
_peer(std::move(socket)),
View File
@ -22,9 +22,11 @@
#include "Scheduler/SocketUnixDomain.h"
#include "Basics/StringBuffer.h"
using namespace arangodb;
size_t SocketUnixDomain::write(StringBuffer* buffer, boost::system::error_code& ec) {
size_t SocketUnixDomain::write(basics::StringBuffer* buffer, boost::system::error_code& ec) {
return socketcommon::doWrite(_socket, buffer, ec);
}
void SocketUnixDomain::asyncWrite(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) {
View File
@ -27,9 +27,11 @@
#include <boost/asio/local/stream_protocol.hpp>
using namespace arangodb::basics;
namespace arangodb {
namespace basics {
class StringBuffer;
}
class SocketUnixDomain final : public Socket {
public:
SocketUnixDomain(boost::asio::io_service& ioService, boost::asio::ssl::context&& context)
@ -51,7 +53,7 @@ class SocketUnixDomain final : public Socket {
bool sslHandshake() override { return false; }
size_t write(StringBuffer* buffer, boost::system::error_code& ec) override;
size_t write(basics::StringBuffer* buffer, boost::system::error_code& ec) override;
void asyncWrite(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) override;
View File
@ -26,9 +26,12 @@
#include "Basics/Common.h"
#include "Basics/Mutex.h"
#include "Statistics/StatisticsFeature.h"
#include "Statistics/figures.h"
#include <boost/lockfree/queue.hpp>
namespace arangodb {
class ConnectionStatistics {
public:
View File
@ -22,11 +22,11 @@
////////////////////////////////////////////////////////////////////////////////
#include "RequestStatistics.h"
#include "Basics/MutexLocker.h"
#include "Logger/Logger.h"
#include <iomanip>
#include "Basics/MutexLocker.h"
using namespace arangodb;
using namespace arangodb::basics;
View File
@ -26,10 +26,13 @@
#include "Basics/Common.h"
#include "Basics/Mutex.h"
#include "Rest/CommonDefines.h"
#include "Statistics/StatisticsFeature.h"
#include "Statistics/figures.h"
#include <boost/lockfree/queue.hpp>
namespace arangodb {
class RequestStatistics {
public:
View File
@ -22,6 +22,8 @@
#include "StatisticsFeature.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "Statistics/ConnectionStatistics.h"
View File
@ -22,6 +22,7 @@
#include "EngineSelectorFeature.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "MMFiles/MMFilesEngine.h"
View File
@ -50,11 +50,11 @@ class StorageEngine : public application_features::ApplicationFeature {
_typeName(engineName) {
// each specific storage engine feature is optional. the storage engine selection feature
// will make sure that exactly one engine is selected at startup
// will make sure that exactly one engine is selected at startup
setOptional(true);
// storage engines must not use elevated privileges for files etc
requiresElevatedPrivileges(false);
startsAfter("DatabasePath");
startsAfter("EngineSelector");
startsAfter("FileDescriptors");
@ -63,12 +63,12 @@ class StorageEngine : public application_features::ApplicationFeature {
virtual void start() {}
virtual void stop() {}
virtual void recoveryDone(TRI_vocbase_t* vocbase) {}
virtual void recoveryDone(TRI_vocbase_t* vocbase) {}
// create storage-engine specific collection
virtual PhysicalCollection* createPhysicalCollection(LogicalCollection*) = 0;
// status functionality
// --------------------
@ -83,21 +83,21 @@ class StorageEngine : public application_features::ApplicationFeature {
// by the storage engine. this method must sort out databases that were not
// fully created (see "createDatabase" below). called at server start only
virtual void getDatabases(arangodb::velocypack::Builder& result) = 0;
// fills the provided builder with information about the collection
virtual void getCollectionInfo(TRI_vocbase_t* vocbase, TRI_voc_cid_t cid,
arangodb::velocypack::Builder& result,
// fills the provided builder with information about the collection
virtual void getCollectionInfo(TRI_vocbase_t* vocbase, TRI_voc_cid_t cid,
arangodb::velocypack::Builder& result,
bool includeIndexes, TRI_voc_tick_t maxTick) = 0;
// fill the Builder object with an array of collections (and their corresponding
// indexes) that were detected by the storage engine. called at server start separately
// for each database
virtual int getCollectionsAndIndexes(TRI_vocbase_t* vocbase, arangodb::velocypack::Builder& result,
virtual int getCollectionsAndIndexes(TRI_vocbase_t* vocbase, arangodb::velocypack::Builder& result,
bool wasCleanShutdown, bool isUpgrade) = 0;
// return the path for a database
virtual std::string databasePath(TRI_vocbase_t const* vocbase) const = 0;
// return the path for a collection
virtual std::string collectionPath(TRI_vocbase_t const* vocbase, TRI_voc_cid_t id) const = 0;
@ -107,78 +107,78 @@ class StorageEngine : public application_features::ApplicationFeature {
// -----------------------------------------
// asks the storage engine to create a database as specified in the VPack
// Slice object and persist the creation info. It is guaranteed by the server that
// Slice object and persist the creation info. It is guaranteed by the server that
// no other active database with the same name and id exists when this function
// is called. If this operation fails somewhere in the middle, the storage
// engine is required to fully clean up the creation and throw only then,
// is called. If this operation fails somewhere in the middle, the storage
// engine is required to fully clean up the creation and throw only then,
// so that subsequent database creation requests will not fail.
// the WAL entry for the database creation will be written *after* the call
// to "createDatabase" returns
virtual TRI_vocbase_t* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& data) = 0;
// asks the storage engine to drop the specified database and persist the
// deletion info. Note that physical deletion of the database data must not
// be carried out by this call, as there may still be readers of the database's data.
// It is recommended that this operation only sets a deletion flag for the database
// but lets an async task perform the actual deletion.
// the WAL entry for database deletion will be written *after* the call
// to "prepareDropDatabase" returns
virtual int prepareDropDatabase(TRI_vocbase_t* vocbase) = 0;
// perform a physical deletion of the database
virtual int dropDatabase(TRI_vocbase_t* vocbase) = 0;
/// @brief wait until a database directory disappears
/// @brief wait until a database directory disappears -- FIXME: force wait or delete; add the keyword 'Database' to the signature
virtual int waitUntilDeletion(TRI_voc_tick_t id, bool force) = 0;
// asks the storage engine to create a collection as specified in the VPack
// Slice object and persist the creation info. It is guaranteed by the server
// that no other active collection with the same name and id exists in the same
// database when this function is called. If this operation fails somewhere in
// the middle, the storage engine is required to fully clean up the creation
// and throw only then, so that subsequent collection creation requests will not fail.
// the WAL entry for the collection creation will be written *after* the call
// to "createCollection" returns
virtual std::string createCollection(TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
arangodb::LogicalCollection const* parameters) = 0;
// asks the storage engine to drop the specified collection and persist the
// deletion info. Note that physical deletion of the collection data must not
// be carried out by this call, as there may
// still be readers of the collection's data. It is recommended that this operation
// only sets a deletion flag for the collection but lets an async task perform
// the actual deletion.
// the WAL entry for collection deletion will be written *after* the call
// to "dropCollection" returns
virtual void prepareDropCollection(TRI_vocbase_t* vocbase, arangodb::LogicalCollection* collection) = 0;
// perform a physical deletion of the collection
virtual void dropCollection(TRI_vocbase_t* vocbase, arangodb::LogicalCollection* collection) = 0;
// asks the storage engine to change properties of the collection as specified in
// the VPack Slice object and persist them. If this operation fails
// somewhere in the middle, the storage engine is required to fully revert the
// property changes and throw only then, so that subsequent operations will not fail.
// the WAL entry for the property change will be written *after* the call
// to "changeCollection" returns
virtual void changeCollection(TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
arangodb::LogicalCollection const* parameters,
bool doSync) = 0;
// asks the storage engine to create an index as specified in the VPack
// Slice object and persist the creation info. The database id, collection id
// and index data are passed in the Slice object. Note that this function
// is not responsible for inserting the individual documents into the index.
// If this operation fails somewhere in the middle, the storage engine is required
// to fully clean up the creation and throw only then, so that subsequent index
// creation requests will not fail.
// the WAL entry for the index creation will be written *after* the call
// to "createIndex" returns
virtual void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id, arangodb::velocypack::Slice const& data) = 0;
// asks the storage engine to drop the specified index and persist the deletion
// info. Note that physical deletion of the index must not be carried out by this call,
// as there may still be users of the index. It is recommended that this operation
// only sets a deletion flag for the index but lets an async task perform
// the actual deletion.
@ -198,7 +198,7 @@ class StorageEngine : public application_features::ApplicationFeature {
}
virtual void unloadCollection(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId) = 0;
virtual void signalCleanup(TRI_vocbase_t* vocbase) = 0;
// document operations
@ -216,13 +216,13 @@ class StorageEngine : public application_features::ApplicationFeature {
// into the storage engine's realm
virtual void addDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) = 0;
// removes a document from the storage engine
// this will be called by the WAL collector when non-surviving documents are being removed
// from the storage engine's realm
virtual void removeDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) = 0;
/// @brief remove data of expired compaction blockers
virtual bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) = 0;
@ -234,30 +234,30 @@ class StorageEngine : public application_features::ApplicationFeature {
/// @brief remove an existing compaction blocker
virtual int removeCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id) = 0;
/// @brief a callback function that is run while it is guaranteed that there is no compaction ongoing
virtual void preventCompaction(TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback) = 0;
/// @brief a callback function that is run if there is no compaction ongoing
virtual bool tryPreventCompaction(TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback,
bool checkForActiveBlockers) = 0;
virtual int shutdownDatabase(TRI_vocbase_t* vocbase) = 0;
virtual int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection, bool ignoreErrors) = 0;
/// @brief transfer markers into a collection
virtual int transferMarkers(LogicalCollection* collection, MMFilesCollectorCache*,
MMFilesOperationsType const&) = 0;
protected:
arangodb::LogicalCollection* registerCollection(
TRI_vocbase_t* vocbase, arangodb::velocypack::Slice params) {
return vocbase->registerCollection(true, params);
}
private:
std::unique_ptr<IndexFactory> const _indexFactory;
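
The drop-related comments above describe a two-phase protocol: the engine synchronously persists only a deletion flag, and an asynchronous task performs the physical deletion later, because readers may still hold the data. A minimal sketch of that contract, with all names hypothetical (this is not ArangoDB source):

```
#include <atomic>
#include <functional>

// stand-in for TRI_vocbase_t
struct HypotheticalVocbase {
  std::atomic<bool> deletionFlag{false};
};

class HypotheticalEngine {
 public:
  // phase 1: persist the intent only; readers may still be active,
  // so no files are removed here (mirrors prepareDropDatabase)
  int prepareDropDatabase(HypotheticalVocbase* vocbase) {
    vocbase->deletionFlag.store(true);
    scheduleAsync([this, vocbase] { dropDatabase(vocbase); });
    return 0;  // stand-in for TRI_ERROR_NO_ERROR
  }

  // phase 2: physical deletion, run once no readers remain
  int dropDatabase(HypotheticalVocbase* vocbase) {
    // remove the database's files and directories here
    return 0;
  }

 private:
  void scheduleAsync(std::function<void()> fn) {
    // a real engine would enqueue this on a cleanup thread;
    // running it inline keeps the sketch self-contained
    fn();
  }
};
```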

View File

@ -1780,16 +1780,6 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName);
LogicalCollection* collection = documentCollection(trxCollection(cid));
// First see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (options.returnNew) {
orderDitch(cid); // will throw when it fails
}
@ -1820,11 +1810,6 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
return res;
}
if (options.silent && !doingSynchronousReplication) {
// no need to construct the result object
return TRI_ERROR_NO_ERROR;
}
uint8_t const* vpack = result.vpack();
TRI_ASSERT(vpack != nullptr);
@ -1867,6 +1852,15 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
}
// Now see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
@ -1953,7 +1947,7 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
}
}
if (doingSynchronousReplication && options.silent) {
if (options.silent) {
// We needed the results, but do not want to report:
resultBuilder.clear();
}
@ -2081,16 +2075,6 @@ OperationResult Transaction::modifyLocal(
orderDitch(cid); // will throw when it fails
}
// First see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
// Update/replace are a read and a write, let's get the write lock already
// for the read operation:
int res = lock(trxCollection(cid), AccessMode::Type::WRITE);
@ -2128,7 +2112,7 @@ OperationResult Transaction::modifyLocal(
if (res == TRI_ERROR_ARANGO_CONFLICT) {
// still return
if ((!options.silent || doingSynchronousReplication) && !isBabies) {
if (!isBabies) {
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
@ -2141,13 +2125,11 @@ OperationResult Transaction::modifyLocal(
uint8_t const* vpack = result.vpack();
TRI_ASSERT(vpack != nullptr);
if (!options.silent || doingSynchronousReplication) {
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key,
TRI_ExtractRevisionId(VPackSlice(vpack)), actualRevision,
options.returnOld ? previous.vpack() : nullptr ,
options.returnNew ? vpack : nullptr);
}
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key,
TRI_ExtractRevisionId(VPackSlice(vpack)), actualRevision,
options.returnOld ? previous.vpack() : nullptr ,
options.returnNew ? vpack : nullptr);
return TRI_ERROR_NO_ERROR;
};
@ -2176,6 +2158,16 @@ OperationResult Transaction::modifyLocal(
MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
}
// Now see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
// get here, in the single document case, we do not try to replicate
@ -2265,7 +2257,7 @@ OperationResult Transaction::modifyLocal(
}
}
if (doingSynchronousReplication && options.silent) {
if (options.silent) {
// We needed the results, but do not want to report:
resultBuilder.clear();
}
@ -2335,16 +2327,6 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
orderDitch(cid); // will throw when it fails
}
// First see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
VPackBuilder resultBuilder;
TRI_voc_tick_t maxTick = 0;
@ -2383,7 +2365,6 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
if (res != TRI_ERROR_NO_ERROR) {
if (res == TRI_ERROR_ARANGO_CONFLICT &&
(!options.silent || doingSynchronousReplication) &&
!isBabies) {
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
@ -2391,10 +2372,8 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
return res;
}
if (!options.silent || doingSynchronousReplication) {
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
}
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision, 0,
options.returnOld ? previous.vpack() : nullptr, nullptr);
return TRI_ERROR_NO_ERROR;
};
@ -2421,6 +2400,16 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
}
// Now see whether or not we have to do synchronous replication:
std::shared_ptr<std::vector<ServerID> const> followers;
bool doingSynchronousReplication = false;
if (ServerState::isDBServer(_serverRole)) {
// Now replicate the same operation on all followers:
auto const& followerInfo = collection->followers();
followers = followerInfo->get();
doingSynchronousReplication = followers->size() > 0;
}
if (doingSynchronousReplication && res == TRI_ERROR_NO_ERROR) {
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
// get here, in the single document case, we do not try to replicate
@ -2508,7 +2497,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
}
}
if (doingSynchronousReplication && options.silent) {
if (options.silent) {
// We needed the results, but do not want to report:
resultBuilder.clear();
}
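
The pattern is identical across insertLocal, modifyLocal and removeLocal: the follower lookup for synchronous replication moves from before the local write to after it, and the result document is now always built and only cleared at the very end when options.silent is set, since replication needs the built result. A sketch of the resulting control flow, using stand-in types rather than the actual Transaction code:

```
#include <string>
#include <vector>

struct Options { bool silent = false; };
struct Result { std::string data; void clear() { data.clear(); } };

// hypothetical distillation of the new insertLocal ordering
Result insertLocalSketch(bool isDBServer, Options const& options) {
  Result result;
  // 1) perform the local write and build the result unconditionally;
  //    the replication step below may need the produced document
  result.data = "{\"_key\":\"...\"}";

  // 2) only now determine whether synchronous replication is needed
  std::vector<std::string> followers;
  if (isDBServer) {
    followers = {/* taken from the collection's follower info */};
  }
  for (auto const& follower : followers) {
    (void)follower;  // ship result.data to each follower (omitted)
  }

  // 3) honor silent mode only after replication had its chance
  if (options.silent) {
    result.clear();  // we needed the result, but do not report it
  }
  return result;
}
```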

View File

@ -838,7 +838,7 @@ static TRI_action_result_t ExecuteActionVocbase(
// copy suffix, which comes from the action:
std::string path = request->prefix();
v8::Handle<v8::Array> suffixArray = v8::Array::New(isolate);
std::vector<std::string> const& suffixes = request->suffixes();
std::vector<std::string> const& suffixes = request->suffixes(); // TODO: does this need to be decodedSuffixes()??
uint32_t index = 0;
char const* sep = "";
@ -1347,7 +1347,7 @@ static bool clusterSendToAllServers(
std::string const& path, // Note: Has to be properly encoded!
arangodb::rest::RequestType const& method, std::string const& body) {
ClusterInfo* ci = ClusterInfo::instance();
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
std::string url = "/_db/" + StringUtils::urlEncode(dbname) + "/" + path;
// Have to propagate to DB Servers
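
This call site (and ListDatabasesCoordinator below) switches from a raw ClusterComm* to auto, which suggests ClusterComm::instance() now returns a smart pointer so the instance cannot disappear under a caller during shutdown. The new return type is not shown in this diff, so the following is only an assumed sketch of that idiom:

```
#include <memory>
#include <mutex>

class Comm {  // hypothetical stand-in for ClusterComm
 public:
  static std::shared_ptr<Comm> instance() {
    std::lock_guard<std::mutex> guard(_mutex);
    if (_instance == nullptr) {
      _instance = std::shared_ptr<Comm>(new Comm());
    }
    return _instance;  // the returned copy keeps the instance alive
  }

 private:
  Comm() = default;
  static std::shared_ptr<Comm> _instance;
  static std::mutex _mutex;
};

std::shared_ptr<Comm> Comm::_instance;
std::mutex Comm::_mutex;

// call sites then hold a shared_ptr for the duration of their work:
//   auto cc = Comm::instance();
```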

View File

@ -2137,7 +2137,7 @@ static void ListDatabasesCoordinator(
if (!DBServers.empty()) {
ServerID sid = DBServers[0];
ClusterComm* cc = ClusterComm::instance();
auto cc = ClusterComm::instance();
std::unordered_map<std::string, std::string> headers;
headers["Authentication"] = TRI_ObjectToString(args[2]);

View File

@ -25,7 +25,6 @@
#include "LogicalCollection.h"
#include "Aql/QueryCache.h"
#include "Basics/Barrier.h"
#include "Basics/LocalTaskQueue.h"
#include "Basics/ReadLocker.h"
#include "Basics/StaticStrings.h"
@ -87,7 +86,7 @@ class IndexFillerTask : public basics::LocalTask {
try {
_idx->batchInsert(_trx, _documents, _queue);
} catch (std::exception& e) {
} catch (std::exception const&) {
_queue->setStatus(TRI_ERROR_INTERNAL);
}
@ -1464,7 +1463,6 @@ std::shared_ptr<Index> LogicalCollection::createIndex(Transaction* trx,
int res = fillIndexes(trx, indexListLocal, false);
if (res != TRI_ERROR_NO_ERROR) {
usleep(1000000);
THROW_ARANGO_EXCEPTION(res);
}
@ -1861,7 +1859,7 @@ int LogicalCollection::fillIndexes(
insertInAllIndexes();
if (queue.status() != TRI_ERROR_NO_ERROR) {
break;
};
}
documents.clear();
}
}
@ -1876,6 +1874,7 @@ int LogicalCollection::fillIndexes(
// TODO: fix perf logging?
} catch (arangodb::basics::Exception const& ex) {
queue.setStatus(ex.code());
LOG(WARN) << "caught exception while filling indexes: " << ex.what();
} catch (std::bad_alloc const&) {
queue.setStatus(TRI_ERROR_OUT_OF_MEMORY);
} catch (std::exception const& ex) {
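
Two small hardening changes here: IndexFillerTask::run now catches by const reference (the exception object was unused), and fillIndexes logs a warning when an exception aborts index filling. The underlying task pattern, converting exceptions into a status on the shared queue instead of letting them escape the worker, looks roughly like this in isolation (hypothetical stand-ins, not the real classes):

```
#include <atomic>
#include <exception>

struct TaskQueueSketch {
  std::atomic<int> status{0};          // 0 stands in for TRI_ERROR_NO_ERROR
  void setStatus(int code) { status.store(code); }
};

constexpr int kErrorInternal = 4;      // stand-in for TRI_ERROR_INTERNAL

template <typename Index, typename Docs>
void runFillerTask(Index& idx, Docs const& documents, TaskQueueSketch& queue) {
  try {
    idx.batchInsert(documents, queue); // may throw
  } catch (std::exception const&) {    // caught by const ref; object unused
    queue.setStatus(kErrorInternal);
  }
}
```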

View File

@ -1,17 +1,12 @@
# -*- mode: CMAKE; -*-
# these are the install targets for the client package.
# we can't use RUNTIME DESTINATION here.
# include(/tmp/dump_vars.cmake)
message( "CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}/ CMAKE_INSTALL_SBINDIR ${CMAKE_INSTALL_SBINDIR}")
set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
execute_process(COMMAND mkdir -p ${STRIP_DIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_SBINDIR})
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_SBINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
"${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}"
)

View File

@ -31,6 +31,7 @@
#include "ApplicationFeatures/VersionFeature.h"
#include "Basics/ArangoGlobalContext.h"
#include "Benchmark/BenchFeature.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Random/RandomFeature.h"

View File

@ -30,6 +30,7 @@
#include "ApplicationFeatures/VersionFeature.h"
#include "Basics/ArangoGlobalContext.h"
#include "Dump/DumpFeature.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Random/RandomFeature.h"

View File

@ -31,6 +31,7 @@
#include "ApplicationFeatures/VersionFeature.h"
#include "Basics/ArangoGlobalContext.h"
#include "Import/ImportFeature.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Random/RandomFeature.h"

View File

@ -32,6 +32,7 @@
#include "ApplicationFeatures/V8PlatformFeature.h"
#include "ApplicationFeatures/VersionFeature.h"
#include "Basics/ArangoGlobalContext.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Random/RandomFeature.h"
@ -45,48 +46,57 @@ using namespace arangodb;
using namespace arangodb::application_features;
int main(int argc, char* argv[]) {
ArangoGlobalContext context(argc, argv, BIN_DIRECTORY);
context.installHup();
std::string name = context.binaryName();
std::shared_ptr<options::ProgramOptions> options(new options::ProgramOptions(
argv[0], "Usage: " + name + " [<options>]", "For more information use:", BIN_DIRECTORY));
ApplicationServer server(options, BIN_DIRECTORY);
int ret = EXIT_SUCCESS;
server.addFeature(new ClientFeature(&server));
server.addFeature(new ConfigFeature(&server, name));
server.addFeature(new ConsoleFeature(&server));
server.addFeature(new GreetingsFeature(&server, "arangosh"));
server.addFeature(new LanguageFeature(&server));
server.addFeature(new LoggerFeature(&server, false));
server.addFeature(new RandomFeature(&server));
server.addFeature(new ShellFeature(&server, &ret));
server.addFeature(new ShutdownFeature(&server, {"Shell"}));
server.addFeature(new SslFeature(&server));
server.addFeature(new TempFeature(&server, name));
server.addFeature(new V8PlatformFeature(&server));
server.addFeature(new V8ShellFeature(&server, name));
server.addFeature(new VersionFeature(&server));
try {
server.run(argc, argv);
if (server.helpShown()) {
// --help was displayed
ret = EXIT_SUCCESS;
ArangoGlobalContext context(argc, argv, BIN_DIRECTORY);
context.installHup();
std::string name = context.binaryName();
std::shared_ptr<options::ProgramOptions> options(new options::ProgramOptions(
argv[0], "Usage: " + name + " [<options>]", "For more information use:", BIN_DIRECTORY));
ApplicationServer server(options, BIN_DIRECTORY);
int ret = EXIT_SUCCESS;
try {
server.addFeature(new ClientFeature(&server));
server.addFeature(new ConfigFeature(&server, name));
server.addFeature(new ConsoleFeature(&server));
server.addFeature(new GreetingsFeature(&server, "arangosh"));
server.addFeature(new LanguageFeature(&server));
server.addFeature(new LoggerFeature(&server, false));
server.addFeature(new RandomFeature(&server));
server.addFeature(new ShellFeature(&server, &ret));
server.addFeature(new ShutdownFeature(&server, {"Shell"}));
server.addFeature(new SslFeature(&server));
server.addFeature(new TempFeature(&server, name));
server.addFeature(new V8PlatformFeature(&server));
server.addFeature(new V8ShellFeature(&server, name));
server.addFeature(new VersionFeature(&server));
server.run(argc, argv);
if (server.helpShown()) {
// --help was displayed
ret = EXIT_SUCCESS;
}
} catch (std::exception const& ex) {
LOG(ERR) << "arangosh terminated because of an unhandled exception: "
<< ex.what();
ret = EXIT_FAILURE;
} catch (...) {
LOG(ERR) << "arangosh terminated because of an unhandled exception of "
"unknown type";
ret = EXIT_FAILURE;
}
return context.exit(ret);
} catch (std::exception const& ex) {
LOG(ERR) << "arangosh terminated because of an unhandled exception: "
<< ex.what();
ret = EXIT_FAILURE;
} catch (...) {
LOG(ERR) << "arangosh terminated because of an unhandled exception of "
"unknown type";
ret = EXIT_FAILURE;
}
return context.exit(ret);
exit(EXIT_FAILURE);
}
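
The net effect of this hunk is that feature registration now happens inside the try block, so an exception thrown while wiring up features is logged the same way as one thrown by server.run(), and the process still exits through context.exit(ret). The trailing exit(EXIT_FAILURE) appears to serve as a fallback that is only reached if the return above is skipped; the surrounding context is not fully shown in this diff.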

View File

@ -2,63 +2,30 @@
# these are the install targets for the client package.
# we can't use RUNTIME DESTINATION here.
set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip")
execute_process(COMMAND mkdir -p ${STRIP_DIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_FILE})
set(FILE ${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${CMAKE_INSTALL_BINDIR})
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}"
"${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}"
"${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}"
"${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}"
"${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
"${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}"
"${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}")

View File

@ -19,9 +19,9 @@ endif()
# debug info directory:
if (${CMAKE_INSTALL_LIBDIR} STREQUAL "usr/lib64")
# some systems have weird places for usr/lib:
set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/${CMAKE_PROJECT_NAME}")
set(CMAKE_INSTALL_DEBINFO_DIR "usr/lib/debug/")
else ()
set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_PROJECT_NAME}")
set(CMAKE_INSTALL_DEBINFO_DIR "${CMAKE_INSTALL_LIBDIR}/debug/")
endif ()
set(CMAKE_INSTALL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_SYSCONFDIR}/${CMAKE_PROJECT_NAME}")

View File

@ -157,3 +157,41 @@ macro(to_native_path sourceVarName)
endif()
set("INC_${sourceVarName}" ${myVar})
endmacro()
macro(install_debinfo
STRIP_DIR
USER_SUB_DEBINFO_DIR
USER_FILE
USER_STRIP_FILE)
set(SUB_DEBINFO_DIR ${USER_SUB_DEBINFO_DIR})
set(FILE ${USER_FILE})
set(STRIP_FILE ${STRIP_DIR}/${USER_STRIP_FILE})
execute_process(COMMAND mkdir -p ${STRIP_DIR})
if (NOT MSVC AND CMAKE_STRIP AND FILE_EXECUTABLE)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(
COMMAND ${FILE_EXECUTABLE} ${FILE}
OUTPUT_VARIABLE FILE_RESULT)
string(REGEX
REPLACE ".*=([a-z0-9]*),.*" "\\1"
FILE_CHECKSUM
${FILE_RESULT}
)
if (NOT ${FILE_CHECKSUM} STREQUAL "")
string(SUBSTRING ${FILE_CHECKSUM} 0 2 SUB_DIR)
string(SUBSTRING ${FILE_CHECKSUM} 2 -1 STRIP_FILE)
set(SUB_DEBINFO_DIR .build-id/${SUB_DIR})
set(STRIP_FILE "${STRIP_FILE}.debug")
endif()
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_DIR}/${STRIP_FILE})
set(FILE ${STRIP_DIR}/${STRIP_FILE})
endif()
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${SUB_DEBINFO_DIR})
endmacro()
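
For reference, the regex above extracts the build ID from the output of `file` (for an ELF binary it prints something like `BuildID[sha1]=6e35..., stripped`): the first two hex digits become a subdirectory and the remainder, suffixed with `.debug`, becomes the file name. Combined with the CMAKE_INSTALL_DEBINFO_DIR change above, the split debug info lands in the `usr/lib/debug/.build-id/xx/yyyy.debug` layout that debuggers such as GDB search by build ID, which is presumably why the per-project subdirectory was dropped from CMAKE_INSTALL_DEBINFO_DIR.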

Some files were not shown because too many files have changed in this diff.