mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
commit a49fe5ad21
@@ -4,6 +4,17 @@
 # External Projects used by ArangoDB
 # ------------------------------------------------------------------------------
 
+if(NOT EXISTS ${PROJECT_SOURCE_DIR}/3rdParty/V8/v8/LICENSE AND
+   NOT EXISTS ${PROJECT_SOURCE_DIR}/3rdParty/V8/v8/testing/gtest/LICENSE)
+  message(FATAL_ERROR "Git submodules not checked out properly - aborting! Run:
+    git submodule update --recursive
+    git submodule update --init --recursive
+  On Windows you need to make sure git is recent enough and may create symlinks!
+")
+endif()
+
 include(ExternalProject)
 
 ################################################################################
@@ -23,6 +23,12 @@ devel
   JavaScript document operations from 1239 ("illegal document revision")
   to 1200 ("conflict").
 
+* added data export tool, arangoexport.
+
+  arangoexport can be used to export collections to json and jsonl
+  and export a graph or collections to xgmml.
+
+
 v3.2.alpha1 (2017-02-05)
 ------------------------
 
@@ -96,6 +96,7 @@ set(ARANGODB_FRIENDLY_STRING "ArangoDB - the multi-model database")
 set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program")
 set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - export")
 set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - importer")
+set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - dataexporter")
 set(ARANGO_IMP_FRIENDLY_STRING "arangoimp - TSV/CSV/JSON importer")
 set(ARANGOSH_FRIENDLY_STRING "arangosh - commandline client")
 set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - vpack printer")
@@ -108,6 +109,7 @@ set(LIB_ARANGO_V8 arango_v8)
 set(BIN_ARANGOBENCH arangobench)
 set(BIN_ARANGOD arangod)
 set(BIN_ARANGODUMP arangodump)
+set(BIN_ARANGOEXPORT arangoexport)
 set(BIN_ARANGOIMP arangoimp)
 set(BIN_ARANGORESTORE arangorestore)
 set(BIN_ARANGOSH arangosh)
@@ -0,0 +1,90 @@
+Exporting Data from an ArangoDB database
+========================================
+
+To export data from an ArangoDB server instance, you will need to invoke _arangoexport_.
+_arangoexport_ can be invoked by executing the following command:
+
+    unix> arangoexport --collection test --output-directory "dump"
+
+This exports the collection *test* into the directory *dump* as one big json array. Every entry
+in this array is one document from the collection without a specific order. To export more than
+one collection at a time, specify multiple *--collection* options.
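
For example, exporting two collections in a single run could look like this (illustrative collection names, not taken from the commit's own examples):

    unix> arangoexport --collection users --collection orders --output-directory "dump"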
+
+The default output directory is *export*.
+
+_arangoexport_ will by default connect to the *_system* database using the default
+endpoint. If you want to connect to a different database or a different endpoint,
+or use authentication, you can use the following command-line options:
+
+* *--server.database <string>*: name of the database to connect to
+* *--server.endpoint <string>*: endpoint to connect to
+* *--server.username <string>*: username
+* *--server.password <string>*: password to use (omit this and you'll be prompted for the password)
+* *--server.authentication <bool>*: whether or not to use authentication
+
+Here's an example of exporting data from a non-standard endpoint, using a dedicated
+[database name](../Appendix/Glossary.md#database-name):
+
+    unix> arangoexport --server.endpoint tcp://192.168.173.13:8531 --server.username backup --server.database mydb --collection test --output-directory "my-export"
+
+When finished, _arangoexport_ will print out a summary line with some aggregate
+statistics about what it did, e.g.:
+
+    Processed 2 collection(s), wrote 9031763 Byte(s), 78 HTTP request(s)
+
+Export JSON
+-----------
+
+    unix> arangoexport --type json --collection test
+
+This exports the collection *test* into the output directory *export* as one json array.
+Every array entry is one document from the collection *test*.
+
+Export JSONL
+------------
+
+    unix> arangoexport --type jsonl --collection test
+
+This exports the collection *test* into the output directory *export* as jsonl. Every line
+in the export is one document from the collection *test* as json.
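
A single line of such a jsonl export could look like the following (hypothetical document; the system attributes _key, _id and _rev are always present in ArangoDB documents):

    {"_key":"187","_id":"test/187","_rev":"_Upd2PvC---","value":1}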
+
+Export XGMML
+------------
+
+[XGMML](https://en.wikipedia.org/wiki/XGMML) is an XML application based on [GML](https://en.wikipedia.org/wiki/Graph_Modelling_Language). To view the XGMML file you can use, for example, [Cytoscape](http://cytoscape.org).
+
+## XGMML specific options
+
+*--xgmml-label-attribute* specifies the name of the attribute that will become the label in the xgmml file.
+
+*--xgmml-label-only* set to true will only export the label, without any attributes in edges or nodes.
+
+## Export based on collections
+
+    unix> arangoexport --type xgmml --graph-name mygraph --collection vertex --collection edge
+
+This exports an unnamed graph with vertex collection *vertex* and edge collection *edge* into the xgmml file *mygraph.xgmml*.
+
+## Export based on a named graph
+
+    unix> arangoexport --type xgmml --graph-name mygraph
+
+This exports the named graph mygraph into the xgmml file *mygraph.xgmml*.
+
+## Export XGMML without attributes
+
+    unix> arangoexport --type xgmml --graph-name mygraph --xgmml-label-only true
+
+This exports the named graph mygraph into the xgmml file *mygraph.xgmml* without the *<att>* tag in nodes and edges.
+
+## Export XGMML with a specific label
+
+    unix> arangoexport --type xgmml --graph-name mygraph --xgmml-label-attribute name
+
+This exports the named graph mygraph into the xgmml file *mygraph.xgmml* with a label taken from the document attribute *name* instead of the default attribute *label*.
@@ -0,0 +1,59 @@
+Features and Improvements
+=========================
+
+The following list shows in detail which features have been added or improved in
+ArangoDB 3.2. ArangoDB 3.2 also contains several bugfixes that are not listed
+here.
+
+SmartGraphs
+-----------
+
+
+Data format
+-----------
+
+
+Communication Layer
+-------------------
+
+
+Cluster
+-------
+
+
+Document revisions cache
+------------------------
+
+
+AQL
+---
+
+### Functions added
+
+
+### Optimizer improvements
+
+
+### Miscellaneous improvements
+
+
+Audit Log
+---------
+
+
+Client tools
+------------
+
+Added the tool _arangoexport_ to export collections to json and jsonl. It can also export graphs or collections to xgmml.
+
+Web Admin Interface
+-------------------
+
+
+Authentication
+--------------
+
+
+Foxx
+----
@@ -4,6 +4,7 @@ Release Notes
 Whats New
 ---------
 
+- [Whats New in 3.2](NewFeatures32.md)
 - [Whats New in 3.1](NewFeatures31.md)
 - [Whats New in 3.0](NewFeatures30.md)
 - [Whats New in 2.8](NewFeatures28.md)
@@ -144,6 +144,7 @@
   * [Arangoimp](Administration/Arangoimp.md)
   * [Arangodump](Administration/Arangodump.md)
   * [Arangorestore](Administration/Arangorestore.md)
+  * [Arangoexport](Administration/Arangoexport.md)
   * [Managing Users](Administration/ManagingUsers.md)
   * [Server Configuration](Administration/Configuration/README.md)
   * [Managing Endpoints](Administration/Configuration/Endpoint.md)
@@ -192,6 +193,7 @@
 # * [Server Internals](Architecture/ServerInternals.md)
 #
 * [Release notes](ReleaseNotes/README.md)
+  * [Whats New in 3.2](ReleaseNotes/NewFeatures32.md)
   * [Whats New in 3.1](ReleaseNotes/NewFeatures31.md)
   * [Incompatible changes in 3.1](ReleaseNotes/UpgradingChanges31.md)
   * [Whats New in 3.0](ReleaseNotes/NewFeatures30.md)
@@ -17,6 +17,7 @@ if (USE_MAINTAINER_MODE)
   man1/arangodump.1
   man1/arangoimp.1
   man1/arangorestore.1
+  man1/arangoexport.1
   man1/arangosh.1
   man8/rcarangod.8
   man8/arangod.8
@@ -0,0 +1,24 @@
+.TH arangoexport 1 "3.2.devel" "ArangoDB" "ArangoDB"
+.SH NAME
+arangoexport - a tool to export collections of an ArangoDB database
+.SH SYNOPSIS
+arangoexport [options]
+.SH DESCRIPTION
+The arangoexport binary can be used to export collections of an ArangoDB
+database to json and jsonl. It can also export a graph or collections
+to xgmml.
+
+arangoexport will work on the specified database only. If no database name
+is specified, arangoexport will work on the default database ("_system").
+
+The exported jsonl files can be re-imported in an ArangoDB database
+using the arangoimp tool.
+.SH OPTIONS
+The arangoexport binary has many options that can be used to control its
+behavior. For a complete list of options, please refer to the
+ArangoDB online manual, available at https://www.arangodb.com/ or run
+arangoexport --help.
+
+.SH AUTHOR
+Copyright ArangoDB GmbH, Cologne, Germany
@@ -0,0 +1,21 @@
+NAME
+<COMMAND> - a tool to export collections of an ArangoDB database
+SYNOPSIS
+<COMMAND> [options]
+DESCRIPTION
+The <COMMAND> binary can be used to export collections of an ArangoDB
+database to json and jsonl. It can also export a graph or collections
+to xgmml.
+
+<COMMAND> will work on the specified database only. If no database name
+is specified, <COMMAND> will work on the default database ("_system").
+
+The exported jsonl files can be re-imported in an ArangoDB database
+using the arangoimp tool.
+OPTIONS
+The <COMMAND> binary has many options that can be used to control its
+behavior. For a complete list of options, please refer to the
+ArangoDB online manual, available at https://www.arangodb.com/ or run
+<COMMAND> --help.
+
+AUTHOR
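
As a concrete illustration of the re-import mentioned above, an arangoimp invocation along these lines should work (the file name and the exact flags are assumptions; consult arangoimp --help for the authoritative options):

    unix> arangoimp --file export/test.jsonl --type jsonl --collection test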
@@ -462,7 +462,9 @@ It does not, if `SUMMARY.md` in `Books/ppbooks/` looks like this:
 If sub-chapters do not show in the navigation, try another browser (Firefox).
 Chrome's security policies are pretty strict about localhost and file://
 protocol. You may access the docs through a local web server to lift the
-restrictions.
+restrictions. You can use Python's built-in HTTP server for this.
+
+    ~/books$ python -m SimpleHTTPServer 8000
+
 To only regenerate one file (faster) you may specify a filter:
 
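
Note that `SimpleHTTPServer` is the Python 2 module name; on a Python 3 system the equivalent invocation is:

    ~/books$ python3 -m http.server 8000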
@@ -498,8 +500,9 @@ generate
 - `./utils/generateExamples.sh --onlyThisOne geoIndexSelect` will only produce one example - *geoIndexSelect*
 - `./utils/generateExamples.sh --onlyThisOne 'MOD.*'` will only produce the examples matching that regex; Note that
   examples with enumerations in their name may base on others in their series - so you should generate the whole group.
-- `./utils/generateExamples.sh --server.endpoint tcp://127.0.0.1:8529` will utilize an existing arangod instead of starting a new one.
-  this does seriously cut down the execution time.
+- running `onlyThisOne` in conjunction with a pre-started server cuts down the execution time even more.
+  In addition to `--onlyThisOne ...`, specify e.g. `--server.endpoint tcp://127.0.0.1:8529` to utilize your already running arangod.
+  Please note that examples may collide with existing collections like 'test' - you need to make sure your server is clean enough.
 - you can use generateExamples like that:
   `./utils/generateExamples.sh \
     --server.endpoint 'tcp://127.0.0.1:8529' \
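
Combining the two options described in the list above, a pre-started-server run of a single example could look like this (endpoint reused from the example above):

    ./utils/generateExamples.sh --onlyThisOne geoIndexSelect --server.endpoint tcp://127.0.0.1:8529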
@@ -28,8 +28,6 @@
 
 #include "Actions/actions.h"
 
-class TRI_action_t;
-
 namespace arangodb {
 class RestActionHandler : public RestVocbaseBaseHandler {
  public:
@@ -25,7 +25,13 @@
 #include "Agency/Agent.h"
 #include "Agency/Job.h"
 
+#include <velocypack/Builder.h>
+#include <velocypack/Iterator.h>
+#include <velocypack/Slice.h>
+#include <velocypack/velocypack-aliases.h>
+
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 AddFollower::AddFollower(Node const& snapshot, Agent* agent,
                          std::string const& jobId, std::string const& creator,
@@ -28,6 +28,7 @@
 #include "Agency/MoveShard.h"
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 CleanOutServer::CleanOutServer(Node const& snapshot, Agent* agent,
                                std::string const& jobId,
@@ -27,6 +27,7 @@
 #include "Agency/Job.h"
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 FailedFollower::FailedFollower(Node const& snapshot, Agent* agent,
                                std::string const& jobId,
@@ -30,6 +30,7 @@
 #include <vector>
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 FailedLeader::FailedLeader(Node const& snapshot, Agent* agent,
                            std::string const& jobId, std::string const& creator,
@@ -30,6 +30,7 @@
 #include "Agency/UnassumedLeadership.h"
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 FailedServer::FailedServer(Node const& snapshot, Agent* agent,
                            std::string const& jobId, std::string const& creator,
@@ -36,6 +36,7 @@
 #include <thread>
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 Inception::Inception() : Thread("Inception"), _agent(nullptr) {}
 
@@ -24,6 +24,7 @@
 #include "Job.h"
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
   if (!plan.isArray() || !current.isArray()) {
@@ -28,6 +28,7 @@
 #include "Node.h"
 #include "Supervision.h"
 
+#include <velocypack/Builder.h>
 #include <velocypack/Iterator.h>
 #include <velocypack/Slice.h>
 #include <velocypack/velocypack-aliases.h>
@@ -41,7 +42,7 @@ namespace consensus {
 // and all others followers. Both arguments must be arrays. Returns true,
 // if the first items in both slice are equal and if both arrays contain
 // the same set of strings.
-bool compareServerLists(Slice plan, Slice current);
+bool compareServerLists(arangodb::velocypack::Slice plan, arangodb::velocypack::Slice current);
 
 enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
 const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",
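
As a worked example of the contract documented in the comment above (derived from that comment, not from this commit's tests): given plan = ["S1", "S2", "S3"] with S1 leading, compareServerLists returns true for current = ["S1", "S3", "S2"] (same leader, same follower set) and false for current = ["S2", "S1", "S3"] (different first item).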
@@ -63,9 +64,9 @@ static std::string const plannedServers = "/Plan/DBServers";
 static std::string const healthPrefix = "/Supervision/Health/";
 
 inline arangodb::consensus::write_ret_t transact(Agent* _agent,
-                                                 Builder const& transaction,
+                                                 arangodb::velocypack::Builder const& transaction,
                                                  bool waitForCommit = true) {
-  query_t envelope = std::make_shared<Builder>();
+  query_t envelope = std::make_shared<arangodb::velocypack::Builder>();
 
   try {
     envelope->openArray();
@@ -137,7 +138,7 @@ struct Job {
   std::string _creator;
   std::string _agencyPrefix;
 
-  std::shared_ptr<Builder> _jb;
+  std::shared_ptr<arangodb::velocypack::Builder> _jb;
 
 };
 
@@ -29,6 +29,7 @@
 static std::string const DBServer = "DBServer";
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 MoveShard::MoveShard(Node const& snapshot, Agent* agent,
                      std::string const& jobId, std::string const& creator,
@@ -33,8 +33,9 @@
 #include <deque>
 #include <regex>
 
-using namespace arangodb::consensus;
 using namespace arangodb::basics;
+using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 struct NotEmpty {
   bool operator()(const std::string& s) { return !s.empty(); }
@@ -27,6 +27,9 @@
 #include "AgencyCommon.h"
 
 #include <velocypack/Buffer.h>
+#include <velocypack/Builder.h>
+#include <velocypack/Slice.h>
+#include <velocypack/ValueType.h>
 #include <velocypack/velocypack-aliases.h>
 
 #include <type_traits>
@@ -50,8 +53,6 @@ enum Operation {
   REPLACE
 };
 
-using namespace arangodb::velocypack;
-
 class StoreException : public std::exception {
  public:
  explicit StoreException(std::string const& message) : _message(message) {}
@@ -161,10 +162,10 @@ class Node {
   bool handle(arangodb::velocypack::Slice const&);
 
   /// @brief Create Builder representing this store
-  void toBuilder(Builder&, bool showHidden = false) const;
+  void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const;
 
   /// @brief Create Builder representing this store
-  void toObject(Builder&, bool showHidden = false) const;
+  void toObject(arangodb::velocypack::Builder&, bool showHidden = false) const;
 
   /// @brief Access children
   Children& children();
@@ -173,10 +174,10 @@ class Node {
   Children const& children() const;
 
   /// @brief Create slice from value
-  Slice slice() const;
+  arangodb::velocypack::Slice slice() const;
 
   /// @brief Get value type
-  ValueType valueType() const;
+  arangodb::velocypack::ValueType valueType() const;
 
   /// @brief Add observer for this node
   bool addObserver(std::string const&);
@@ -221,7 +222,7 @@ class Node {
   std::string getString() const;
 
   /// @brief Get array value
-  Slice getArray() const;
+  arangodb::velocypack::Slice getArray() const;
 
  protected:
   /// @brief Add time to live entry
@@ -237,8 +238,8 @@ class Node {
   Store* _store;      ///< @brief Store
   Children _children; ///< @brief child nodes
   TimePoint _ttl;     ///< @brief my expiry
-  std::vector<Buffer<uint8_t>> _value; ///< @brief my value
-  mutable Buffer<uint8_t> _vecBuf;
+  std::vector<arangodb::velocypack::Buffer<uint8_t>> _value; ///< @brief my value
+  mutable arangodb::velocypack::Buffer<uint8_t> _vecBuf;
   mutable bool _vecBufDirty;
   bool _isArray;
 };
@@ -27,6 +27,7 @@
 #include "Agency/Job.h"
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 RemoveServer::RemoveServer(Node const& snapshot, Agent* agent,
                            std::string const& jobId, std::string const& creator,
@@ -34,10 +34,10 @@
 #include "Rest/HttpRequest.h"
 
 using namespace arangodb;
 
 using namespace arangodb::basics;
-using namespace arangodb::rest;
 using namespace arangodb::consensus;
+using namespace arangodb::rest;
+using namespace arangodb::velocypack;
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief ArangoDB server
@@ -40,8 +40,9 @@
 #include <iomanip>
 #include <regex>
 
-using namespace arangodb::consensus;
 using namespace arangodb::basics;
+using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 /// Non-Emptyness of string
 struct NotEmpty {
@@ -60,10 +60,10 @@ class Store : public arangodb::Thread {
   std::vector<bool> apply(query_t const& query, bool verbose = false);
 
   /// @brief Apply single entry in query
-  bool apply(Slice const& query, bool verbose = false);
+  bool apply(arangodb::velocypack::Slice const& query, bool verbose = false);
 
   /// @brief Apply entry in query
-  std::vector<bool> apply(std::vector<Slice> const& query,
+  std::vector<bool> apply(std::vector<arangodb::velocypack::Slice> const& query,
                           index_t lastCommitIndex, term_t term,
                           bool inform = true);
 
@@ -81,7 +81,7 @@ class Store : public arangodb::Thread {
   bool start();
 
   /// @brief Dump everything to builder
-  void dumpToBuilder(Builder&) const;
+  void dumpToBuilder(arangodb::velocypack::Builder&) const;
 
   /// @brief Notify observers
   void notifyObservers() const;
@@ -92,7 +92,7 @@ class Store : public arangodb::Thread {
   Store& operator=(VPackSlice const& slice);
 
   /// @brief Create Builder representing this store
-  void toBuilder(Builder&, bool showHidden = false) const;
+  void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const;
 
   /// @brief Copy out a node
   Node get(std::string const& path = std::string("/")) const;
@@ -41,9 +41,9 @@
 #include "Basics/MutexLocker.h"
 
 using namespace arangodb;
 
-using namespace arangodb::consensus;
 using namespace arangodb::application_features;
+using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 std::string Supervision::_agencyPrefix = "/arango";
 
@@ -27,6 +27,7 @@
 #include "Agency/Job.h"
 
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 UnassumedLeadership::UnassumedLeadership(
     Node const& snapshot, Agent* agent, std::string const& jobId,
@@ -39,6 +39,7 @@ using namespace arangodb;
 using namespace arangodb::application_features;
 using namespace arangodb::basics;
 using namespace arangodb::consensus;
+using namespace arangodb::velocypack;
 
 static void JS_EnabledAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_BEGIN(isolate);
@@ -360,18 +360,18 @@ bool ServerState::registerShortName(std::string const& id, ServerState::RoleEnum
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief try to integrate into a cluster
 ////////////////////////////////////////////////////////////////////////////////
-bool ServerState::integrateIntoCluster(ServerState::RoleEnum const& role,
+bool ServerState::integrateIntoCluster(ServerState::RoleEnum role,
                                        std::string const& myAddress,
                                        std::string const& myId) {
   // id supplied via command line this is deprecated
   if (!myId.empty()) {
     if (!hasPersistedId()) {
       setId(myId);
-      ServerState::RoleEnum roleInAgency = getRole();
+      role = getRole();
 
       // we are known to the agency under our old id!
-      if (roleInAgency != ServerState::ROLE_UNDEFINED) {
-        registerShortName(myId, roleInAgency);
+      if (role != ServerState::ROLE_UNDEFINED) {
+        registerShortName(myId, role);
         writePersistedId(myId);
       } else {
         LOG_TOPIC(FATAL, Logger::STARTUP) << "started with --cluster.my-id but id unknown in agency!";
@@ -397,50 +397,9 @@ bool ServerState::integrateIntoCluster(ServerState::RoleEnum role,
   }
   setId(id);
 
-  registerAtAgency(comm, role, id);
-
-  const std::string agencyKey = roleToAgencyKey(role);
-  const std::string planKey = "Plan/" + agencyKey + "/" + id;
-  const std::string currentKey = "Current/" + agencyKey + "/" + id;
-
-  auto builder = std::make_shared<VPackBuilder>();
-  result = comm.getValues(planKey);
-  bool found = true;
-  if (!result.successful()) {
-    found = false;
-  } else {
-    VPackSlice plan = result.slice()[0].get(std::vector<std::string>(
-        {AgencyCommManager::path(), "Plan", agencyKey, id}));
-    if (!plan.isString()) {
-      found = false;
-    } else {
-      builder->add(plan);
-    }
-  }
-  if (!found) {
-    // mop: hmm ... we are registered but not part of the Plan :O
-    // create a plan for ourselves :)
-    builder->add(VPackValue("none"));
-
-    VPackSlice plan = builder->slice();
-
-    comm.setValue(planKey, plan, 0.0);
-    if (!result.successful()) {
-      LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't create plan "
-                                      << result.errorMessage();
-      return false;
-    }
-  }
-
-  result = comm.setValue(currentKey, builder->slice(), 0.0);
-
-  if (!result.successful()) {
-    LOG_TOPIC(ERR, Logger::CLUSTER) << "Could not talk to agency! "
-                                    << result.errorMessage();
-    return false;
-  }
-
-  _id = id;
+  if (!registerAtAgency(comm, role, id)) {
+    FATAL_ERROR_EXIT();
+  }
 
   findAndSetRoleBlocking();
   LOG_TOPIC(DEBUG, Logger::CLUSTER) << "We successfully announced ourselves as "
@@ -554,7 +513,7 @@ bool ServerState::registerAtAgency(AgencyComm& comm,
   if (!result.successful()) {
     LOG_TOPIC(FATAL, Logger::STARTUP) << "Couldn't fetch Plan/" << agencyKey
                                       << " from agency. Agency is not initialized?";
-    FATAL_ERROR_EXIT();
+    return false;
   }
 
   VPackSlice servers = result.slice()[0].get(
@@ -562,7 +521,7 @@ bool ServerState::registerAtAgency(AgencyComm& comm,
   if (!servers.isObject()) {
     LOG_TOPIC(FATAL, Logger::STARTUP) << "Plan/" << agencyKey << " in agency is no object. "
                                       << "Agency not initialized?";
-    FATAL_ERROR_EXIT();
+    return false;
   }
 
   VPackSlice entry = servers.get(id);
@@ -570,16 +529,28 @@ bool ServerState::registerAtAgency(AgencyComm& comm,
       << id << " found in existing keys: " << (!entry.isNone());
 
   std::string planUrl = "Plan/" + agencyKey + "/" + id;
+  std::string currentUrl = "Current/" + agencyKey + "/" + id;
 
   AgencyGeneralTransaction reg;
   reg.operations.push_back( // Plan entry if not exists
      operationType(
        AgencyOperation(planUrl, AgencyValueOperationType::SET, builder.slice()),
        AgencyPrecondition(planUrl, AgencyPrecondition::Type::EMPTY, true)));
 
+  reg.operations.push_back( // Current entry if not exists
+     operationType(
+       AgencyOperation(currentUrl, AgencyValueOperationType::SET, builder.slice()),
+       AgencyPrecondition(currentUrl, AgencyPrecondition::Type::EMPTY, true)));
+
-  // ok to fail (at least that was how it was before :S)
-  // XXX this should probably be sent as part of the transaction below
+  // ok to fail..if it failed we are already registered
   comm.sendTransactionWithFailover(reg, 0.0);
+  } else {
+    std::string currentUrl = "Current/" + agencyKey + "/" + _idOfPrimary;
+    AgencyCommResult result = comm.setValue(currentUrl, id, 0.0);
+    if (!result.successful()) {
+      LOG_TOPIC(FATAL, Logger::STARTUP) << "Could not register ourselves as secondary in Current";
+      return false;
+    }
   }
 
   std::string targetIdStr =
@@ -660,7 +631,6 @@ bool ServerState::registerAtAgency(AgencyComm& comm,
   }
 
   LOG_TOPIC(FATAL, Logger::STARTUP) << "Couldn't register shortname for " << id;
-  FATAL_ERROR_EXIT();
   return false;
 }
 
@@ -942,6 +912,17 @@ bool ServerState::redetermineRole() {
   RoleEnum oldRole = loadRole();
   if (role != oldRole) {
     LOG_TOPIC(INFO, Logger::CLUSTER) << "Changed role to: " << roleString;
+    if (oldRole == ROLE_PRIMARY && role == ROLE_SECONDARY) {
+      std::string oldId("Current/DBServers/" + _id);
+      AgencyOperation del(oldId, AgencySimpleOperationType::DELETE_OP);
+      AgencyOperation incrementVersion("Current/Version",
+                                       AgencySimpleOperationType::INCREMENT_OP);
+
+      AgencyWriteTransaction trx(std::vector<AgencyOperation> {del, incrementVersion});
+
+      AgencyComm comm;
+      comm.sendTransactionWithFailover(trx, 0.0);
+    }
     if (!storeRole(role)) {
       return false;
     }
@@ -1245,7 +1226,7 @@ bool ServerState::storeRole(RoleEnum role) {
                                 ServerState::instance()->getPrimaryId());
     AgencyOperation addMe(myId, AgencyValueOperationType::SET,
                           builder.slice());
-    AgencyOperation incrementVersion("Plan/Version",
+    AgencyOperation incrementVersion("Current/Version",
                                      AgencySimpleOperationType::INCREMENT_OP);
     AgencyPrecondition precondition(myId, AgencyPrecondition::Type::EMPTY, false);
     trx.reset(new AgencyWriteTransaction({addMe, incrementVersion}, precondition));
@@ -139,7 +139,7 @@ class ServerState {
   /// @brief get the server role
   RoleEnum getRole();
 
-  bool integrateIntoCluster(RoleEnum const&, std::string const&, std::string const&);
+  bool integrateIntoCluster(RoleEnum, std::string const&, std::string const&);
 
   bool unregister();
 
@@ -295,7 +295,11 @@ bool MMFilesCollection::OpenIterator(TRI_df_marker_t const* marker, MMFilesColle
 }
 
 MMFilesCollection::MMFilesCollection(LogicalCollection* collection)
-    : PhysicalCollection(collection), _ditches(collection), _initialCount(0), _lastRevision(0) {}
+    : PhysicalCollection(collection)
+    , _ditches(collection)
+    , _initialCount(0), _lastRevision(0)
+    , _uncollectedLogfileEntries(0)
+    {}
 
 MMFilesCollection::~MMFilesCollection() {
   try {
@@ -1134,6 +1138,11 @@ int MMFilesCollection::iterateMarkersOnLoad(arangodb::Transaction* trx) {
   return TRI_ERROR_NO_ERROR;
 }
 
+bool MMFilesCollection::isFullyCollected() const {
+  int64_t uncollected = _uncollectedLogfileEntries.load();
+  return (uncollected == 0);
+}
+
 MMFilesDocumentPosition MMFilesCollection::lookupRevision(TRI_voc_rid_t revisionId) const {
   TRI_ASSERT(revisionId != 0);
   MMFilesDocumentPosition const old = _revisionsCache.lookup(revisionId);
@@ -162,6 +162,24 @@ class MMFilesCollection final : public PhysicalCollection {
 
   /// @brief iterate all markers of a collection on load
   int iterateMarkersOnLoad(arangodb::Transaction* trx) override;
 
+  virtual bool isFullyCollected() const override;
+
+  int64_t uncollectedLogfileEntries() const {
+    return _uncollectedLogfileEntries.load();
+  }
+
+  void increaseUncollectedLogfileEntries(int64_t value) {
+    _uncollectedLogfileEntries += value;
+  }
+
+  void decreaseUncollectedLogfileEntries(int64_t value) {
+    _uncollectedLogfileEntries -= value;
+    if (_uncollectedLogfileEntries < 0) {
+      _uncollectedLogfileEntries = 0;
+    }
+  }
+
  private:
  static int OpenIteratorHandleDocumentMarker(TRI_df_marker_t const* marker,
|
@ -221,6 +239,9 @@ class MMFilesCollection final : public PhysicalCollection {
|
||||||
TRI_voc_rid_t _lastRevision;
|
TRI_voc_rid_t _lastRevision;
|
||||||
|
|
||||||
MMFilesRevisionsCache _revisionsCache;
|
MMFilesRevisionsCache _revisionsCache;
|
||||||
|
|
||||||
|
std::atomic<int64_t> _uncollectedLogfileEntries;
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -30,6 +30,7 @@
 #include "Basics/hashes.h"
 #include "Basics/memory-map.h"
 #include "Logger/Logger.h"
+#include "MMFiles/MMFilesCollection.h"
 #include "MMFiles/MMFilesDatafileHelper.h"
 #include "MMFiles/MMFilesLogfileManager.h"
 #include "MMFiles/MMFilesIndexElement.h"
@@ -678,7 +679,7 @@ int MMFilesCollectorThread::processCollectionOperations(MMFilesCollectorCache* c
         << collection->name() << "'";
     updateDatafileStatistics(collection, cache);
 
-    collection->decreaseUncollectedLogfileEntries(cache->totalOperationsCount);
+    static_cast<arangodb::MMFilesCollection*>(collection->getPhysical())->decreaseUncollectedLogfileEntries(cache->totalOperationsCount);
 
     res = TRI_ERROR_NO_ERROR;
   } catch (arangodb::basics::Exception const& ex) {
@@ -125,10 +125,10 @@ std::string const MMFilesEngine::FeatureName("MMFilesEngine");
 
 // create the storage engine
 MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
-    : StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory()),
-      _isUpgrade(false),
-      _maxTick(0) {
-}
+    : StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory())
+    , _isUpgrade(false)
+    , _maxTick(0)
+    {}
 
 MMFilesEngine::~MMFilesEngine() {
 }
@@ -257,7 +257,7 @@ public:
   /// @brief Add engine specific AQL functions.
   void addAqlFunctions() const override;
 
 private:
   /// @brief: check the initial markers in a datafile
   bool checkDatafileHeader(MMFilesDatafile* datafile, std::string const& filename) const;
 
@@ -25,6 +25,7 @@
 #include "Basics/Exceptions.h"
 #include "Logger/Logger.h"
 #include "MMFiles/MMFilesDocumentOperation.h"
+#include "MMFiles/MMFilesCollection.h"
 #include "StorageEngine/TransactionState.h"
 #include "Utils/Transaction.h"
 #include "Utils/TransactionHints.h"
|
@ -140,7 +141,8 @@ void MMFilesTransactionCollection::freeOperations(Transaction* activeTrx, bool m
|
||||||
_collection->setRevision(_originalRevision, true);
|
_collection->setRevision(_originalRevision, true);
|
||||||
} else if (!_collection->isVolatile() && !isSingleOperationTransaction) {
|
} else if (!_collection->isVolatile() && !isSingleOperationTransaction) {
|
||||||
// only count logfileEntries if the collection is durable
|
// only count logfileEntries if the collection is durable
|
||||||
_collection->increaseUncollectedLogfileEntries(_operations->size());
|
arangodb::PhysicalCollection* collPtr = _collection->getPhysical();
|
||||||
|
static_cast<arangodb::MMFilesCollection*>(collPtr)->increaseUncollectedLogfileEntries(_operations->size());
|
||||||
}
|
}
|
||||||
|
|
||||||
delete _operations;
|
delete _operations;
|
||||||
|
|
|
@@ -25,6 +25,7 @@
 #include "Aql/QueryCache.h"
 #include "Logger/Logger.h"
 #include "Basics/Exceptions.h"
+#include "MMFiles/MMFilesCollection.h"
 #include "MMFiles/MMFilesDatafileHelper.h"
 #include "MMFiles/MMFilesDocumentOperation.h"
 #include "MMFiles/MMFilesLogfileManager.h"
@@ -298,7 +299,8 @@ int MMFilesTransactionState::addOperation(TRI_voc_rid_t revisionId,
     arangodb::aql::QueryCache::instance()->invalidate(
         _vocbase, collection->name());
 
-    collection->increaseUncollectedLogfileEntries(1);
+    auto cptr = collection->getPhysical();
+    static_cast<arangodb::MMFilesCollection*>(cptr)->increaseUncollectedLogfileEntries(1);
   } else {
     // operation is buffered and might be rolled back
     TransactionCollection* trxCollection = this->collection(collection->cid(), AccessMode::Type::WRITE);
@@ -917,6 +917,9 @@ static TRI_action_result_t ExecuteActionVocbase(
     if (tryCatch.CanContinue()) {
       response->setResponseCode(rest::ResponseCode::SERVER_ERROR);
 
+      std::string jsError = TRI_StringifyV8Exception(isolate, &tryCatch);
+      LOG_TOPIC(WARN, arangodb::Logger::V8) << "Caught an error while executing an action: " << jsError;
+#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
       // TODO how to generalize this?
       if (response->transportType() ==
           Endpoint::TransportType::HTTP) {  // FIXME
@@ -924,6 +927,7 @@ static TRI_action_result_t ExecuteActionVocbase(
             ->body()
             .appendText(TRI_StringifyV8Exception(isolate, &tryCatch));
       }
+#endif
     } else {
       v8g->_canceled = true;
       result.isValid = false;
@@ -44,6 +44,7 @@
 #include "StorageEngine/EngineSelectorFeature.h"
 #include "MMFiles/MMFilesDocumentOperation.h"
 //#include "MMFiles/MMFilesLogfileManager.h"
+#include "MMFiles/MMFilesCollection.h" //remove
 #include "MMFiles/MMFilesPrimaryIndex.h"
 #include "MMFiles/MMFilesIndexElement.h"
 #include "MMFiles/MMFilesToken.h"
@@ -230,7 +231,6 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
       _nextCompactionStartIndex(0),
       _lastCompactionStatus(nullptr),
       _lastCompactionStamp(0.0),
-      _uncollectedLogfileEntries(0),
       _isInitialIteration(false),
       _revisionError(false) {
   _keyGenerator.reset(KeyGenerator::factory(other.keyOptions()));
@@ -295,7 +295,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
       _nextCompactionStartIndex(0),
       _lastCompactionStatus(nullptr),
      _lastCompactionStamp(0.0),
-      _uncollectedLogfileEntries(0),
      _isInitialIteration(false),
      _revisionError(false) {
   if (!IsAllowedName(info)) {
@@ -602,26 +601,7 @@ bool LogicalCollection::IsAllowedName(bool allowSystem,
 
 /// @brief whether or not a collection is fully collected
 bool LogicalCollection::isFullyCollected() {
-  int64_t uncollected = _uncollectedLogfileEntries.load();
-
-  return (uncollected == 0);
-}
-
-void LogicalCollection::setNextCompactionStartIndex(size_t index) {
-  MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
-  _nextCompactionStartIndex = index;
-}
-
-size_t LogicalCollection::getNextCompactionStartIndex() {
-  MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
-  return _nextCompactionStartIndex;
-}
-
-void LogicalCollection::setCompactionStatus(char const* reason) {
-  TRI_ASSERT(reason != nullptr);
-
-  MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
-  _lastCompactionStatus = reason;
+  return getPhysical()->isFullyCollected();
 }
 
 uint64_t LogicalCollection::numberDocuments() const {
@@ -1193,7 +1173,12 @@ std::shared_ptr<arangodb::velocypack::Builder> LogicalCollection::figures() {
 
   builder->add("lastTick", VPackValue(_maxTick));
   builder->add("uncollectedLogfileEntries",
-               VPackValue(_uncollectedLogfileEntries));
+               VPackValue(
+                 //MOVE TO PHYSICAL
+                 static_cast<arangodb::MMFilesCollection*>(getPhysical())
+                   ->uncollectedLogfileEntries()
+               )
+  );
 
   // fills in compaction status
   char const* lastCompactionStatus = "-";
@ -108,28 +108,27 @@ class LogicalCollection {
   void isInitialIteration(bool value) { _isInitialIteration = value; }

   // TODO: MOVE TO PHYSICAL?
-  bool isFullyCollected();
-  int64_t uncollectedLogfileEntries() const {
-    return _uncollectedLogfileEntries.load();
-  }
-
-  void increaseUncollectedLogfileEntries(int64_t value) {
-    _uncollectedLogfileEntries += value;
-  }
-
-  void decreaseUncollectedLogfileEntries(int64_t value) {
-    _uncollectedLogfileEntries -= value;
-    if (_uncollectedLogfileEntries < 0) {
-      _uncollectedLogfileEntries = 0;
-    }
-  }
-
-  void setNextCompactionStartIndex(size_t);
-  size_t getNextCompactionStartIndex();
-  void setCompactionStatus(char const*);
+  bool isFullyCollected();  // should not be exposed
+
+  void setNextCompactionStartIndex(size_t index) {
+    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
+    _nextCompactionStartIndex = index;
+  }
+
+  size_t getNextCompactionStartIndex() {
+    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
+    return _nextCompactionStartIndex;
+  }
+
+  void setCompactionStatus(char const* reason) {
+    TRI_ASSERT(reason != nullptr);
+    MUTEX_LOCKER(mutexLocker, _compactionStatusLock);
+    _lastCompactionStatus = reason;
+  }
+
   double lastCompactionStamp() const { return _lastCompactionStamp; }
   void lastCompactionStamp(double value) { _lastCompactionStamp = value; }

   void setRevisionError() { _revisionError = true; }

   // SECTION: Meta Information
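Note that setCompactionStatus stores the char const* it is given without copying it, so only strings with static storage duration (in practice, string literals) are safe to pass. A caller-side sketch (the function is assumed, not part of this diff):

    // Sketch: the reason pointer is stored, not copied, so pass literals only.
    void compactionStep(arangodb::LogicalCollection* collection) {
      collection->setCompactionStatus("compaction started");
      // ... perform the compaction run ...
      collection->setCompactionStatus("compaction finished");
    }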
@ -611,8 +610,6 @@ class LogicalCollection {
   char const* _lastCompactionStatus;
   double _lastCompactionStamp;

-  std::atomic<int64_t> _uncollectedLogfileEntries;
-
   /// @brief flag that is set to true when the documents are
   /// initially enumerated and the primary index is built
   bool _isInitialIteration;
@ -101,6 +101,8 @@ class PhysicalCollection {
   virtual void updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) = 0;
   virtual bool updateRevisionConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal) = 0;
   virtual void removeRevision(TRI_voc_rid_t revisionId, bool updateStats) = 0;

+  virtual bool isFullyCollected() const = 0;

  protected:
   LogicalCollection* _logicalCollection;
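This new pure virtual moves the "is the WAL fully collected?" question behind the storage-engine interface; the concrete MMFiles implementation is not part of this excerpt. A minimal sketch of what an engine could do, assuming it keeps the same atomic counter that this commit removes from LogicalCollection (class and member names are illustrative only):

    // Sketch only; the other pure virtuals of PhysicalCollection are omitted.
    class SketchPhysicalCollection : public PhysicalCollection {
     public:
      bool isFullyCollected() const override {
        // fully collected once no logfile entry is waiting for the collector
        return _uncollectedLogfileEntries.load() == 0;
      }

      void increaseUncollectedLogfileEntries(int64_t value) {
        _uncollectedLogfileEntries += value;
      }

      void decreaseUncollectedLogfileEntries(int64_t value) {
        _uncollectedLogfileEntries -= value;
        if (_uncollectedLogfileEntries < 0) {
          _uncollectedLogfileEntries = 0;
        }
      }

     private:
      std::atomic<int64_t> _uncollectedLogfileEntries{0};
    };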
@ -97,6 +97,52 @@ else ()
   add_dependencies(arangodump zlibstatic)
 endif ()

+################################################################################
+## arangoexport
+################################################################################
+
+if (MSVC)
+  generate_product_version(ProductVersionFiles_arangoexport
+    NAME arangoexport
+    FILE_DESCRIPTION ${ARANGO_EXPORT_FRIENDLY_STRING}
+    ICON ${ARANGO_ICON}
+    VERSION_MAJOR ${CPACK_PACKAGE_VERSION_MAJOR}
+    VERSION_MINOR ${CPACK_PACKAGE_VERSION_MINOR}
+    VERSION_PATCH ${CPACK_PACKAGE_VERSION_PATCH}
+    VERSION_REVISION ${BUILD_ID}
+  )
+endif ()
+
+add_executable(${BIN_ARANGOEXPORT}
+  ${ProductVersionFiles_arangoexport}
+  ${PROJECT_SOURCE_DIR}/lib/Basics/WorkMonitorDummy.cpp
+  Export/ExportFeature.cpp
+  Export/arangoexport.cpp
+  Shell/ClientFeature.cpp
+  Shell/ConsoleFeature.cpp
+  V8Client/ArangoClientHelper.cpp
+)
+
+target_link_libraries(${BIN_ARANGOEXPORT}
+  ${LIB_ARANGO}
+  ${MSVC_LIBS}
+  ${SYSTEM_LIBRARIES}
+  boost_system
+  boost_boost
+)
+
+install(
+  TARGETS ${BIN_ARANGOEXPORT}
+  RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+install_config(arangoexport)
+
+if (NOT USE_PRECOMPILED_V8)
+  add_dependencies(arangoexport zlibstatic v8_build) # v8_build includes ICU build
+else ()
+  add_dependencies(arangoexport zlibstatic) # v8_build includes ICU build
+endif ()
+
 ################################################################################
 ## arangoimp
 ################################################################################
@ -0,0 +1,601 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2016 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Manuel Baesler
////////////////////////////////////////////////////////////////////////////////

#include "ExportFeature.h"

#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/FileUtils.h"
#include "Basics/StringUtils.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Shell/ClientFeature.h"
#include "SimpleHttpClient/GeneralClientConnection.h"
#include "SimpleHttpClient/SimpleHttpClient.h"
#include "SimpleHttpClient/SimpleHttpResult.h"

#include <boost/property_tree/detail/xml_parser_utils.hpp>

using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::httpclient;
using namespace arangodb::options;
using namespace boost::property_tree::xml_parser;

ExportFeature::ExportFeature(application_features::ApplicationServer* server,
                             int* result)
    : ApplicationFeature(server, "Export"),
      _collections(),
      _graphName(),
      _xgmmlLabelAttribute("label"),
      _typeExport("json"),
      _xgmmlLabelOnly(false),
      _outputDirectory(),
      _overwrite(false),
      _progress(true),
      _firstLine(true),
      _skippedDeepNested(0),
      _httpRequestsDone(0),
      _currentCollection(),
      _currentGraph(),
      _result(result) {
  requiresElevatedPrivileges(false);
  setOptional(false);
  startsAfter("Client");
  startsAfter("Config");
  startsAfter("Logger");

  _outputDirectory =
      FileUtils::buildFilename(FileUtils::currentDirectory(), "export");
}

void ExportFeature::collectOptions(
    std::shared_ptr<options::ProgramOptions> options) {
  options->addOption(
      "--collection",
      "restrict to collection name (can be specified multiple times)",
      new VectorParameter<StringParameter>(&_collections));

  options->addOption("--graph-name", "name of a graph to export",
                     new StringParameter(&_graphName));

  options->addOption("--xgmml-label-only", "export only xgmml label",
                     new BooleanParameter(&_xgmmlLabelOnly));

  options->addOption("--xgmml-label-attribute",
                     "specify document attribute that will be the xgmml label",
                     new StringParameter(&_xgmmlLabelAttribute));

  options->addOption("--output-directory", "output directory",
                     new StringParameter(&_outputDirectory));

  options->addOption("--overwrite", "overwrite data in output directory",
                     new BooleanParameter(&_overwrite));

  options->addOption("--progress", "show progress",
                     new BooleanParameter(&_progress));

  std::unordered_set<std::string> exportsWithUpperCase = {"json", "jsonl", "xgmml",
                                                          "JSON", "JSONL", "XGMML"};
  std::unordered_set<std::string> exports = {"json", "jsonl", "xgmml"};
  std::vector<std::string> exportsVector(exports.begin(), exports.end());
  std::string exportsJoined = StringUtils::join(exportsVector, ", ");
  options->addOption(
      "--type", "type of export (" + exportsJoined + ")",
      new DiscreteValuesParameter<StringParameter>(&_typeExport, exportsWithUpperCase));
}

void ExportFeature::validateOptions(
    std::shared_ptr<options::ProgramOptions> options) {
  auto const& positionals = options->processingResult()._positionals;
  size_t n = positionals.size();

  if (1 == n) {
    _outputDirectory = positionals[0];
  } else if (1 < n) {
    LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at most one directory, got " +
                                            StringUtils::join(positionals, ", ");
    FATAL_ERROR_EXIT();
  }

  // trim trailing slash from path because it may cause problems on Windows
  if (!_outputDirectory.empty() &&
      _outputDirectory.back() == TRI_DIR_SEPARATOR_CHAR) {
    TRI_ASSERT(_outputDirectory.size() > 0);
    _outputDirectory.pop_back();
  }

  if (_graphName.empty() && _collections.empty()) {
    LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at least one collection or one graph name";
    FATAL_ERROR_EXIT();
  }

  std::transform(_typeExport.begin(), _typeExport.end(), _typeExport.begin(), ::tolower);

  if (_typeExport == "xgmml" && _graphName.empty()) {
    LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting a graph name to dump a graph";
    FATAL_ERROR_EXIT();
  }
}

void ExportFeature::prepare() {
  bool isDirectory = false;
  bool isEmptyDirectory = false;

  if (!_outputDirectory.empty()) {
    isDirectory = TRI_IsDirectory(_outputDirectory.c_str());

    if (isDirectory) {
      std::vector<std::string> files(TRI_FullTreeDirectory(_outputDirectory.c_str()));
      // we don't care if the target directory is empty
      isEmptyDirectory = (files.size() <= 1);  // TODO: TRI_FullTreeDirectory always returns at least one element (""), even if directory is empty?
    }
  }

  if (_outputDirectory.empty() ||
      (TRI_ExistsFile(_outputDirectory.c_str()) && !isDirectory)) {
    LOG_TOPIC(FATAL, Logger::SYSCALL) << "cannot write to output directory '"
                                      << _outputDirectory << "'";
    FATAL_ERROR_EXIT();
  }

  if (isDirectory && !isEmptyDirectory && !_overwrite) {
    LOG_TOPIC(FATAL, Logger::SYSCALL) << "output directory '" << _outputDirectory
                                      << "' already exists. use \"--overwrite true\" to "
                                         "overwrite data in it";
    FATAL_ERROR_EXIT();
  }

  if (!isDirectory) {
    long systemError;
    std::string errorMessage;
    int res = TRI_CreateDirectory(_outputDirectory.c_str(), systemError,
                                  errorMessage);

    if (res != TRI_ERROR_NO_ERROR) {
      LOG_TOPIC(ERR, Logger::SYSCALL) << "unable to create output directory '"
                                      << _outputDirectory << "': " << errorMessage;
      FATAL_ERROR_EXIT();
    }
  }
}

void ExportFeature::start() {
  ClientFeature* client = application_features::ApplicationServer::getFeature<ClientFeature>("Client");

  int ret = EXIT_SUCCESS;
  *_result = ret;

  std::unique_ptr<SimpleHttpClient> httpClient;

  try {
    httpClient = client->createHttpClient();
  } catch (...) {
    LOG_TOPIC(FATAL, Logger::COMMUNICATION) << "cannot create server connection, giving up!";
    FATAL_ERROR_EXIT();
  }

  httpClient->setLocationRewriter(static_cast<void*>(client), &rewriteLocation);
  httpClient->setUserNamePassword("/", client->username(), client->password());

  // must stay here in order to establish the connection
  httpClient->getServerVersion();

  if (!httpClient->isConnected()) {
    LOG_TOPIC(ERR, Logger::COMMUNICATION) << "Could not connect to endpoint '" << client->endpoint()
                                          << "', database: '" << client->databaseName() << "', username: '"
                                          << client->username() << "'";
    LOG_TOPIC(FATAL, Logger::COMMUNICATION) << httpClient->getErrorMessage();
    FATAL_ERROR_EXIT();
  }

  // successfully connected
  std::cout << "Connected to ArangoDB '"
            << httpClient->getEndpointSpecification() << "', version "
            << httpClient->getServerVersion() << ", database: '"
            << client->databaseName() << "', username: '" << client->username()
            << "'" << std::endl;

  uint64_t exportedSize = 0;

  if (_typeExport == "json" || _typeExport == "jsonl") {
    if (_collections.size()) {
      collectionExport(httpClient.get());

      for (auto const& collection : _collections) {
        std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR + collection + "." + _typeExport;
        int64_t fileSize = TRI_SizeFile(filePath.c_str());

        if (0 < fileSize) {
          exportedSize += fileSize;
        }
      }
    }
  } else if (_typeExport == "xgmml" && _graphName.size()) {
    graphExport(httpClient.get());
    std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR + _graphName + "." + _typeExport;
    int64_t fileSize = TRI_SizeFile(filePath.c_str());

    if (0 < fileSize) {
      exportedSize += fileSize;
    }
  }

  std::cout << "Processed " << _collections.size() << " collection(s), wrote "
            << exportedSize << " Byte(s), " << _httpRequestsDone
            << " HTTP request(s)" << std::endl;

  *_result = ret;
}

void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
  std::string errorMsg;

  for (auto const& collection : _collections) {
    if (_progress) {
      std::cout << "# Exporting collection '" << collection << "'..." << std::endl;
    }

    _currentCollection = collection;

    std::string fileName =
        _outputDirectory + TRI_DIR_SEPARATOR_STR + collection + "." + _typeExport;

    // remove an existing file first
    if (TRI_ExistsFile(fileName.c_str())) {
      TRI_UnlinkFile(fileName.c_str());
    }

    int fd = -1;
    TRI_DEFER(TRI_CLOSE(fd));

    std::string const url = "_api/cursor";

    VPackBuilder post;
    post.openObject();
    post.add("query", VPackValue("FOR doc IN @@collection RETURN doc"));
    post.add("bindVars", VPackValue(VPackValueType::Object));
    post.add("@collection", VPackValue(collection));
    post.close();
    post.close();

    std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
    VPackSlice body = parsedBody->slice();

    fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
                    S_IRUSR | S_IWUSR);

    if (fd < 0) {
      errorMsg = "cannot write to file '" + fileName + "'";
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
    }

    _firstLine = true;
    if (_typeExport == "json") {
      std::string openingBracket = "[\n";
      writeToFile(fd, openingBracket, fileName);
    }

    writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);

    while (body.hasKey("id")) {
      std::string const url = "/_api/cursor/" + body.get("id").copyString();
      parsedBody = httpCall(httpClient, url, rest::RequestType::PUT);
      body = parsedBody->slice();

      writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);
    }
    if (_typeExport == "json") {
      std::string closingBracket = "]\n";
      writeToFile(fd, closingBracket, fileName);
    }
  }
}

void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it, std::string const& fileName) {
  std::string line;

  for (auto const& doc : it) {
    line.clear();

    if (_firstLine && _typeExport == "json") {
      _firstLine = false;
    } else if (!_firstLine && _typeExport == "json") {
      line.push_back(',');
    }

    line += doc.toJson();
    line.push_back('\n');
    writeToFile(fd, line, fileName);
  }
}

void ExportFeature::writeToFile(int fd, std::string& line, std::string const& fileName) {
  if (!TRI_WritePointer(fd, line.c_str(), line.size())) {
    std::string errorMsg = "cannot write to file '" + fileName + "'";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
  }
}

std::shared_ptr<VPackBuilder> ExportFeature::httpCall(SimpleHttpClient* httpClient, std::string const& url, rest::RequestType requestType, std::string postBody) {
  std::string errorMsg;

  std::unique_ptr<SimpleHttpResult> response(
      httpClient->request(requestType, url, postBody.c_str(), postBody.size()));
  _httpRequestsDone++;

  if (response == nullptr || !response->isComplete()) {
    errorMsg =
        "got invalid response from server: " + httpClient->getErrorMessage();
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
  }

  std::shared_ptr<VPackBuilder> parsedBody;

  if (response->wasHttpError()) {
    if (response->getHttpReturnCode() == 404) {
      if (_currentGraph.size()) {
        LOG_TOPIC(FATAL, Logger::CONFIG) << "Graph '" << _currentGraph << "' not found.";
      } else if (_currentCollection.size()) {
        LOG_TOPIC(FATAL, Logger::CONFIG) << "Collection '" << _currentCollection << "' not found.";
      }

      FATAL_ERROR_EXIT();
    } else {
      parsedBody = response->getBodyVelocyPack();
      std::cout << parsedBody->toJson() << std::endl;
      errorMsg = "got invalid response from server: HTTP " +
                 StringUtils::itoa(response->getHttpReturnCode()) + ": " +
                 response->getHttpReturnMessage();
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
    }
  }

  try {
    parsedBody = response->getBodyVelocyPack();
  } catch (...) {
    errorMsg = "got malformed JSON response from server";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
  }

  VPackSlice body = parsedBody->slice();

  if (!body.isObject()) {
    errorMsg = "got malformed JSON response from server";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
  }

  return parsedBody;
}

void ExportFeature::graphExport(SimpleHttpClient* httpClient) {
  std::string errorMsg;

  _currentGraph = _graphName;

  if (_collections.empty()) {
    if (_progress) {
      std::cout << "# Export graph '" << _graphName << "'" << std::endl;
    }
    std::string const url = "/_api/gharial/" + _graphName;
    std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::GET);
    VPackSlice body = parsedBody->slice();

    std::unordered_set<std::string> collections;

    for (auto const& edgeDefs : VPackArrayIterator(body.get("graph").get("edgeDefinitions"))) {
      collections.insert(edgeDefs.get("collection").copyString());

      for (auto const& from : VPackArrayIterator(edgeDefs.get("from"))) {
        collections.insert(from.copyString());
      }

      for (auto const& to : VPackArrayIterator(edgeDefs.get("to"))) {
        collections.insert(to.copyString());
      }
    }

    for (auto const& cn : collections) {
      _collections.push_back(cn);
    }
  } else {
    if (_progress) {
      std::cout << "# Export graph with collections "
                << StringUtils::join(_collections, ", ") << " as '" << _graphName
                << "'" << std::endl;
    }
  }

  std::string fileName = _outputDirectory + TRI_DIR_SEPARATOR_STR + _graphName + "." + _typeExport;

  // remove an existing file first
  if (TRI_ExistsFile(fileName.c_str())) {
    TRI_UnlinkFile(fileName.c_str());
  }

  int fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC, S_IRUSR | S_IWUSR);

  if (fd < 0) {
    errorMsg = "cannot write to file '" + fileName + "'";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
  }
  TRI_DEFER(TRI_CLOSE(fd));

  std::string xmlHeader = R"(<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<graph label=")";
  writeToFile(fd, xmlHeader, fileName);
  writeToFile(fd, _graphName, fileName);

  xmlHeader = R"("
xmlns="http://www.cs.rpi.edu/XGMML"
directed="1">
)";
  writeToFile(fd, xmlHeader, fileName);

  for (auto const& collection : _collections) {
    if (_progress) {
      std::cout << "# Exporting collection '" << collection << "'..." << std::endl;
    }

    std::string const url = "_api/cursor";

    VPackBuilder post;
    post.openObject();
    post.add("query", VPackValue("FOR doc IN @@collection RETURN doc"));
    post.add("bindVars", VPackValue(VPackValueType::Object));
    post.add("@collection", VPackValue(collection));
    post.close();
    post.close();

    std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
    VPackSlice body = parsedBody->slice();

    writeGraphBatch(fd, VPackArrayIterator(body.get("result")), fileName);

    while (body.hasKey("id")) {
      std::string const url = "/_api/cursor/" + body.get("id").copyString();
      parsedBody = httpCall(httpClient, url, rest::RequestType::PUT);
      body = parsedBody->slice();

      writeGraphBatch(fd, VPackArrayIterator(body.get("result")), fileName);
    }
  }
  std::string closingGraphTag = "</graph>\n";
  writeToFile(fd, closingGraphTag, fileName);

  if (_skippedDeepNested) {
    std::cout << "skipped " << _skippedDeepNested << " deep nested objects / arrays" << std::endl;
  }
}

void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it, std::string const& fileName) {
  std::string xmlTag;

  for (auto const& doc : it) {
    if (doc.hasKey("_from")) {
      xmlTag = "<edge label=\"" + encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) && doc.get(_xgmmlLabelAttribute).isString() ? doc.get(_xgmmlLabelAttribute).copyString() : "Default-Label") +
               "\" source=\"" + encode_char_entities(doc.get("_from").copyString()) + "\" target=\"" + encode_char_entities(doc.get("_to").copyString()) + "\"";
      writeToFile(fd, xmlTag, fileName);
      if (!_xgmmlLabelOnly) {
        xmlTag = ">\n";
        writeToFile(fd, xmlTag, fileName);

        for (auto const& it : VPackObjectIterator(doc)) {
          xmlTag = encode_char_entities(it.key.copyString());
          xgmmlWriteOneAtt(fd, fileName, it.value, xmlTag);
        }

        xmlTag = "</edge>\n";
        writeToFile(fd, xmlTag, fileName);
      } else {
        xmlTag = " />\n";
        writeToFile(fd, xmlTag, fileName);
      }
    } else {
      xmlTag = "<node label=\"" + encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) && doc.get(_xgmmlLabelAttribute).isString() ? doc.get(_xgmmlLabelAttribute).copyString() : "Default-Label") +
               "\" id=\"" + encode_char_entities(doc.get("_id").copyString()) + "\"";
      writeToFile(fd, xmlTag, fileName);
      if (!_xgmmlLabelOnly) {
        xmlTag = ">\n";
        writeToFile(fd, xmlTag, fileName);

        for (auto const& it : VPackObjectIterator(doc)) {
          xmlTag = encode_char_entities(it.key.copyString());
          xgmmlWriteOneAtt(fd, fileName, it.value, xmlTag);
        }

        xmlTag = "</node>\n";
        writeToFile(fd, xmlTag, fileName);
      } else {
        xmlTag = " />\n";
        writeToFile(fd, xmlTag, fileName);
      }
    }
  }
}

void ExportFeature::xgmmlWriteOneAtt(int fd, std::string const& fileName, VPackSlice const& slice, std::string& name, int deep) {
  std::string value, type, xmlTag;

  if (deep == 0 &&
      (name == "_id" || name == "_key" || name == "_rev" || name == "_from" || name == "_to")) {
    return;
  }

  if (slice.isInteger()) {
    type = "integer";
    value = slice.toString();

  } else if (slice.isDouble()) {
    type = "real";
    value = slice.toString();

  } else if (slice.isBool()) {
    type = "boolean";
    value = slice.toString();

  } else if (slice.isString()) {
    type = "string";
    value = slice.copyString();

  } else if (slice.isArray() || slice.isObject()) {
    if (0 < deep) {
      if (_skippedDeepNested == 0) {
        std::cout << "Warning: skip deep nested objects / arrays" << std::endl;
      }
      _skippedDeepNested++;
      return;
    }

  } else {
    xmlTag = " <att name=\"" + name + "\" type=\"string\" value=\"" + encode_char_entities(slice.toString()) + "\"/>\n";
    writeToFile(fd, xmlTag, fileName);
    return;
  }

  if (!type.empty()) {
    xmlTag = " <att name=\"" + name + "\" type=\"" + type + "\" value=\"" + encode_char_entities(value) + "\"/>\n";
    writeToFile(fd, xmlTag, fileName);

  } else if (slice.isArray()) {
    xmlTag = " <att name=\"" + name + "\" type=\"list\">\n";
    writeToFile(fd, xmlTag, fileName);

    for (auto const& val : VPackArrayIterator(slice)) {
      xgmmlWriteOneAtt(fd, fileName, val, name, deep + 1);
    }

    xmlTag = " </att>\n";
    writeToFile(fd, xmlTag, fileName);

  } else if (slice.isObject()) {
    xmlTag = " <att name=\"" + name + "\" type=\"list\">\n";
    writeToFile(fd, xmlTag, fileName);

    for (auto const& it : VPackObjectIterator(slice)) {
      std::string name = encode_char_entities(it.key.copyString());
      xgmmlWriteOneAtt(fd, fileName, it.value, name, deep + 1);
    }

    xmlTag = " </att>\n";
    writeToFile(fd, xmlTag, fileName);
  }
}
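Two illustrative notes on the file above. First, both export paths drain the server-side cursor by PUTting to /_api/cursor/<id> for as long as the response carries an "id"; a slightly more defensive variant of the same loop (a sketch, not part of this commit, assuming the final batch reports "hasMore" as false) would consult that flag as well:

    // Sketch only: "id" may already be absent on the last response,
    // and a missing "hasMore" attribute also terminates the loop.
    while (body.hasKey("id") && body.get("hasMore").isTrue()) {
      std::string const nextUrl = "/_api/cursor/" + body.get("id").copyString();
      parsedBody = httpCall(httpClient, nextUrl, rest::RequestType::PUT);
      body = parsedBody->slice();
      writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);
    }

Second, graphExport/writeGraphBatch/xgmmlWriteOneAtt emit one <node> element per vertex document and one <edge> element per edge document. A hand-written fragment of the resulting output shape (for a hypothetical graph, not produced by running the tool):

    // Illustrative XGMML fragment; the layout follows the writer above.
    char const* kSampleXgmml = R"(<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <graph label="myGraph"
    xmlns="http://www.cs.rpi.edu/XGMML"
    directed="1">
    <node label="Alice" id="persons/alice">
     <att name="age" type="integer" value="42"/>
    </node>
    <edge label="knows" source="persons/alice" target="persons/bob">
     <att name="since" type="string" value="2016"/>
    </edge>
    </graph>
    )";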
@ -0,0 +1,83 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2016 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Manuel Baesler
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGODB_EXPORT_EXPORT_FEATURE_H
#define ARANGODB_EXPORT_EXPORT_FEATURE_H 1

#include "ApplicationFeatures/ApplicationFeature.h"
#include "V8Client/ArangoClientHelper.h"
#include "lib/Rest/CommonDefines.h"
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

namespace arangodb {
namespace httpclient {
class GeneralClientConnection;
class SimpleHttpClient;
class SimpleHttpResult;
}

class ExportFeature final : public application_features::ApplicationFeature,
                            public ArangoClientHelper {
 public:
  ExportFeature(application_features::ApplicationServer* server,
                int* result);

 public:
  void collectOptions(std::shared_ptr<options::ProgramOptions>) override;
  void validateOptions(
      std::shared_ptr<options::ProgramOptions> options) override;
  void prepare() override final;
  void start() override final;

 private:
  void collectionExport(httpclient::SimpleHttpClient* httpClient);
  void writeCollectionBatch(int fd, VPackArrayIterator it, std::string const& fileName);
  void graphExport(httpclient::SimpleHttpClient* httpClient);
  void writeGraphBatch(int fd, VPackArrayIterator it, std::string const& fileName);
  void xgmmlWriteOneAtt(int fd, std::string const& fileName, VPackSlice const& slice, std::string& name, int deep = 0);

  void writeToFile(int fd, std::string& string, std::string const& fileName);
  std::shared_ptr<VPackBuilder> httpCall(httpclient::SimpleHttpClient* httpClient, std::string const& url, arangodb::rest::RequestType, std::string postBody = "");

 private:
  std::vector<std::string> _collections;
  std::string _graphName;
  std::string _xgmmlLabelAttribute;
  std::string _typeExport;
  bool _xgmmlLabelOnly;

  std::string _outputDirectory;
  bool _overwrite;
  bool _progress;

  bool _firstLine;
  uint64_t _skippedDeepNested;
  uint64_t _httpRequestsDone;
  std::string _currentCollection;
  std::string _currentGraph;

  int* _result;
};
}

#endif
@ -0,0 +1,83 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////

#include "Basics/Common.h"
#include "Basics/directories.h"

#include "ApplicationFeatures/ConfigFeature.h"
#include "ApplicationFeatures/GreetingsFeature.h"
#include "ApplicationFeatures/ShutdownFeature.h"
#include "ApplicationFeatures/TempFeature.h"
#include "ApplicationFeatures/VersionFeature.h"
#include "Basics/ArangoGlobalContext.h"
#include "Export/ExportFeature.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Random/RandomFeature.h"
#include "Shell/ClientFeature.h"
#include "Ssl/SslFeature.h"

using namespace arangodb;
using namespace arangodb::application_features;

int main(int argc, char* argv[]) {
  ArangoGlobalContext context(argc, argv, BIN_DIRECTORY);
  context.installHup();

  std::shared_ptr<options::ProgramOptions> options(new options::ProgramOptions(
      argv[0], "Usage: arangoexport [<options>]", "For more information use:", BIN_DIRECTORY));

  ApplicationServer server(options, BIN_DIRECTORY);

  int ret;

  server.addFeature(new ClientFeature(&server));
  server.addFeature(new ConfigFeature(&server, "arangoexport"));
  server.addFeature(new GreetingsFeature(&server, "arangoexport"));
  server.addFeature(new ExportFeature(&server, &ret));
  server.addFeature(new LoggerFeature(&server, false));
  server.addFeature(new RandomFeature(&server));
  server.addFeature(new ShutdownFeature(&server, {"Export"}));
  server.addFeature(new SslFeature(&server));
  server.addFeature(new TempFeature(&server, "arangoexport"));
  server.addFeature(new VersionFeature(&server));

  try {
    server.run(argc, argv);
    if (server.helpShown()) {
      // --help was displayed
      ret = EXIT_SUCCESS;
    }
  } catch (std::exception const& ex) {
    LOG_TOPIC(ERR, Logger::STARTUP) << "arangoexport terminated because of an unhandled exception: "
                                    << ex.what();
    ret = EXIT_FAILURE;
  } catch (...) {
    LOG_TOPIC(ERR, Logger::STARTUP) << "arangoexport terminated because of an unhandled exception of "
                                       "unknown type";
    ret = EXIT_FAILURE;
  }

  return context.exit(ret);
}
@ -24,6 +24,11 @@ install_debinfo(
   "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
   "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}"
   "${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}")

+install_debinfo(
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
+  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
+  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}"
+  "${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}")
+
 install_debinfo(
   "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
   "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
@ -61,6 +61,20 @@ install(
 install_config(arangorestore)

+set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX})
+set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX})
+if (NOT MSVC AND CMAKE_STRIP)
+  execute_process(COMMAND "rm" -f ${STRIP_FILE})
+  execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR})
+  execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE})
+  set(FILE ${STRIP_FILE})
+endif()
+install(
+  PROGRAMS ${FILE}
+  DESTINATION ${CMAKE_INSTALL_BINDIR})
+install_config(arangoexport)
+
 set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
 set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
 if (NOT MSVC AND CMAKE_STRIP)
@ -0,0 +1,10 @@
# config file for arangoexport

[server]
endpoint = tcp://127.0.0.1:8529
authentication = true
# username = root
# password =

[log]
file = -
@ -0,0 +1,7 @@
[server]
authentication = false
# username = root
# password =

[log]
file = -
@ -561,7 +561,10 @@ actions.defineHttp({
         let oldValue = ArangoAgency.get('Plan/DBServers/' + body.primary);
         actions.resultError(req, res, actions.HTTP_PRECONDITION_FAILED, 0,
           'Primary does not have the given oldSecondary as ' +
-          'its secondary, current value: ' + JSON.stringify(oldValue));
+          'its secondary, current value: ' + JSON.stringify(
+            fetchKey(oldValue, 'arango', 'Plan', 'DBServers', body.primary)
+          ));
         return;
       }
       throw e;
@ -577,27 +580,32 @@ actions.defineHttp({
 function changeAllShardReponsibilities (oldServer, newServer) {
   // This is only called when we have the write lock and we "only" have to
   // make sure that either all or none of the shards are moved.
-  var collections = ArangoAgency.get('Plan/Collections');
-  collections = collections.arango.Plan.Collections;
+  var databases = ArangoAgency.get('Plan/Collections');
+  databases = databases.arango.Plan.Collections;

   let operations = {};
   let preconditions = {};
-  Object.keys(collections).forEach(function (collectionKey) {
-    var collection = collections[collectionKey];
-    var old = _.cloneDeep(collection);
-
-    Object.keys(collection.shards).forEach(function (shardKey) {
-      var servers = collection.shards[shardKey];
-      collection.shards[shardKey] = servers.map(function (server) {
-        if (server === oldServer) {
-          return newServer;
-        } else {
-          return server;
-        }
-      });
-    });
-    operations[collectionKey] = collection;
-    preconditions[collectionKey] = old;
+  Object.keys(databases).forEach(function (databaseName) {
+    var collections = databases[databaseName];
+
+    Object.keys(collections).forEach(function (collectionKey) {
+      var collection = collections[collectionKey];
+
+      Object.keys(collection.shards).forEach(function (shardKey) {
+        var servers = collection.shards[shardKey];
+        var oldServers = _.cloneDeep(servers);
+        servers = servers.map(function (server) {
+          if (server === oldServer) {
+            return newServer;
+          } else {
+            return server;
+          }
+        });
+        let key = '/arango/Plan/Collections/' + databaseName + '/' + collectionKey + '/shards/' + shardKey;
+        operations[key] = servers;
+        preconditions[key] = {'old': oldServers};
+      });
+    });
   });
   return {operations, preconditions};
 }
@ -678,25 +686,24 @@ actions.defineHttp({
       }

       let operations = {};
-      operations['Plan/DBServers/' + body.secondary] = body.primary;
-      operations['Plan/DBServers/' + body.primary] = {'op': 'delete'};
-      operations['Plan/Version'] = {'op': 'increment'};
+      operations['/arango/Plan/DBServers/' + body.secondary] = body.primary;
+      operations['/arango/Plan/DBServers/' + body.primary] = {'op': 'delete'};
+      operations['/arango/Plan/Version'] = {'op': 'increment'};

       let preconditions = {};
-      preconditions['Plan/DBServers/' + body.primary] = {'old': body.secondary};
+      preconditions['/arango/Plan/DBServers/' + body.primary] = {'old': body.secondary};

       let shardChanges = changeAllShardReponsibilities(body.primary, body.secondary);
       operations = Object.assign(operations, shardChanges.operations);
       preconditions = Object.assign(preconditions, shardChanges.preconditions);

       try {
         global.ArangoAgency.write([[operations, preconditions]]);
       } catch (e) {
         if (e.code === 412) {
           let oldValue = ArangoAgency.get('Plan/DBServers/' + body.primary);
           actions.resultError(req, res, actions.HTTP_PRECONDITION_FAILED, 0,
-            'Primary does not have the given oldSecondary as ' +
-            'its secondary, current value: ' + oldValue);
+            'Could not change primary to secondary.');
           return;
         }
         throw e;
@ -38,6 +38,7 @@ const functionsDocumentation = {
   'cluster_sync': 'cluster sync tests',
   'dump': 'dump tests',
   'dump_authentication': 'dump tests with authentication',
+  'export': 'export formats tests',
   'dfdb': 'start test',
   'endpoints': 'endpoints tests',
   'foxx_manager': 'foxx manager tests',
|
@ -246,6 +247,7 @@ let ARANGODUMP_BIN;
|
||||||
let ARANGOD_BIN;
|
let ARANGOD_BIN;
|
||||||
let ARANGOIMP_BIN;
|
let ARANGOIMP_BIN;
|
||||||
let ARANGORESTORE_BIN;
|
let ARANGORESTORE_BIN;
|
||||||
|
let ARANGOEXPORT_BIN;
|
||||||
let ARANGOSH_BIN;
|
let ARANGOSH_BIN;
|
||||||
let CONFIG_ARANGODB_DIR;
|
let CONFIG_ARANGODB_DIR;
|
||||||
let CONFIG_RELATIVE_DIR;
|
let CONFIG_RELATIVE_DIR;
|
||||||
|
@ -1994,6 +1996,7 @@ let allTests = [
   'config',
   'dump',
   'dump_authentication',
+  'export',
   'dfdb',
   'endpoints',
   'http_server',
@ -2706,6 +2709,7 @@ testFuncs.config = function (options) {
     'arangodump',
     'arangoimp',
     'arangorestore',
+    'arangoexport',
     'arangosh',
     'arango-dfdb',
     'foxx-manager'
@ -2877,6 +2881,98 @@ testFuncs.dump = function (options) {
   return results;
 };

+// //////////////////////////////////////////////////////////////////////////////
+// / @brief TEST: export
+// //////////////////////////////////////////////////////////////////////////////
+
+testFuncs.export = function (options) {
+  const cluster = options.cluster ? '-cluster' : '';
+
+  print(CYAN + 'export tests...' + RESET);
+
+  const instanceInfo = startInstance('tcp', options, {}, 'export');
+
+  if (instanceInfo === false) {
+    return {
+      export: {
+        status: false,
+        message: 'failed to start server!'
+      }
+    };
+  }
+
+  print(CYAN + Date() + ': Setting up' + RESET);
+
+  const results = {};
+
+  function shutdown () {
+    print(CYAN + 'Shutting down...' + RESET);
+    shutdownInstance(instanceInfo, options);
+    print(CYAN + 'done.' + RESET);
+
+    print();
+
+    return results;
+  }
+
+  results.setup = runInArangosh(options, instanceInfo, makePathUnix('js/server/tests/export/export-setup' + cluster + '.js'));
+  if (!checkInstanceAlive(instanceInfo, options) || true !== results.setup.status) {
+    return shutdown();
+  }
+
+  print(CYAN + Date() + ': Export data' + RESET);
+
+  results.export = (() => {
+    const args = {
+      'configuration': fs.join(CONFIG_DIR, 'arangoexport.conf'),
+      'server.username': options.username,
+      'server.password': options.password,
+      'server.endpoint': instanceInfo.endpoint,
+      'server.database': 'UnitTestsExport',
+      'collection': 'UnitTestsExport',
+      'type': 'json',
+      'overwrite': true,
+      'output-directory': 'export'
+    };
+
+    return executeAndWait(ARANGOEXPORT_BIN, toArgv(args), options);
+  })();
+
+  /*
+  results.dump = runArangoDumpRestore(options, instanceInfo, 'dump',
+    'UnitTestsDumpSrc');
+
+  if (checkInstanceAlive(instanceInfo, options) &&
+    (results.dump.status === true)) {
+    print(CYAN + Date() + ': Dump and Restore - restore' + RESET);
+
+    results.restore = runArangoDumpRestore(options, instanceInfo, 'restore',
+      'UnitTestsDumpDst');
+
+    if (checkInstanceAlive(instanceInfo, options) &&
+      (results.restore.status === true)) {
+      print(CYAN + Date() + ': Dump and Restore - dump after restore' + RESET);
+
+      results.test = runInArangosh(options, instanceInfo,
+        makePathUnix('js/server/tests/dump/dump' + cluster + '.js'), {
+          'server.database': 'UnitTestsDumpDst'
+        });
+
+      if (checkInstanceAlive(instanceInfo, options) &&
+        (results.test.status === true)) {
+        print(CYAN + Date() + ': Dump and Restore - teardown' + RESET);
+
+        results.tearDown = runInArangosh(options, instanceInfo,
+          makePathUnix('js/server/tests/dump/dump-teardown' + cluster + '.js'));
+      }
+    }
+  }
+  */
+
+  return shutdown();
+};
+
 // //////////////////////////////////////////////////////////////////////////////
 // / @brief TEST: dump_authentication
 // //////////////////////////////////////////////////////////////////////////////
@ -4270,6 +4366,7 @@ function unitTest (cases, options) {
   ARANGOD_BIN = fs.join(BIN_DIR, 'arangod' + executable_ext);
   ARANGOIMP_BIN = fs.join(BIN_DIR, 'arangoimp' + executable_ext);
   ARANGORESTORE_BIN = fs.join(BIN_DIR, 'arangorestore' + executable_ext);
+  ARANGOEXPORT_BIN = fs.join(BIN_DIR, 'arangoexport' + executable_ext);
   ARANGOSH_BIN = fs.join(BIN_DIR, 'arangosh' + executable_ext);

   CONFIG_ARANGODB_DIR = fs.join(TOP_DIR, builddir, 'etc', 'arangodb3');
@ -4287,6 +4384,7 @@ function unitTest (cases, options) {
     ARANGOD_BIN,
     ARANGOIMP_BIN,
     ARANGORESTORE_BIN,
+    ARANGOEXPORT_BIN,
     ARANGOSH_BIN];
   for (let b = 0; b < checkFiles.length; ++b) {
     if (!fs.isFile(checkFiles[b])) {
@ -0,0 +1,51 @@
/*jshint globalstrict:false, strict:false, maxlen:4000, unused:false */

////////////////////////////////////////////////////////////////////////////////
/// @brief setup collections for dump/reload tests
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Manuel Baesler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

'use strict';

{
  const db = require("@arangodb").db;

  try {
    db._dropDatabase("UnitTestsExport");
  } catch (e) {}

  db._createDatabase("UnitTestsExport");

  db._useDatabase("UnitTestsExport");

  const col = db._create("UnitTestsExport");
  for (let i = 0; i < 100; ++i) {
    col.save({ _key: "export" + i, value1: i, value2: "this is export", value3: "export" + i });
  }
}

return {
  status: true
};
@ -0,0 +1,51 @@
/*jshint globalstrict:false, strict:false, maxlen:4000, unused:false */

////////////////////////////////////////////////////////////////////////////////
/// @brief setup collections for dump/reload tests
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Manuel Baesler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

'use strict';

{
  const db = require("@arangodb").db;

  try {
    db._dropDatabase("UnitTestsExport");
  } catch (e) {}

  db._createDatabase("UnitTestsExport");

  db._useDatabase("UnitTestsExport");

  const col = db._create("UnitTestsExport");
  for (let i = 0; i < 100; ++i) {
    col.save({ _key: "export" + i, value1: i, value2: "this is export", value3: "export" + i });
  }
}

return {
  status: true
};
@ -399,12 +399,14 @@ if [ "$SECONDARIES" == "1" ] ; then
   let index=1
   PORTTOPSE=`expr 8729 + $NRDBSERVERS - 1`
   for PORT in `seq 8729 $PORTTOPSE` ; do
+    let dbserverindex=$index-1
     mkdir cluster/data$PORT

     CLUSTER_ID="Secondary$index"

-    echo Registering secondary $CLUSTER_ID for "DBServer$index"
-    curl -f -X PUT --data "{\"primary\": \"DBServer$index\", \"oldSecondary\": \"none\", \"newSecondary\": \"$CLUSTER_ID\"}" -H "Content-Type: application/json" localhost:$CO_BASE/_admin/cluster/replaceSecondary
+    DBSERVER_ID=$(curl -s 127.0.0.1:$CO_BASE/_admin/cluster/health | jq '.Health | to_entries | map(select(.value.Role == "DBServer")) | .' | jq -r ".[$dbserverindex].key")
+    echo Registering secondary $CLUSTER_ID for $DBSERVER_ID
+    curl -s -f -X PUT --data "{\"primary\": \"$DBSERVER_ID\", \"oldSecondary\": \"none\", \"newSecondary\": \"$CLUSTER_ID\"}" -H "Content-Type: application/json" localhost:$CO_BASE/_admin/cluster/replaceSecondary
     echo Starting Secondary $CLUSTER_ID on port $PORT
     ${BUILD}/bin/arangod \
       -c none \
@ -111,8 +111,12 @@ function main(argv) {
   }

   if (options.hasOwnProperty('server.endpoint')) {
+    if (scriptArguments.hasOwnProperty('onlyThisOne')) {
+      throw("don't run the full suite on pre-existing servers");
+    }
     startServer = false;
     serverEndpoint = options['server.endpoint'];

   }

   let args = [theScript].concat(internal.toArgv(scriptArguments));