mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of github.com:arangodb/arangodb into devel
* 'devel' of github.com:arangodb/arangodb: (47 commits)
  - arangoexport: continue documentation
  - arangoexport: documentation continued
  - arangoexport: config for client package
  - arangoexport: refix xml attribute escaping
  - arangoexport: Documentation
  - README_maintainers: mention pythons webserver to view generated documentation
  - arangoexport: Documentation
  - arangoexport: start documentation
  - arangoexport: switch to LOG_TOPIC
  - arangoexport: xgmml encode attribute values
  - arangoexport: remove unnecessary xmlns declaration
  - arangoexport: added stats http, written size
  - arangoexport: take --xgmml-label-attribute into account
  - arangoexport: xgmml set label attribute
  - arangoexport: error msgs improvements, dont create file if collection or graph are 404
  - added arangoexport to Documentation/CMakeLists
  - updated ReleaseNotes and added NewFeatures32
  - arangoexport: added manpage
  - arangoexport: mention in CHANGELOG
  - arangoexport: inital work on test
  ...

This commit is contained in: commit 27309d1de0
@@ -97,6 +97,8 @@ js/apps/system/_admin/aardvark/APP/frontend/build/libs.js.gz
 js/apps/system/_admin/aardvark/APP/frontend/build/style.css
 js/apps/system/_admin/aardvark/APP/frontend/build/scripts.html.part
 
+js/common/tests/shell/shell-database.js
+
 3rdParty/boost/1.61.0/b2
 3rdParty/boost/1.61.0/bin.v2/
 3rdParty/boost/1.61.0/bjam
CHANGELOG

@@ -1,6 +1,11 @@
 devel
 -----
 
+* Removed undocumented internal HTTP API:
+  * PUT _api/edges
+
+  The documented GET _api/edges and the undocumented POST _api/edges remains unmodified.
+
 * moved V8 code into a git submodule
   this requires running the command

@@ -18,6 +23,12 @@ devel
   JavaScript document operations from 1239 ("illegal document revision")
   to 1200 ("conflict").
 
+* added data export tool, arangoexport.
+
+  arangoexport can be used to export collections to json and jsonl
+  and export a graph or collections to xgmml.
+
+
 v3.2.alpha1 (2017-02-05)
 ------------------------
 

@@ -15,7 +15,11 @@ if (POLICY CMP0017)
 endif ()
 
 if (NOT CMAKE_BUILD_TYPE)
-  set(CMAKE_BUILD_TYPE Release CACHE string "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." FORCE)
+  set(CMAKE_BUILD_TYPE Release
+    CACHE string
+    "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
+    FORCE
+  )
 endif ()
 
 if (NOT (CMAKE_BUILD_TYPE STREQUAL "Debug"

@@ -23,9 +27,9 @@ if (NOT (CMAKE_BUILD_TYPE STREQUAL "Debug"
   OR CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo"
   OR CMAKE_BUILD_TYPE STREQUAL "MinSizeRel"
   OR CMAKE_BUILD_TYPE STREQUAL "None"))
 
   message(FATAL_ERROR "expecting CMAKE_BUILD_TYPE: None Debug Release RelWithDebInfo MinSizeRel, got ${CMAKE_BUILD_TYPE}.")
 endif ()
-add_definitions(-DCURL_STATICLIB=1)
 
 set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "deployment target for MacOSX; adjust to your sysem")

@@ -67,12 +71,12 @@ if (DEBUG_SYNC_REPLICATION)
   add_definitions("-DDEBUG_SYNC_REPLICATION=1")
 endif ()
 
 ################################################################################
 ## ARANGODB
 ################################################################################
 # ------------------------------------------------------------------------------
 # VERSION information
 # ------------------------------------------------------------------------------
 
 set(ARANGODB_VERSION_MAJOR "3")
-set(ARANGODB_VERSION_MINOR "1")
+set(ARANGODB_VERSION_MINOR "2")
 set(ARANGODB_VERSION_REVISION "devel")
 set(ARANGODB_PACKAGE_REVISION "1")

@@ -92,6 +96,7 @@ set(ARANGODB_FRIENDLY_STRING "ArangoDB - the multi-model database")
 set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program")
 set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - export")
 set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - importer")
+set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - dataexporter")
 set(ARANGO_IMP_FRIENDLY_STRING "arangoimp - TSV/CSV/JSON importer")
 set(ARANGOSH_FRIENDLY_STRING "arangosh - commandline client")
 set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - vpack printer")

@@ -104,6 +109,7 @@ set(LIB_ARANGO_V8 arango_v8)
 set(BIN_ARANGOBENCH arangobench)
 set(BIN_ARANGOD arangod)
 set(BIN_ARANGODUMP arangodump)
+set(BIN_ARANGOEXPORT arangoexport)
 set(BIN_ARANGOIMP arangoimp)
 set(BIN_ARANGORESTORE arangorestore)
 set(BIN_ARANGOSH arangosh)

@@ -118,9 +124,9 @@ set(COPY_PACKAGES_LIST)
 set(CLEAN_PACKAGES_LIST)
 set(INSTALL_CONFIGFILES_LIST)
 
 ################################################################################
 ## VERSION FILES
 ################################################################################
 # ------------------------------------------------------------------------------
 # update files containing VERSION information
 # ------------------------------------------------------------------------------
 
 if (${CMAKE_MAJOR_VERSION} EQUAL 2)
   set(ARANGODB_BUILD_DATE "YYYY-MM-DD HH:MM:SS")

@@ -146,6 +152,12 @@ configure_file(
   NEWLINE_STYLE UNIX
 )
 
+configure_file(
+  "${CMAKE_CURRENT_SOURCE_DIR}/js/common/tests/shell/shell-database.js.in"
+  "${CMAKE_CURRENT_SOURCE_DIR}/js/common/tests/shell/shell-database.js"
+  NEWLINE_STYLE UNIX
+)
+
 ################################################################################
 ## Find the git revision
 ################################################################################

@@ -811,7 +823,13 @@ add_definitions("-DARANGODB_ZLIB_VERSION=\"${ZLIB_VERSION}\"")
 ## cURL
 ################################################################################
 
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/3rdParty/curl/curl-7.50.3/include/ ${CMAKE_CURRENT_BINARY_DIR}/3rdParty/curl/curl-7.50.3/include/curl/)
+add_definitions(-DCURL_STATICLIB=1)
+
+include_directories(
+  ${CMAKE_CURRENT_SOURCE_DIR}/3rdParty/curl/curl-7.50.3/include/
+  ${CMAKE_CURRENT_BINARY_DIR}/3rdParty/curl/curl-7.50.3/include/curl/
+)
 
 ################################################################################
 ## PATHS, installation, packages
 ################################################################################

@@ -826,6 +844,7 @@ include(ArangoDBInstall)
 if (NOT(SKIP_PACKAGING))
   include(packages/packages)
 endif()
 
 ################################################################################
 ## ERRORS FILE
 ################################################################################

@@ -904,6 +923,7 @@ list(INSERT SYSTEM_LIBRARIES 0
 add_subdirectory(lib)
 add_subdirectory(arangosh)
 add_subdirectory(arangod)
 
 if (USE_BOOST_UNITTESTS)
   add_subdirectory(UnitTests)
 endif()

@@ -926,22 +946,23 @@ if (NOT USE_PRECOMPILED_V8)
   add_dependencies(arangosh v8_build)
 endif ()
 
-#copy compile commands to source dir
+# copy compile commands to source dir
 if( EXISTS "${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json" )
-  message(STATUS "copy compile_commands.json")
-  EXECUTE_PROCESS(COMMAND ${CMAKE_COMMAND} -E copy_if_different
-    ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
-    ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
-  )
+  message(STATUS "copy compile_commands.json")
+
+  execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different
+    ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
+    ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
+  )
 endif()
 
 add_custom_target(packages
-  DEPENDS ${PACKAGES_LIST})
+  DEPENDS ${PACKAGES_LIST}
+)
 
 add_custom_target(copy_packages
-  DEPENDS ${COPY_PACKAGES_LIST})
+  DEPENDS ${COPY_PACKAGES_LIST}
+)
 
 add_custom_target(clean_packages
   DEPENDS ${CLEAN_PACKAGES_LIST}

@@ -0,0 +1,86 @@
Exporting Data from an ArangoDB database
========================================

To export data from an ArangoDB server instance, you will need to invoke _arangoexport_.
_arangoexport_ can be invoked by executing the following command:

    unix> arangoexport --collection test --output-directory "dump"

This exports the collection *test* into the directory *dump* as one big json array. Every entry
in this array is one document from the collection, in no particular order. To export more than
one collection at a time, specify multiple *--collection* options.

The default output directory is *export*.

_arangoexport_ will by default connect to the *_system* database using the default
endpoint. If you want to connect to a different database or a different endpoint,
or use authentication, you can use the following command-line options:

* *--server.database <string>*: name of the database to connect to
* *--server.endpoint <string>*: endpoint to connect to
* *--server.username <string>*: username
* *--server.password <string>*: password to use (omit this and you'll be prompted for the
  password)
* *--server.authentication <bool>*: whether or not to use authentication

Here's an example of exporting data from a non-standard endpoint, using a dedicated
[database name](../Appendix/Glossary.md#database-name):

    unix> arangoexport --server.endpoint tcp://192.168.173.13:8531 --server.username backup --server.database mydb --collection test --output-directory "my-export"

When finished, _arangoexport_ will print out a summary line with some aggregate
statistics about what it did, e.g.:

    Processed 2 collection(s), wrote 9031763 Byte(s), 78 HTTP request(s)


Export JSON
-----------

    unix> arangoexport --type json --collection test

This exports the collection *test* into the output directory *export* as one json array.

Export JSONL
------------

    unix> arangoexport --type jsonl --collection test

This exports the collection *test* into the output directory *export* as jsonl: each line is one document as JSON.
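
For illustration, the first lines of such a jsonl file might look like this (hypothetical
documents; the exact set of system attributes depends on your data):

    {"_key":"1234","_id":"test/1234","_rev":"_abcdef12---","name":"first"}
    {"_key":"1235","_id":"test/1235","_rev":"_abcdef13---","name":"second"}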

Export XGMML
------------

## XGMML specific options

*--xgmml-label-attribute* specifies the name of the attribute that will become the label in the xgmml file.

*--xgmml-label-only* set to true will export only the label, without any attributes in edges or nodes.
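
As a rough sketch of the output shape (names and values invented for illustration; the exact
structure written by _arangoexport_ may differ), an XGMML node and edge generally look like
this, with document attributes rendered as *att* elements and the label taken from the
configured label attribute:

    <node id="1" label="alice">
      <att name="age" type="integer" value="32"/>
    </node>
    <edge source="1" target="2" label="knows"/>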

## export with collection defined

    unix> arangoexport --type xgmml --graph-name mygraph --collection vertex --collection edge

This exports an unnamed graph with the vertex collection *vertex* and the edge collection *edge* into the xgmml file *mygraph.xgmml*.


## export with graph defined

    unix> arangoexport --type xgmml --graph-name mygraph

This exports the named graph *mygraph* into the xgmml file *mygraph.xgmml*.


## export XGMML without attributes

    unix> arangoexport --type xgmml --graph-name mygraph --xgmml-label-only true

This exports the named graph *mygraph* into the xgmml file *mygraph.xgmml* without the *<att>* tags.


## export XGMML with a specific label

    unix> arangoexport --type xgmml --graph-name mygraph --xgmml-label-attribute name

This exports the named graph *mygraph* into the xgmml file *mygraph.xgmml* with the label taken from the documents' attribute *name* instead of the default attribute *label*.

@@ -0,0 +1,59 @@
Features and Improvements
=========================

The following list shows in detail which features have been added or improved in
ArangoDB 3.2. ArangoDB 3.2 also contains several bugfixes that are not listed
here.

SmartGraphs
-----------


Data format
-----------


Communication Layer
-------------------


Cluster
-------


Document revisions cache
------------------------


AQL
---

### Functions added


### Optimizer improvements


### Miscellaneous improvements


Audit Log
---------


Client tools
------------

Added the tool _arangoexport_ to export collections to json and jsonl. It can also export graphs or collections to xgmml.
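
For example (shown for illustration only; see the Arangoexport documentation for the full
set of options):

    arangoexport --type jsonl --collection test --output-directory "export"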

Web Admin Interface
-------------------


Authentication
--------------


Foxx
----

@@ -4,6 +4,7 @@ Release Notes
 Whats New
 ---------
 
+- [Whats New in 3.2](NewFeatures32.md)
 - [Whats New in 3.1](NewFeatures31.md)
 - [Whats New in 3.0](NewFeatures30.md)
 - [Whats New in 2.8](NewFeatures28.md)

@@ -144,6 +144,7 @@
   * [Arangoimp](Administration/Arangoimp.md)
   * [Arangodump](Administration/Arangodump.md)
   * [Arangorestore](Administration/Arangorestore.md)
+  * [Arangoexport](Administration/Arangoexport.md)
   * [Managing Users](Administration/ManagingUsers.md)
   * [Server Configuration](Administration/Configuration/README.md)
     * [Managing Endpoints](Administration/Configuration/Endpoint.md)

@@ -192,6 +193,7 @@
 # * [Server Internals](Architecture/ServerInternals.md)
 #
 * [Release notes](ReleaseNotes/README.md)
+  * [Whats New in 3.2](ReleaseNotes/NewFeatures32.md)
   * [Whats New in 3.1](ReleaseNotes/NewFeatures31.md)
   * [Incompatible changes in 3.1](ReleaseNotes/UpgradingChanges31.md)
   * [Whats New in 3.0](ReleaseNotes/NewFeatures30.md)

@@ -17,6 +17,7 @@ if (USE_MAINTAINER_MODE)
   man1/arangodump.1
   man1/arangoimp.1
   man1/arangorestore.1
+  man1/arangoexport.1
   man1/arangosh.1
   man8/rcarangod.8
   man8/arangod.8

@@ -0,0 +1,21 @@
NAME
<COMMAND> - a tool to export collections of an ArangoDB database
SYNOPSIS
<COMMAND> [options]
DESCRIPTION
The <COMMAND> binary can be used to export collections of an ArangoDB
database to json and jsonl. It can also export a graph or collections
to xgmml.

<COMMAND> will work on the specified database only. If no database name
is specified, <COMMAND> will work on the default database ("_system").

The exported jsonl files can be re-imported into an ArangoDB database
using the arangoimp tool.
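
A re-import could look roughly like this (a sketch only: the file name is hypothetical
and the exact flags depend on the arangoimp version, see arangoimp --help):

    arangoimp --file "export/test.jsonl" --collection test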
OPTIONS
The <COMMAND> binary has many options that can be used to control its
behavior. For a complete list of options, please refer to the
ArangoDB online manual, available at https://www.arangodb.com/, or run
<COMMAND> --help.

AUTHOR
LES-TODOS

@@ -31,7 +31,12 @@ MMFiles reference removals from files:
 - Persistent
 - Geo
 - Fulltext
 
+- index API
+- StorageEngine specific AQL functions
+  - Register for specific function names => branches to StorageEngine impl
+  - Storage Engine can implement these functions with specific code and interna
+  - e.g.: Geo, Fulltext
+- Replace Usage of new callback-based IndexIterator
 
 
 in progress

@@ -45,7 +50,6 @@ to do
 - fix includes during API conversion
   - DML API
   - DDL API
-  - index API
 - add new serialization RW lock to LogicalCollection. all DML ops must acquire it in read mode, the explicit lock command must acquire it in write mode.
 - StorageEngineAPI readDocument requires 2 functions:
   - void readDocument(TOKEN id, VPackBuilder& result) => Collects the document and inserts it asis into result. Does NOT clear result.

@@ -56,11 +60,6 @@ to do
   - We need to keep in mind the cluster. If a DBServer creates this token-type it has to be translated BEFORE the register is teleported to coordinator
 - Remove temporary wrapper LogCol::readDocument()
 - InitialySyncer.cpp knows details of StorageEngine MMFiles
-- StorageEngine specific AQL functions
-  - Register for specific function names => branches to StorageEngine impl
-  - Storage Engine can implement these functions with specific code and interna
-  - e.g.: Geo, Fulltext
-- Replace Usage of new callback-based IndexIterator
 
 MMFiles are known to the following files:
 * arangod/Aql/Functions.cpp

@@ -96,12 +95,6 @@ MMFiles are known to the following files:
 - Geo
-
-
-
-
-
-
 
 Questions
 ---------
 * For GeoIndex `ignoreNull: true` and `constraint: false` are only set in Cluster mode. Is that art or can it go away?

@@ -113,3 +106,4 @@ OpenIssues Hacki
 * SingleServerTraverser API does NOT takeover responsibility for slice data. getMore() hopes slices to not go away
   * This API can be improved if we make better use of those callbacks.
 * ShortestPathBlock does assume that slices do not walk away.
+* EdgeCollectionInfos in ShortestPath could share one conditionBuilder.

@@ -462,7 +462,9 @@ It does not, if `SUMMARY.md` in `Books/ppbooks/` looks like this:
 If sub-chapters do not show in the navigation, try another browser (Firefox).
 Chrome's security policies are pretty strict about localhost and file://
 protocol. You may access the docs through a local web server to lift the
-restrictions.
+restrictions. You can use Python's built-in HTTP server for this.
+
+    ~/books$ python -m SimpleHTTPServer 8000
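
(The command above is the Python 2 form; with Python 3 the equivalent built-in server is started with `python3 -m http.server 8000`.)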
 
 To only regenerate one file (faster) you may specify a filter:

@@ -233,7 +233,7 @@ void AgencyFeature::start() {
 
   _agent.reset(new consensus::Agent(consensus::config_t(
       _size, _poolSize, _minElectionTimeout, _maxElectionTimeout, endpoint,
-      _agencyEndpoints, _supervision, _waitForSync, _supervisionFrequency,
+      _agencyEndpoints, _supervision, false, _supervisionFrequency,
       _compactionStepSize, _compactionKeepSize, _supervisionGracePeriod,
       _cmdLineTimings)));

@@ -26,6 +26,9 @@
 #include "Agency/Agent.h"
 #include "Agency/Job.h"
 
+#include <algorithm>
+#include <vector>
+
 using namespace arangodb::consensus;
 
 FailedLeader::FailedLeader(Node const& snapshot, Agent* agent,

@@ -111,13 +114,13 @@ bool FailedLeader::start() {
   std::string curPath =
       curColPrefix + _database + "/" + _collection + "/" + _shard + "/servers";
 
-  Node const& current = _snapshot(curPath);
-  Node const& planned = _snapshot(planPath);
+  auto const& current = _snapshot(curPath).slice();
+  auto const& planned = _snapshot(planPath).slice();
 
-  if (current.slice().length() == 1) {
+  if (current.length() == 1) {
     LOG_TOPIC(ERR, Logger::AGENCY)
         << "Failed to change leadership for shard " + _shard + " from " + _from
-        + " to " + _to + ". No in-sync followers:" + current.slice().toJson();
+        + " to " + _to + ". No in-sync followers:" + current.toJson();
     return false;
   }

@@ -161,11 +164,30 @@ bool FailedLeader::start() {
   pending.close();
 
   // --- Cyclic shift in sync servers
-  pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
-  for (size_t i = 1; i < current.slice().length(); ++i) {
-    pending.add(current.slice()[i]);
-  }
-  pending.add(current.slice()[0]);
+  // 1. only proceed if any in sync
+  // 2. find 1st in sync that is is not in failedservers
+  // 3. put all in sync not failed up front
+  // 4. put failed leader
+  // 5. remaining in plan
+  // Distribute shards like to come!
+  std::vector<std::string> planv;
+  for (auto const& i : VPackArrayIterator(planned)) {
+    planv.push_back(i.copyString());
+  }
+
+  pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
+  for (auto const& i : VPackArrayIterator(current)) {
+    std::string s = i.copyString();
+    if (s != _from) {
+      pending.add(i);
+      planv.erase(std::remove(planv.begin(), planv.end(), s), planv.end());
+    }
+  }
+  pending.add(VPackValue(_from));
+  for (auto const& i : planv) {
+    pending.add(VPackValue(i));
+  }
+
   pending.close();
 
   // --- Block shard

@@ -186,12 +208,12 @@ bool FailedLeader::start() {
 
   // --- Check that Current servers are as we expect
   pending.add(_agencyPrefix + curPath, VPackValue(VPackValueType::Object));
-  pending.add("old", current.slice());
+  pending.add("old", current);
   pending.close();
 
   // --- Check that Current servers are as we expect
   pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Object));
-  pending.add("old", planned.slice());
+  pending.add("old", planned);
   pending.close();
 
   // --- Check if shard is not blocked

@@ -45,8 +45,7 @@ EnumerateCollectionBlock::EnumerateCollectionBlock(
           _collection->getName(),
           (ep->_random ? arangodb::Transaction::CursorType::ANY
                        : arangodb::Transaction::CursorType::ALL),
-          Transaction::IndexHandle(), VPackSlice(), _mmdr.get(), 0, UINT64_MAX,
-          1000, false)),
+          _mmdr.get(), 0, UINT64_MAX, 1000, false)),
       _mustStoreResult(true) {
   TRI_ASSERT(_cursor->successful());
 }

@@ -23,6 +23,7 @@
 
 #include "Graphs.h"
 #include "Aql/AstNode.h"
+#include "Basics/StaticStrings.h"
 #include "Basics/VelocyPackHelper.h"
 
 #include <velocypack/Iterator.h>

@@ -39,10 +40,16 @@ EdgeConditionBuilder::EdgeConditionBuilder(AstNode* modCondition)
       _toCondition(nullptr),
       _modCondition(modCondition),
       _containsCondition(false) {
-  TRI_ASSERT(_modCondition->type == NODE_TYPE_OPERATOR_NARY_AND);
+#ifdef TRI_ENABLE_MAINTAINER_MODE
+  if (_modCondition != nullptr) {
+    TRI_ASSERT(_modCondition->type == NODE_TYPE_OPERATOR_NARY_AND);
+  }
+#endif
 }
 
 void EdgeConditionBuilder::addConditionPart(AstNode const* part) {
+  TRI_ASSERT(_modCondition != nullptr);
+  TRI_ASSERT(_modCondition->type == NODE_TYPE_OPERATOR_NARY_AND);
   TRI_ASSERT(!_containsCondition);
   // The ordering is only maintained before we request a specific
   // condition

@@ -71,7 +78,7 @@ void EdgeConditionBuilder::swapSides(AstNode* cond) {
   TRI_ASSERT(_modCondition->numMembers() > 0);
 }
 
-AstNode const* EdgeConditionBuilder::getOutboundCondition() {
+AstNode* EdgeConditionBuilder::getOutboundCondition() {
   if (_fromCondition == nullptr) {
     buildFromCondition();
   }

@@ -80,7 +87,7 @@ AstNode const* EdgeConditionBuilder::getOutboundCondition() {
   return _modCondition;
 }
 
-AstNode const* EdgeConditionBuilder::getInboundCondition() {
+AstNode* EdgeConditionBuilder::getInboundCondition() {
   if (_toCondition == nullptr) {
     buildToCondition();
   }

@@ -89,6 +96,72 @@ AstNode const* EdgeConditionBuilder::getInboundCondition() {
   return _modCondition;
 }
 
+EdgeConditionBuilderContainer::EdgeConditionBuilderContainer() :
+  EdgeConditionBuilder(nullptr) {
+  auto node = std::make_unique<AstNode>(NODE_TYPE_OPERATOR_NARY_AND);
+  _modCondition = node.get();
+  _astNodes.emplace_back(node.get());
+  node.release();
+
+  auto comp = std::make_unique<AstNode>(NODE_TYPE_VALUE);
+  comp->setValueType(VALUE_TYPE_STRING);
+  comp->setStringValue("", 0);
+  _astNodes.emplace_back(comp.get());
+  _compareNode = comp.release();
+
+  _var = _varGen.createTemporaryVariable();
+
+  auto varNode = std::make_unique<AstNode>(NODE_TYPE_REFERENCE);
+  varNode->setData(_var);
+  _astNodes.emplace_back(varNode.get());
+  _varNode = varNode.release();
+}
+
+EdgeConditionBuilderContainer::~EdgeConditionBuilderContainer() {
+  // we have to clean up the AstNodes
+  for (auto it : _astNodes) {
+    delete it;
+  }
+  _astNodes.clear();
+}
+
+AstNode* EdgeConditionBuilderContainer::createEqCheck(AstNode const* access) {
+  auto node = std::make_unique<AstNode>(NODE_TYPE_OPERATOR_BINARY_EQ);
+  node->reserve(2);
+  node->addMember(access);
+  node->addMember(_compareNode);
+  _astNodes.emplace_back(node.get());
+  return node.release();
+}
+
+AstNode* EdgeConditionBuilderContainer::createAttributeAccess(std::string const& attr) {
+  auto node = std::make_unique<AstNode>(NODE_TYPE_ATTRIBUTE_ACCESS);
+  node->addMember(_varNode);
+  node->setStringValue(attr.c_str(), attr.length());
+  _astNodes.emplace_back(node.get());
+  return node.release();
+}
+
+void EdgeConditionBuilderContainer::buildFromCondition() {
+  TRI_ASSERT(_fromCondition == nullptr);
+  auto access = createAttributeAccess(StaticStrings::FromString);
+  _fromCondition = createEqCheck(access);
+}
+
+void EdgeConditionBuilderContainer::buildToCondition() {
+  TRI_ASSERT(_toCondition == nullptr);
+  auto access = createAttributeAccess(StaticStrings::ToString);
+  _toCondition = createEqCheck(access);
+}
+
+Variable const* EdgeConditionBuilderContainer::getVariable() const {
+  return _var;
+}
+
+void EdgeConditionBuilderContainer::setVertexId(std::string const& id) {
+  _compareNode->setStringValue(id.c_str(), id.length());
+}
+
 void Graph::insertVertexCollections(VPackSlice& arr) {
   TRI_ASSERT(arr.isArray());
   for (auto const& c : VPackArrayIterator(arr)) {

@@ -25,6 +25,7 @@
 #define ARANGOD_AQL_GRAPHS_H 1
 
 #include "Basics/Common.h"
+#include "Aql/VariableGenerator.h"
 
 namespace arangodb {

@@ -82,16 +83,64 @@ class EdgeConditionBuilder {
   void addConditionPart(AstNode const*);
 
   // Get the complete condition for outbound edges
-  AstNode const* getOutboundCondition();
+  AstNode* getOutboundCondition();
 
   // Get the complete condition for inbound edges
-  AstNode const* getInboundCondition();
+  AstNode* getInboundCondition();
 
  private:
   // Internal helper to swap _from and _to parts
  void swapSides(AstNode* condition);
 };
 
+// Wrapper around EdgeConditionBuilder that takes responsibility for all
+// AstNodes created with it. Can be used outside of an AQL query.
+class EdgeConditionBuilderContainer final : public EdgeConditionBuilder {
+ public:
+  EdgeConditionBuilderContainer();
+
+  ~EdgeConditionBuilderContainer();
+
+  // Get a pointer to the used variable
+  Variable const* getVariable() const;
+
+  // Set the id of the searched vertex
+  // NOTE: This class does not take responsiblity for the string.
+  // So caller needs to make sure it does not run out of scope
+  // as long as these conditions are used.
+  void setVertexId(std::string const&);
+
+ protected:
+  // Create the _fromCondition for the first time.
+  void buildFromCondition() override;
+
+  // Create the _toCondition for the first time.
+  void buildToCondition() override;
+
+ private:
+  // Create the equality node using the given access
+  AstNode* createEqCheck(AstNode const* access);
+
+  // Create a node with access of attr on the variable
+  AstNode* createAttributeAccess(std::string const& attr);
+
+ private:
+  // List of AstNodes this container is responsible for
+  std::vector<AstNode*> _astNodes;
+
+  // The variable node that is used to hold the edge
+  AstNode* _varNode;
+
+  // The value the edge is compared to
+  AstNode* _compareNode;
+
+  // Reference to the exchangeable variable node
+  Variable* _var;
+
+  // Reference to the VariableGenerator
+  VariableGenerator _varGen;
+};
+
 class Graph {
  public:
   explicit Graph(arangodb::velocypack::Slice const&);

@@ -75,14 +75,16 @@ struct ConstDistanceExpanderLocal {
 
   void operator()(VPackSlice const& v, std::vector<VPackSlice>& resEdges,
                   std::vector<VPackSlice>& neighbors) {
+    TRI_ASSERT(v.isString());
+    std::string id = v.copyString();
     ManagedDocumentResult* mmdr = _block->_mmdr.get();
     std::unique_ptr<arangodb::OperationCursor> edgeCursor;
     for (auto const& edgeCollection : _block->_collectionInfos) {
       TRI_ASSERT(edgeCollection != nullptr);
       if (_isReverse) {
-        edgeCursor = edgeCollection->getReverseEdges(v, mmdr);
+        edgeCursor = edgeCollection->getReverseEdges(id, mmdr);
       } else {
-        edgeCursor = edgeCollection->getEdges(v, mmdr);
+        edgeCursor = edgeCollection->getEdges(id, mmdr);
       }
 
       LogicalCollection* collection = edgeCursor->collection();

@@ -208,15 +210,17 @@ struct EdgeWeightExpanderLocal {
 
   void operator()(VPackSlice const& source,
                   std::vector<ArangoDBPathFinder::Step*>& result) {
+    TRI_ASSERT(source.isString());
+    std::string id = source.copyString();
     ManagedDocumentResult* mmdr = _block->_mmdr.get();
     std::unique_ptr<arangodb::OperationCursor> edgeCursor;
     std::unordered_map<VPackSlice, size_t> candidates;
     for (auto const& edgeCollection : _block->_collectionInfos) {
       TRI_ASSERT(edgeCollection != nullptr);
       if (_reverse) {
-        edgeCursor = edgeCollection->getReverseEdges(source, mmdr);
+        edgeCursor = edgeCollection->getReverseEdges(id, mmdr);
       } else {
-        edgeCursor = edgeCollection->getEdges(source, mmdr);
+        edgeCursor = edgeCollection->getEdges(id, mmdr);
       }
 
       candidates.clear();

@@ -28,8 +28,10 @@
 #include "Aql/Ast.h"
 #include "Aql/Collection.h"
 #include "Aql/ExecutionPlan.h"
+#include "Cluster/ClusterComm.h"
 #include "Indexes/Index.h"
 #include "Utils/CollectionNameResolver.h"
+#include "VocBase/LogicalCollection.h"
 #include "V8Server/V8Traverser.h"
 
 #include <velocypack/Iterator.h>

@@ -97,26 +99,61 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan, size_t id,
 
   TRI_edge_direction_e baseDirection = parseDirection(direction);
 
   std::unordered_map<std::string, TRI_edge_direction_e> seenCollections;
+  auto addEdgeColl = [&](std::string const& n, TRI_edge_direction_e dir) -> void {
+    if (dir == TRI_EDGE_ANY) {
+      _directions.emplace_back(TRI_EDGE_OUT);
+      _edgeColls.emplace_back(n);
+
+      _directions.emplace_back(TRI_EDGE_IN);
+      _edgeColls.emplace_back(std::move(n));
+    } else {
+      _directions.emplace_back(dir);
+      _edgeColls.emplace_back(std::move(n));
+    }
+  };
+
+  auto ci = ClusterInfo::instance();
 
   if (graph->type == NODE_TYPE_COLLECTION_LIST) {
     size_t edgeCollectionCount = graph->numMembers();
     auto resolver = std::make_unique<CollectionNameResolver>(vocbase);
     _graphInfo.openArray();
     _edgeColls.reserve(edgeCollectionCount);
     _directions.reserve(edgeCollectionCount);
 
     // List of edge collection names
     for (size_t i = 0; i < edgeCollectionCount; ++i) {
+      TRI_edge_direction_e dir = TRI_EDGE_ANY;
       auto col = graph->getMember(i);
 
       if (col->type == NODE_TYPE_DIRECTION) {
+        TRI_ASSERT(col->numMembers() == 2);
+        auto dirNode = col->getMember(0);
         // We have a collection with special direction.
-        TRI_ASSERT(col->getMember(0)->isIntValue());
-        TRI_edge_direction_e dir = parseDirection(col->getMember(0)->getIntValue());
-        _directions.emplace_back(dir);
+        TRI_ASSERT(dirNode->isIntValue());
+        dir = parseDirection(dirNode->getIntValue());
         col = col->getMember(1);
       } else {
-        _directions.emplace_back(baseDirection);
+        dir = baseDirection;
       }
 
       std::string eColName = col->getString();
 
       // now do some uniqueness checks for the specified collections
       auto it = seenCollections.find(eColName);
       if (it != seenCollections.end()) {
        if ((*it).second != dir) {
          std::string msg("conflicting directions specified for collection '" +
                          std::string(eColName));
          THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID,
                                         msg);
        }
        // do not re-add the same collection!
        continue;
       }
       seenCollections.emplace(eColName, dir);
 
       auto eColType = resolver->getCollectionTypeCluster(eColName);
       if (eColType != TRI_COL_TYPE_EDGE) {
         std::string msg("collection type invalid for collection '" +

@@ -125,10 +162,23 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan, size_t id,
         THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID,
                                        msg);
       }
-      _graphInfo.add(VPackValue(eColName));
-      _edgeColls.emplace_back(std::move(eColName));
-    }
 
+      _graphInfo.add(VPackValue(eColName));
+      if (ServerState::instance()->isRunningInCluster()) {
+        auto c = ci->getCollection(_vocbase->name(), eColName);
+        if (!c->isSmart()) {
+          addEdgeColl(eColName, dir);
+        } else {
+          std::vector<std::string> names;
+          names = c->realNamesForRead();
+          for (auto const& name : names) {
+            addEdgeColl(name, dir);
+          }
+        }
+      } else {
+        addEdgeColl(eColName, dir);
+      }
+    }
     _graphInfo.close();
   } else {
     if (_edgeColls.empty()) {

@@ -150,8 +200,20 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan, size_t id,
       _directions.reserve(length);
 
       for (const auto& n : eColls) {
-        _edgeColls.push_back(n);
-        _directions.emplace_back(baseDirection);
+        if (ServerState::instance()->isRunningInCluster()) {
+          auto c = ci->getCollection(_vocbase->name(), n);
+          if (!c->isSmart()) {
+            addEdgeColl(n, baseDirection);
+          } else {
+            std::vector<std::string> names;
+            names = c->realNamesForRead();
+            for (auto const& name : names) {
+              addEdgeColl(name, baseDirection);
+            }
+          }
+        } else {
+          addEdgeColl(n, baseDirection);
+        }
       }
     }
   }

@@ -982,25 +982,28 @@ void TraversalNode::prepareOptions() {
     // We now have to check if we need _from / _to inside the index lookup and which position
     // it is used in. Such that the traverser can update the respective string value
     // in-place
+    // TODO This place can be optimized.
     if (info.idxHandles[0].isMMFilesEdgeIndex()) {
       // Special case for edge index....
       // It serves two attributes, but can only be asked for one of them...
       info.conditionNeedUpdate = true;
       info.conditionMemberToUpdate = 0;
     } else {
-      std::vector<std::vector<std::string>> fieldNames =
-          info.idxHandles[0].fieldNames();
-      size_t max = info.indexCondition->numMembers();
-      TRI_ASSERT(max <= fieldNames.size());
-      for (size_t i = 0; i < max; ++i) {
-        auto const& f = fieldNames[i];
-        if (f.size() == 1 && f[0] == usedField) {
-          // we only work for _from and _to not _from.foo which would be null anyways...
+      std::pair<Variable const*, std::vector<basics::AttributeName>> pathCmp;
+      for (size_t i = 0; i < info.indexCondition->numMembers(); ++i) {
+        // We search through the nary-and and look for EQ - _from/_to
+        auto eq = info.indexCondition->getMemberUnchecked(i);
+        if (eq->type != NODE_TYPE_OPERATOR_BINARY_EQ) {
+          // No equality. Skip
+          continue;
+        }
+        TRI_ASSERT(eq->numMembers() == 2);
+        // It is sufficient to only check member one.
+        // We build the condition this way.
+        auto mem = eq->getMemberUnchecked(0);
+        if (mem->isAttributeAccessForVariable(pathCmp)) {
+          if (pathCmp.first != _tmpObjVariable) {
+            continue;
+          }
+          if (pathCmp.second.size() == 1 && pathCmp.second[0].name == usedField) {
             info.conditionNeedUpdate = true;
             info.conditionMemberToUpdate = i;
+            break;
           }
+          continue;
         }
       }
     }
     _options->_baseLookupInfos.emplace_back(std::move(info));

@@ -1052,26 +1055,31 @@ void TraversalNode::prepareOptions() {
     // We now have to check if we need _from / _to inside the index lookup and which position
     // it is used in. Such that the traverser can update the respective string value
     // in-place
+    // TODO This place can be optimized.
     if (info.idxHandles[0].isMMFilesEdgeIndex()) {
       // Special case for edge index....
       // It serves two attributes, but can only be asked for one of them...
       info.conditionNeedUpdate = true;
       info.conditionMemberToUpdate = 0;
     } else {
-      std::vector<std::vector<std::string>> fieldNames =
-          info.idxHandles[0].fieldNames();
-      for (size_t i = 0; i < fieldNames.size(); ++i) {
-        auto f = fieldNames[i];
-        if (f.size() == 1 && f[0] == usedField) {
-          // we only work for _from and _to not _from.foo which would be null anyways...
+      std::pair<Variable const*, std::vector<basics::AttributeName>> pathCmp;
+      for (size_t i = 0; i < info.indexCondition->numMembers(); ++i) {
+        // We search through the nary-and and look for EQ - _from/_to
+        auto eq = info.indexCondition->getMemberUnchecked(i);
+        if (eq->type != NODE_TYPE_OPERATOR_BINARY_EQ) {
+          // No equality. Skip
+          continue;
+        }
+        TRI_ASSERT(eq->numMembers() == 2);
+        // It is sufficient to only check member one.
+        // We build the condition this way.
+        auto mem = eq->getMemberUnchecked(0);
+        if (mem->isAttributeAccessForVariable(pathCmp)) {
+          if (pathCmp.first != _tmpObjVariable) {
+            continue;
+          }
+          if (pathCmp.second.size() == 1 && pathCmp.second[0].name == usedField) {
             info.conditionNeedUpdate = true;
             info.conditionMemberToUpdate = i;
+            break;
           }
+          continue;
         }
       }
 
       infos.emplace_back(std::move(info));
     }
   }

@@ -19,7 +19,7 @@ if (USE_MAINTAINER_MODE AND NOT MSVC)
     ${FLEX_EXECUTABLE} Aql/tokens.cpp Aql/tokens.ll
     MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/Aql/tokens.ll
     VERBATIM
-    )
+  )
 
   add_custom_command(
     OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/Aql/grammar.cpp

@@ -28,14 +28,14 @@ if (USE_MAINTAINER_MODE AND NOT MSVC)
     ${BISON_EXECUTABLE} Aql/grammar.cpp Aql/grammar.y
     MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/Aql/grammar.y
     VERBATIM
-    )
+  )
 
   add_custom_target(clean_aql_autogenerated
-    COMMAND rm -f Aql/tokens.cpp Aql/tokens.h Aql/grammar.cpp Aql/grammar.h)
+    COMMAND rm -f Aql/tokens.cpp Aql/tokens.h Aql/grammar.cpp Aql/grammar.h
+  )
 
   list(APPEND CLEAN_AUTOGENERATED_FILES clean_aql_autogenerated)
   set(CLEAN_AUTOGENERATED_FILES ${CLEAN_AUTOGENERATED_FILES} PARENT_SCOPE)
 
 endif ()
 
 ################################################################################

@@ -43,7 +43,6 @@ endif ()
 ################################################################################
 
 if (MSVC)
-
   generate_product_version(ProductVersionFiles
     NAME arangod
     FILE_DESCRIPTION ${ARANGODB_FRIENDLY_STRING}

@@ -365,12 +364,13 @@ SET(ARANGOD_SOURCES
 )
 
 if (NOT MSVC)
-  SET(ARANGOD_SOURCES ${ARANGOD_SOURCES} Scheduler/AcceptorUnixDomain.cpp Scheduler/SocketUnixDomain.cpp)
+  set(ARANGOD_SOURCES ${ARANGOD_SOURCES} Scheduler/AcceptorUnixDomain.cpp Scheduler/SocketUnixDomain.cpp)
 endif()
 
 add_executable(${BIN_ARANGOD} ${ARANGOD_SOURCES})
 
-if(USE_SSL)
-  target_compile_definitions(${BIN_ARANGOD} PUBLIC "ARANGODB_SSL_ENABLED=1")
+if (USE_SSL)
+  target_compile_definitions(${BIN_ARANGOD} PUBLIC "ARANGODB_SSL_ENABLED=1")
 endif()
 
 target_link_libraries(${BIN_ARANGOD}

@@ -388,7 +388,8 @@ target_link_libraries(${BIN_ARANGOD}
 
 install(
   TARGETS ${BIN_ARANGOD}
-  RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
+  RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}
+)
 
 install_config(arangod)

@@ -404,7 +405,8 @@ endif ()
 
 install_command_alias(${BIN_ARANGOD}
   ${CMAKE_INSTALL_SBINDIR}
-  arango-dfdb)
+  arango-dfdb
+)
 
 install_config(arango-dfdb)

@@ -414,7 +416,8 @@ install_config(arango-dfdb)
 
 install_command_alias(${BIN_ARANGOD}
   ${CMAKE_INSTALL_SBINDIR}
-  arango-secure-installation)
+  arango-secure-installation
+)
 
 install_config(arango-secure-installation)

@@ -424,6 +427,7 @@ install_config(arango-secure-installation)
 
 install_command_alias(${BIN_ARANGOD}
   ${CMAKE_INSTALL_SBINDIR}
-  arango-init-database)
+  arango-init-database
+)
 
 install_config(arango-init-database)

@@ -274,13 +274,6 @@ class Index {
                                          arangodb::aql::Variable const*,
                                          bool) const;
 
-  virtual IndexIterator* iteratorForSlice(arangodb::Transaction*,
-                                          ManagedDocumentResult*,
-                                          arangodb::velocypack::Slice const,
-                                          bool) const {
-    THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
-  }
-
   virtual arangodb::aql::AstNode* specializeCondition(
       arangodb::aql::AstNode*, arangodb::aql::Variable const*) const;

@@ -175,52 +175,6 @@ void MMFilesEdgeIndexIterator::reset() {
   _lastElement = MMFilesSimpleIndexElement();
 }
 
-AnyDirectionMMFilesEdgeIndexIterator::AnyDirectionMMFilesEdgeIndexIterator(LogicalCollection* collection,
-                                                                           arangodb::Transaction* trx,
-                                                                           ManagedDocumentResult* mmdr,
-                                                                           arangodb::MMFilesEdgeIndex const* index,
-                                                                           MMFilesEdgeIndexIterator* outboundIterator,
-                                                                           MMFilesEdgeIndexIterator* inboundIterator)
-    : IndexIterator(collection, trx, mmdr, index),
-      _outbound(outboundIterator),
-      _inbound(inboundIterator),
-      _useInbound(false) {}
-
-bool AnyDirectionMMFilesEdgeIndexIterator::next(TokenCallback const& cb,
-                                                size_t limit) {
-  auto inWrapper = [&](DocumentIdentifierToken const& res) {
-    if (_seen.find(res) == _seen.end()) {
-      --limit;
-      cb(res);
-    }
-  };
-
-  auto outWrapper = [&](DocumentIdentifierToken const& res) {
-    _seen.emplace(res);
-    --limit;
-    cb(res);
-  };
-
-  while (limit > 0) {
-    if (_useInbound) {
-      return _inbound->next(inWrapper, limit);
-    } else {
-      _outbound->next(outWrapper, limit);
-      if (limit > 0) {
-        _useInbound = true;
-      }
-    }
-  }
-  return true;
-}
-
-void AnyDirectionMMFilesEdgeIndexIterator::reset() {
-  _useInbound = false;
-  _seen.clear();
-  _outbound->reset();
-  _inbound->reset();
-}
-
 MMFilesEdgeIndex::MMFilesEdgeIndex(TRI_idx_iid_t iid, arangodb::LogicalCollection* collection)
     : Index(iid, collection,
             std::vector<std::vector<arangodb::basics::AttributeName>>(

@@ -653,70 +607,6 @@ void MMFilesEdgeIndex::expandInSearchValues(VPackSlice const slice,
   builder.close();
 }
 
-/// @brief creates an IndexIterator for the given VelocyPackSlices.
-/// The searchValue is a an Array with exactly two Entries.
-/// If the first is set it means we are searching for _from (OUTBOUND),
-/// if the second is set we are searching for _to (INBOUND).
-/// if both are set we are search for ANY direction. Result is made
-/// DISTINCT.
-/// Each defined slice that is set has to be list of keys to search for.
-/// Each key needs to have the following formats:
-///
-///   1) {"eq": <compareValue>} // The value in index is exactly this
-///
-/// Reverse is not supported, hence ignored
-/// NOTE: The iterator is only valid as long as the slice points to
-/// a valid memory region.
-IndexIterator* MMFilesEdgeIndex::iteratorForSlice(
-    arangodb::Transaction* trx,
-    ManagedDocumentResult* mmdr,
-    arangodb::velocypack::Slice const searchValues, bool) const {
-  if (!searchValues.isArray() || searchValues.length() != 2) {
-    // Invalid searchValue
-    return nullptr;
-  }
-
-  VPackArrayIterator it(searchValues);
-  TRI_ASSERT(it.valid());
-
-  VPackSlice const from = it.value();
-
-  it.next();
-  TRI_ASSERT(it.valid());
-  VPackSlice const to = it.value();
-
-  if (!from.isNull()) {
-    TRI_ASSERT(from.isArray());
-    if (!to.isNull()) {
-      // ANY search
-      TRI_ASSERT(to.isArray());
-      TransactionBuilderLeaser fromBuilder(trx);
-      std::unique_ptr<VPackBuilder> fromKeys(fromBuilder.steal());
-      fromKeys->add(from);
-      auto left = std::make_unique<MMFilesEdgeIndexIterator>(_collection, trx, mmdr, this, _edgesFrom, fromKeys);
-
-      TransactionBuilderLeaser toBuilder(trx);
-      std::unique_ptr<VPackBuilder> toKeys(toBuilder.steal());
-      toKeys->add(to);
-      auto right = std::make_unique<MMFilesEdgeIndexIterator>(_collection, trx, mmdr, this, _edgesTo, toKeys);
-      return new AnyDirectionMMFilesEdgeIndexIterator(_collection, trx, mmdr, this, left.release(), right.release());
-    }
-    // OUTBOUND search
-    TRI_ASSERT(to.isNull());
-    TransactionBuilderLeaser builder(trx);
-    std::unique_ptr<VPackBuilder> keys(builder.steal());
-    keys->add(from);
-    return new MMFilesEdgeIndexIterator(_collection, trx, mmdr, this, _edgesFrom, keys);
-  } else {
-    // INBOUND search
-    TRI_ASSERT(to.isArray());
-    TransactionBuilderLeaser builder(trx);
-    std::unique_ptr<VPackBuilder> keys(builder.steal());
-    keys->add(to);
-    return new MMFilesEdgeIndexIterator(_collection, trx, mmdr, this, _edgesTo, keys);
-  }
-}
-
 /// @brief create the iterator
 IndexIterator* MMFilesEdgeIndex::createEqIterator(
     arangodb::Transaction* trx,

@@ -71,33 +71,6 @@ class MMFilesEdgeIndexIterator final : public IndexIterator {
   MMFilesSimpleIndexElement _lastElement;
 };
 
-class AnyDirectionMMFilesEdgeIndexIterator final : public IndexIterator {
- public:
-  AnyDirectionMMFilesEdgeIndexIterator(LogicalCollection* collection,
-                                       arangodb::Transaction* trx,
-                                       ManagedDocumentResult* mmdr,
-                                       arangodb::MMFilesEdgeIndex const* index,
-                                       MMFilesEdgeIndexIterator* outboundIterator,
-                                       MMFilesEdgeIndexIterator* inboundIterator);
-
-  ~AnyDirectionMMFilesEdgeIndexIterator() {
-    delete _outbound;
-    delete _inbound;
-  }
-
-  char const* typeName() const override { return "any-edge-index-iterator"; }
-
-  bool next(TokenCallback const& cb, size_t limit) override;
-
-  void reset() override;
-
- private:
-  MMFilesEdgeIndexIterator* _outbound;
-  MMFilesEdgeIndexIterator* _inbound;
-  std::unordered_set<DocumentIdentifierToken> _seen;
-  bool _useInbound;
-};
-
 class MMFilesEdgeIndex final : public Index {
  public:
   MMFilesEdgeIndex() = delete;

@@ -178,24 +151,6 @@ class MMFilesEdgeIndex final : public Index {
   void expandInSearchValues(arangodb::velocypack::Slice const,
                             arangodb::velocypack::Builder&) const override;
 
-  /// @brief creates an IndexIterator for the given VelocyPackSlices.
-  /// The searchValue is a an Array with exactly two Entries, one of them
-  /// has to be NONE.
-  /// If the first is set it means we are searching for _from (OUTBOUND),
-  /// if the second is set we are searching for _to (INBOUND).
-  /// The slice that is set has to be list of keys to search for.
-  /// Each key needs to have the following formats:
-  ///
-  ///   1) {"eq": <compareValue>} // The value in index is exactly this
-  ///
-  /// Reverse is not supported, hence ignored
-  /// NOTE: The iterator is only valid as long as the slice points to
-  /// a valid memory region.
-  IndexIterator* iteratorForSlice(arangodb::Transaction*,
-                                  ManagedDocumentResult*,
-                                  arangodb::velocypack::Slice const,
-                                  bool) const override;
-
  private:
   /// @brief create the iterator
   IndexIterator* createEqIterator(arangodb::Transaction*,

@@ -982,22 +982,6 @@ IndexIterator* MMFilesHashIndex::iteratorForCondition(
   return new MMFilesHashIndexIterator(_collection, trx, mmdr, this, node, reference);
 }
 
-/// @brief creates an IndexIterator for the given VelocyPackSlices
-IndexIterator* MMFilesHashIndex::iteratorForSlice(arangodb::Transaction* trx,
-                                                  ManagedDocumentResult* mmdr,
-                                                  VPackSlice const searchValues,
-                                                  bool) const {
-  if (!searchValues.isArray()) {
-    // Invalid searchValue
-    return nullptr;
-  }
-
-  TransactionBuilderLeaser builder(trx);
-  std::unique_ptr<VPackBuilder> keys(builder.steal());
-  keys->add(searchValues);
-  return new MMFilesHashIndexIteratorVPack(_collection, trx, mmdr, this, keys);
-}
-
 /// @brief specializes the condition for use with the index
 arangodb::aql::AstNode* MMFilesHashIndex::specializeCondition(
     arangodb::aql::AstNode* node,

@@ -190,17 +190,6 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
                                          arangodb::aql::Variable const*,
                                          bool) const override;
 
-  /// @brief creates an IndexIterator for the given VelocyPackSlices
-  /// Each slice represents the field at the same position. (order matters)
-  /// And each slice has to be an object of one of the following types:
-  ///   1) {"eq": <compareValue>} // The value in index is exactly this
-  ///   2) {"in": <compareValues>} // The value in index os one of them
-  IndexIterator* iteratorForSlice(arangodb::Transaction*,
-                                  ManagedDocumentResult*,
-                                  arangodb::velocypack::Slice const,
-                                  bool) const override;
-
   arangodb::aql::AstNode* specializeCondition(
       arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;

@@ -448,22 +448,6 @@ IndexIterator* MMFilesPrimaryIndex::iteratorForCondition(
   return nullptr;
 }
 
-/// @brief creates an IndexIterator for the given slice
-IndexIterator* MMFilesPrimaryIndex::iteratorForSlice(
-    arangodb::Transaction* trx,
-    ManagedDocumentResult* mmdr,
-    arangodb::velocypack::Slice const searchValues, bool) const {
-  if (!searchValues.isArray()) {
-    // Invalid searchValue
-    return nullptr;
-  }
-  // lease builder, but immediately pass it to the unique_ptr so we don't leak
-  TransactionBuilderLeaser builder(trx);
-  std::unique_ptr<VPackBuilder> keys(builder.steal());
-  keys->add(searchValues);
-  return new MMFilesPrimaryIndexIterator(_collection, trx, mmdr, this, keys);
-}
-
 /// @brief specializes the condition for use with the index
 arangodb::aql::AstNode* MMFilesPrimaryIndex::specializeCondition(
     arangodb::aql::AstNode* node,

@@ -202,11 +202,6 @@ class MMFilesPrimaryIndex final : public Index {
                                          arangodb::aql::Variable const*,
                                          bool) const override;
 
-  IndexIterator* iteratorForSlice(arangodb::Transaction*,
-                                  ManagedDocumentResult*,
-                                  arangodb::velocypack::Slice const,
-                                  bool) const override;
-
   arangodb::aql::AstNode* specializeCondition(
       arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;

@@ -501,55 +501,6 @@ void SkiplistInLookupBuilder::buildSearchValues() {
 }
 
-MMFilesSkiplistIterator::MMFilesSkiplistIterator(LogicalCollection* collection, arangodb::Transaction* trx,
-                                                 ManagedDocumentResult* mmdr,
-                                                 arangodb::MMFilesSkiplistIndex const* index,
-                                                 bool reverse, Node* left, Node* right)
-    : IndexIterator(collection, trx, mmdr, index),
-      _reverse(reverse),
-      _leftEndPoint(left),
-      _rightEndPoint(right) {
-  reset();  // Initializes the cursor
-}
-
-/// @brief Reset the cursor
-void MMFilesSkiplistIterator::reset() {
-  if (_reverse) {
-    _cursor = _rightEndPoint;
-  } else {
-    _cursor = _leftEndPoint;
-  }
-}
-
-/// @brief Get the next elements in the skiplist
-bool MMFilesSkiplistIterator::next(TokenCallback const& cb, size_t limit) {
-  while (limit > 0) {
-    if (_cursor == nullptr) {
-      return false;
-    }
-    Node* tmp = _cursor;
-    if (_reverse) {
-      if (_cursor == _leftEndPoint) {
-        _cursor = nullptr;
-      } else {
-        _cursor = _cursor->prevNode();
-      }
-    } else {
-      if (_cursor == _rightEndPoint) {
-        _cursor = nullptr;
-      } else {
-        _cursor = _cursor->nextNode();
-      }
-    }
-    TRI_ASSERT(tmp != nullptr);
-    TRI_ASSERT(tmp->document() != nullptr);
-    cb(MMFilesToken{tmp->document()->revisionId()});
-    --limit;
-  }
-  return true;
-}
-
 MMFilesSkiplistIterator2::MMFilesSkiplistIterator2(LogicalCollection* collection, arangodb::Transaction* trx,
                                                    ManagedDocumentResult* mmdr,
                                                    arangodb::MMFilesSkiplistIndex const* index,
                                                    TRI_Skiplist const* skiplist, size_t numPaths,

@@ -572,7 +523,7 @@ MMFilesSkiplistIterator2::MMFilesSkiplistIterator2(LogicalCollection* collection
 
 /// @brief Checks if the interval is valid. It is declared invalid if
 /// one border is nullptr or the right is lower than left.
-bool MMFilesSkiplistIterator2::intervalValid(void* userData, Node* left, Node* right) const {
+bool MMFilesSkiplistIterator::intervalValid(void* userData, Node* left, Node* right) const {
   if (left == nullptr) {
     return false;
   }

@@ -591,7 +542,7 @@ bool MMFilesSkiplistIterator2::intervalValid(void* userData, Node* left, Node* r
 }
 
 /// @brief Reset the cursor
-void MMFilesSkiplistIterator2::reset() {
+void MMFilesSkiplistIterator::reset() {
   // If _intervals is empty at this point
   // the cursor does not contain any
   // document at all. Reset is pointless

@@ -606,7 +557,7 @@ void MMFilesSkiplistIterator2::reset() {
   }
 }
 
-bool MMFilesSkiplistIterator2::next(TokenCallback const& cb, size_t limit) {
+bool MMFilesSkiplistIterator::next(TokenCallback const& cb, size_t limit) {
   while (limit > 0) {
     if (_cursor == nullptr) {
       // We are exhausted already, sorry

@@ -636,7 +587,7 @@ bool MMFilesSkiplistIterator2::next(TokenCallback const& cb, size_t limit) {
   return true;
 }
 
-void MMFilesSkiplistIterator2::forwardCursor() {
+void MMFilesSkiplistIterator::forwardCursor() {
   _currentInterval++;
   if (_currentInterval < _intervals.size()) {
     auto const& interval = _intervals[_currentInterval];

@@ -653,7 +604,7 @@ void MMFilesSkiplistIterator2::forwardCursor() {
   }
 }
 
-void MMFilesSkiplistIterator2::initNextInterval() {
+void MMFilesSkiplistIterator::initNextInterval() {
   // We will always point the cursor to the resulting interval if any.
   // We do not take responsibility for the Nodes!
   Node* rightBorder = nullptr;

@@ -1285,15 +1236,15 @@ IndexIterator* MMFilesSkiplistIndex::iteratorForCondition(
   if (usesIn) {
     auto builder = std::make_unique<SkiplistInLookupBuilder>(
         trx, mapping, reference, reverse);
-    return new MMFilesSkiplistIterator2(_collection, trx, mmdr, this,
-                                        _skiplistIndex, numPaths(), CmpElmElm,
-                                        reverse, builder.release());
+    return new MMFilesSkiplistIterator(_collection, trx, mmdr, this,
+                                       _skiplistIndex, numPaths(), CmpElmElm,
+                                       reverse, builder.release());
   }
   auto builder =
       std::make_unique<SkiplistLookupBuilder>(trx, mapping, reference, reverse);
-  return new MMFilesSkiplistIterator2(_collection, trx, mmdr, this,
-                                      _skiplistIndex, numPaths(), CmpElmElm,
-                                      reverse, builder.release());
+  return new MMFilesSkiplistIterator(_collection, trx, mmdr, this,
+                                     _skiplistIndex, numPaths(), CmpElmElm,
+                                     reverse, builder.release());
 }
 
 bool MMFilesSkiplistIndex::supportsFilterCondition(
@@ -167,49 +167,6 @@ class SkiplistInLookupBuilder : public BaseSkiplistLookupBuilder {
/// can be NULL. Note that it is ensured that all intervals in an iterator
/// are non-empty.
class MMFilesSkiplistIterator final : public IndexIterator {
 private:
  friend class MMFilesSkiplistIndex;

 private:
  // Shorthand for the skiplist node
  typedef MMFilesSkiplistNode<VPackSlice, MMFilesSkiplistIndexElement> Node;

 private:
  bool _reverse;
  Node* _cursor;

  Node* _leftEndPoint;   // Interval left border, first excluded element
  Node* _rightEndPoint;  // Interval right border, first excluded element

 public:
  MMFilesSkiplistIterator(LogicalCollection* collection, arangodb::Transaction* trx,
                          ManagedDocumentResult* mmdr,
                          arangodb::MMFilesSkiplistIndex const* index,
                          bool reverse, Node* left, Node* right);

  // always holds the last node returned, initially equal to
  // the _leftEndPoint (or the
  // _rightEndPoint in the reverse case),
  // can be nullptr if the iterator is exhausted.

 public:
  char const* typeName() const override { return "skiplist-index-iterator"; }

  /// @brief Get the next elements in the skiplist
  bool next(TokenCallback const& cb, size_t limit) override;

  /// @brief Reset the cursor
  void reset() override;
};

/// @brief Iterator structure for skip list. We require a start and stop node
///
/// Intervals are open in the sense that both end points are not members
/// of the interval. This means that one has to use MMFilesSkiplist::nextNode
/// on the start node to get the first element and that the stop node
/// can be NULL. Note that it is ensured that all intervals in an iterator
/// are non-empty.
class MMFilesSkiplistIterator2 final : public IndexIterator {
 private:
  // Shorthand for the skiplist node
  typedef MMFilesSkiplistNode<VPackSlice, MMFilesSkiplistIndexElement> Node;

@@ -235,7 +192,7 @@ class MMFilesSkiplistIterator2 final : public IndexIterator {
                           MMFilesSkiplistCmpType)> _CmpElmElm;

 public:
  MMFilesSkiplistIterator2(LogicalCollection* collection, arangodb::Transaction* trx,
  MMFilesSkiplistIterator(LogicalCollection* collection, arangodb::Transaction* trx,
                          ManagedDocumentResult* mmdr,
                          arangodb::MMFilesSkiplistIndex const* index,
                          TRI_Skiplist const* skiplist, size_t numPaths,

@@ -243,7 +200,7 @@ class MMFilesSkiplistIterator2 final : public IndexIterator {
                           MMFilesSkiplistCmpType)> const& CmpElmElm,
                          bool reverse, BaseSkiplistLookupBuilder* builder);

  ~MMFilesSkiplistIterator2() {
  ~MMFilesSkiplistIterator() {
    delete _builder;
  }


@@ -254,7 +211,7 @@ class MMFilesSkiplistIterator2 final : public IndexIterator {

 public:

  char const* typeName() const override { return "skiplist-index-iterator2"; }
  char const* typeName() const override { return "skiplist-index-iterator"; }

  /// @brief Get the next elements in the skiplist
  bool next(TokenCallback const& cb, size_t limit) override;

@@ -303,7 +260,6 @@ class MMFilesSkiplistIndex final : public MMFilesPathBasedIndex {
    MMFilesSkiplistIndex* _idx;
  };

  friend class MMFilesSkiplistIterator;
  friend struct KeyElementComparator;
  friend struct ElementElementComparator;
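The hunks above delete the old single-interval MMFilesSkiplistIterator and hand its name to the former MMFilesSkiplistIterator2, which walks a whole list of skiplist intervals. As a rough model of the surviving iterator's advance logic (a simplified sketch only; Node, the interval storage, and the callback type stand in for the real members shown above):

    // Sketch: emit up to `limit` nodes across a list of node intervals,
    // generalizing the deleted single-interval next() shown above.
    template <typename Node, typename Callback>
    bool nextAcrossIntervals(std::vector<std::pair<Node*, Node*>> const& intervals,
                             size_t& currentInterval, Node*& cursor,
                             Callback const& cb, size_t limit) {
      while (limit > 0) {
        if (cursor == nullptr) {
          return false;  // exhausted, no interval left
        }
        Node* tmp = cursor;
        if (cursor == intervals[currentInterval].second) {
          // right border reached: forward the cursor to the next interval
          ++currentInterval;
          cursor = (currentInterval < intervals.size())
                       ? intervals[currentInterval].first
                       : nullptr;
        } else {
          cursor = cursor->nextNode();
        }
        cb(tmp);
        --limit;
      }
      return true;  // limit reached, caller may ask again
    }
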
@@ -66,6 +66,14 @@ class MMFilesTransactionState final : public TransactionState {

  /// @brief add a WAL operation for a transaction collection
  int addOperation(TRI_voc_rid_t, MMFilesDocumentOperation&, MMFilesWalMarker const* marker, bool&);

  /// @brief get the transaction id for usage in a marker
  TRI_voc_tid_t idForMarker() {
    if (isSingleOperation()) {
      return 0;
    }
    return _id;
  }

 private:
  /// @brief whether or not a marker needs to be written
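idForMarker() is moved rather than added from scratch: the matching hunk in TransactionState.h further down removes it from the engine-independent base class. MMFiles call sites therefore have to downcast, which is exactly the pattern the LogicalCollection hunks near the end of this diff adopt. A minimal sketch of that call-site pattern (names as in the diff):

    // Sketch: fetch the marker tid through the MMFiles-specific state.
    // The static_cast is safe only as long as the MMFiles engine is the
    // sole producer of TransactionState objects on these code paths.
    TRI_voc_tid_t tid =
        static_cast<MMFilesTransactionState*>(trx->state())->idForMarker();
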
@@ -22,14 +22,15 @@
////////////////////////////////////////////////////////////////////////////////

#include "RestEdgesHandler.h"
#include "Aql/AstNode.h"
#include "Aql/Graphs.h"
#include "Aql/Variable.h"
#include "Basics/ScopeGuard.h"
#include "Cluster/ClusterMethods.h"
#include "MMFiles/MMFilesEdgeIndex.h"
#include "Utils/CollectionNameResolver.h"
#include "Utils/OperationCursor.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Utils/StandaloneTransactionContext.h"
#include "VocBase/Traverser.h"

#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

@@ -47,13 +48,7 @@ RestStatus RestEdgesHandler::execute() {

  // execute one of the CRUD methods
  switch (type) {
    case rest::RequestType::GET: {
      readEdges();
      break;
    }
    case rest::RequestType::PUT:
      // Now unsupported. Just temporary to check
      TRI_ASSERT(false);
    case rest::RequestType::GET:
      readEdges();
      break;
    case rest::RequestType::POST:

@@ -68,74 +63,106 @@ RestStatus RestEdgesHandler::execute() {
  return RestStatus::DONE;
}

bool RestEdgesHandler::getEdgesForVertexList(
    VPackSlice const ids,
    TRI_edge_direction_e direction, SingleCollectionTransaction& trx,
    VPackBuilder& result, size_t& scannedIndex, size_t& filtered) {
  TRI_ASSERT(result.isOpenArray());
  TRI_ASSERT(ids.isArray());
  trx.orderDitch(trx.cid());  // will throw when it fails
void RestEdgesHandler::readCursor(
    aql::AstNode* condition, aql::Variable const* var,
    std::string const& collectionName, SingleCollectionTransaction& trx,
    std::function<void(DocumentIdentifierToken const&)> cb) {
  Transaction::IndexHandle indexId;
  bool foundIdx = trx.getBestIndexHandleForFilterCondition(
      collectionName, condition, var, 1000, indexId);
  if (!foundIdx) {
    // Right now we enforce an edge index that can exactly! work on this condition.
    // So it is impossible to not find an index.
    TRI_ASSERT(false);
    THROW_ARANGO_EXCEPTION_MESSAGE(
        TRI_ERROR_ARANGO_NO_INDEX,
        "Unable to find an edge-index to identify matching edges.");
  }

  std::string const collectionName =
      trx.resolver()->getCollectionName(trx.cid());
  Transaction::IndexHandle indexId = trx.edgeIndexHandle(collectionName);
  ManagedDocumentResult mmdr;
  std::unique_ptr<OperationCursor> cursor(trx.indexScanForCondition(
      indexId, condition, var, &mmdr, UINT64_MAX, 1000, false));

  VPackBuilder searchValueBuilder;
  MMFilesEdgeIndex::buildSearchValueFromArray(direction, ids, searchValueBuilder);
  VPackSlice search = searchValueBuilder.slice();

  std::unique_ptr<OperationCursor> cursor =
      trx.indexScan(collectionName, arangodb::Transaction::CursorType::INDEX,
                    indexId, search, nullptr, 0, UINT64_MAX, 1000, false);
  if (cursor->failed()) {
    THROW_ARANGO_EXCEPTION(cursor->code);
  }

  ManagedDocumentResult mmdr;
  auto collection = trx.documentCollection();
  auto cb = [&](DocumentIdentifierToken const& token) {
    if (collection->readDocument(&trx, mmdr, token)) {
      result.add(VPackSlice(mmdr.vpack()));
    }
    scannedIndex++;
  };
  while (cursor->getMore(cb, 1000)) {
  }

  return true;
}


bool RestEdgesHandler::getEdgesForVertex(
    std::string const& id, std::string const& collectionName,
    TRI_edge_direction_e direction, SingleCollectionTransaction& trx,
    VPackBuilder& result, size_t& scannedIndex, size_t& filtered) {
  TRI_ASSERT(result.isOpenArray());
    std::function<void(DocumentIdentifierToken const&)> cb) {
  trx.orderDitch(trx.cid());  // will throw when it fails

  Transaction::IndexHandle indexId = trx.edgeIndexHandle(collectionName);
  // Create a conditionBuilder that manages the AstNodes for querying
  aql::EdgeConditionBuilderContainer condBuilder;
  condBuilder.setVertexId(id);

  VPackBuilder searchValueBuilder;
  MMFilesEdgeIndex::buildSearchValue(direction, id, searchValueBuilder);
  VPackSlice search = searchValueBuilder.slice();
  aql::Variable const* var = condBuilder.getVariable();

  std::unique_ptr<OperationCursor> cursor =
      trx.indexScan(collectionName, arangodb::Transaction::CursorType::INDEX,
                    indexId, search, nullptr, 0, UINT64_MAX, 1000, false);
  if (cursor->failed()) {
    THROW_ARANGO_EXCEPTION(cursor->code);
  switch (direction) {
    case TRI_EDGE_IN:
      readCursor(condBuilder.getInboundCondition(), var, collectionName, trx,
                 cb);
      break;
    case TRI_EDGE_OUT:
      readCursor(condBuilder.getOutboundCondition(), var, collectionName, trx,
                 cb);
      break;
    case TRI_EDGE_ANY:
      // We have to call both directions
      readCursor(condBuilder.getInboundCondition(), var, collectionName, trx,
                 cb);
      readCursor(condBuilder.getOutboundCondition(), var, collectionName, trx,
                 cb);
      break;
  }
  return true;
}

bool RestEdgesHandler::parseDirection(TRI_edge_direction_e& direction) {
  bool found;
  std::string dir = _request->value("direction", found);

  if (!found || dir.empty()) {
    dir = "any";
  }

  ManagedDocumentResult mmdr;
  auto collection = trx.documentCollection();
  auto cb = [&] (DocumentIdentifierToken const& token) {
    if (collection->readDocument(&trx, mmdr, token)) {
      result.add(VPackSlice(mmdr.vpack()));
    }
    scannedIndex++;
  };
  while (cursor->getMore(cb, 1000)) {
  std::string dirString(dir);

  if (dirString == "any") {
    direction = TRI_EDGE_ANY;
  } else if (dirString == "out" || dirString == "outbound") {
    direction = TRI_EDGE_OUT;
  } else if (dirString == "in" || dirString == "inbound") {
    direction = TRI_EDGE_IN;
  } else {
    generateError(rest::ResponseCode::BAD,
                  TRI_ERROR_HTTP_BAD_PARAMETER,
                  "<direction> must be any, in, or out, not: " + dirString);
    return false;
  }
  return true;
}

bool RestEdgesHandler::validateCollection(std::string const& name) {
  CollectionNameResolver resolver(_vocbase);
  TRI_col_type_e colType = resolver.getCollectionTypeCluster(name);
  if (colType == TRI_COL_TYPE_UNKNOWN) {
    generateError(rest::ResponseCode::NOT_FOUND,
                  TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
    return false;
  }

  if (colType != TRI_COL_TYPE_EDGE) {
    generateError(rest::ResponseCode::BAD,
                  TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID);
    return false;
  }
  return true;
}


@@ -153,43 +180,16 @@ bool RestEdgesHandler::readEdges() {
  }

  std::string collectionName = suffixes[0];
  CollectionNameResolver resolver(_vocbase);
  TRI_col_type_e colType = resolver.getCollectionTypeCluster(collectionName);
  if (colType == TRI_COL_TYPE_UNKNOWN) {
    generateError(rest::ResponseCode::NOT_FOUND,
                  TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
  if (!validateCollection(collectionName)) {
    return false;
  }

  if (colType != TRI_COL_TYPE_EDGE) {
    generateError(rest::ResponseCode::BAD,
                  TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID);
  TRI_edge_direction_e direction;
  if (!parseDirection(direction)) {
    return false;
  }

  bool found;
  std::string dir = _request->value("direction", found);

  if (!found || dir.empty()) {
    dir = "any";
  }

  std::string dirString(dir);
  TRI_edge_direction_e direction;

  if (dirString == "any") {
    direction = TRI_EDGE_ANY;
  } else if (dirString == "out" || dirString == "outbound") {
    direction = TRI_EDGE_OUT;
  } else if (dirString == "in" || dirString == "inbound") {
    direction = TRI_EDGE_IN;
  } else {
    generateError(rest::ResponseCode::BAD,
                  TRI_ERROR_HTTP_BAD_PARAMETER,
                  "<direction> must be any, in, or out, not: " + dirString);
    return false;
  }

  std::string const& startVertex = _request->value("vertex", found);

  if (!found || startVertex.empty()) {

@@ -245,9 +245,23 @@ bool RestEdgesHandler::readEdges() {
  // build edges
  resultBuilder.add(VPackValue("edges"));  // only key
  resultBuilder.openArray();
  // NOTE: collecitonName is the shard-name in DBServer case
  bool ok = getEdgesForVertex(startVertex, collectionName, direction, trx,
                              resultBuilder, scannedIndex, filtered);

  auto collection = trx.documentCollection();
  ManagedDocumentResult mmdr;
  std::unordered_set<DocumentIdentifierToken> foundTokens;
  auto cb = [&] (DocumentIdentifierToken const& token) {
    if (foundTokens.find(token) == foundTokens.end()) {
      if (collection->readDocument(&trx, mmdr, token)) {
        resultBuilder.add(VPackSlice(mmdr.vpack()));
      }
      scannedIndex++;
      // Mark edges we find
      foundTokens.emplace(token);
    }
  };

  // NOTE: collectionName is the shard-name in DBServer case
  bool ok = getEdgesForVertex(startVertex, collectionName, direction, trx, cb);
  resultBuilder.close();

  res = trx.finish(res);

@@ -317,38 +331,12 @@ bool RestEdgesHandler::readEdgesForMultipleVertices() {
  }

  std::string collectionName = suffixes[0];
  CollectionNameResolver resolver(_vocbase);
  TRI_col_type_e colType = resolver.getCollectionTypeCluster(collectionName);

  if (colType == TRI_COL_TYPE_UNKNOWN) {
    generateError(rest::ResponseCode::NOT_FOUND,
                  TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
  if (!validateCollection(collectionName)) {
    return false;
  } else if (colType != TRI_COL_TYPE_EDGE) {
    generateError(rest::ResponseCode::BAD,
                  TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID);
    return false;
  }

  bool found;
  std::string dirString = _request->value("direction", found);

  if (!found || dirString.empty()) {
    dirString = "any";
  }

  TRI_edge_direction_e direction;

  if (dirString == "any") {
    direction = TRI_EDGE_ANY;
  } else if (dirString == "out" || dirString == "outbound") {
    direction = TRI_EDGE_OUT;
  } else if (dirString == "in" || dirString == "inbound") {
    direction = TRI_EDGE_IN;
  } else {
    generateError(rest::ResponseCode::BAD,
                  TRI_ERROR_HTTP_BAD_PARAMETER,
                  "<direction> must be any, in, or out, not: " + dirString);
  if (!parseDirection(direction)) {
    return false;
  }


@@ -393,11 +381,6 @@ bool RestEdgesHandler::readEdgesForMultipleVertices() {
    return false;
  }

  // If we are a DBserver, we want to use the cluster-wide collection
  // name for error reporting:
  if (ServerState::instance()->isDBServer()) {
    collectionName = trx.resolver()->getCollectionName(trx.cid());
  }

  size_t filtered = 0;
  size_t scannedIndex = 0;

@@ -408,17 +391,37 @@ bool RestEdgesHandler::readEdgesForMultipleVertices() {
  resultBuilder.add(VPackValue("edges"));  // only key
  resultBuilder.openArray();

  bool ok = getEdgesForVertexList(body, direction, trx, resultBuilder,
                                  scannedIndex, filtered);
  auto collection = trx.documentCollection();
  ManagedDocumentResult mmdr;
  std::unordered_set<DocumentIdentifierToken> foundTokens;
  auto cb = [&] (DocumentIdentifierToken const& token) {
    if (foundTokens.find(token) == foundTokens.end()) {
      if (collection->readDocument(&trx, mmdr, token)) {
        resultBuilder.add(VPackSlice(mmdr.vpack()));
      }
      scannedIndex++;
      // Mark edges we find
      foundTokens.emplace(token);
    }
  };

  if (!ok) {
    // Ignore the error
  for (auto const& it : VPackArrayIterator(body)) {
    if (it.isString()) {
      std::string startVertex = it.copyString();

      // We ignore if this fails
      getEdgesForVertex(startVertex, collectionName, direction, trx, cb);
    }
  }

  resultBuilder.close();

  res = trx.finish(res);
  if (res != TRI_ERROR_NO_ERROR) {
    if (ServerState::instance()->isDBServer()) {
      // If we are a DBserver, we want to use the cluster-wide collection
      // name for error reporting:
      collectionName = trx.resolver()->getCollectionName(trx.cid());
    }
    generateTransactionError(collectionName, res, "");
    return false;
  }
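Both read paths now stream matches through a DocumentIdentifierToken callback instead of filling a VPackBuilder inside the lookup helpers. Because direction "any" runs the inbound and the outbound cursor over the same edge index, the same edge can surface twice; the unordered_set in the hunks above suppresses those duplicates. The idiom in isolation (a sketch; emitEdge() is a hypothetical stand-in for the readDocument/resultBuilder work shown above):

    // Sketch: deduplicate edge tokens across multiple cursor passes.
    std::unordered_set<DocumentIdentifierToken> seen;
    auto cb = [&](DocumentIdentifierToken const& token) {
      if (seen.emplace(token).second) {  // second == true: first sighting
        emitEdge(token);                 // hypothetical consumer
      }
    };
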
@@ -30,8 +30,14 @@
#include <velocypack/Builder.h>

namespace arangodb {
struct DocumentIdentifierToken;
class SingleCollectionTransaction;

namespace aql {
struct AstNode;
struct Variable;
}

class RestEdgesHandler : public RestVocbaseBaseHandler {
 public:
  explicit RestEdgesHandler(GeneralRequest*, GeneralResponse*);

@@ -48,11 +54,20 @@ class RestEdgesHandler : public RestVocbaseBaseHandler {
  bool readEdges();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief reads all edges in given direction for a given list of vertices
  /// @brief reads all edges in given direction for a list of vertices
  //////////////////////////////////////////////////////////////////////////////

  bool readEdgesForMultipleVertices();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief find the index and read it completely with the given callback
  //////////////////////////////////////////////////////////////////////////////

  void readCursor(aql::AstNode* condition, aql::Variable const* var,
                  std::string const& collectionName,
                  SingleCollectionTransaction& trx,
                  std::function<void(DocumentIdentifierToken const&)> cb);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief get all edges for a given vertex. Independent from the request
  //////////////////////////////////////////////////////////////////////////////

@@ -60,16 +75,19 @@ class RestEdgesHandler : public RestVocbaseBaseHandler {
  bool getEdgesForVertex(
      std::string const& id, std::string const& collectionName,
      TRI_edge_direction_e direction, SingleCollectionTransaction& trx,
      arangodb::velocypack::Builder&, size_t& scannedIndex, size_t& filtered);
      std::function<void(DocumentIdentifierToken const&)> cb);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief get all edges for a list of vertices. Independent from the request
  /// @brief Parse the direction parameter
  //////////////////////////////////////////////////////////////////////////////

  bool getEdgesForVertexList(
      arangodb::velocypack::Slice const ids,
      TRI_edge_direction_e direction, SingleCollectionTransaction& trx,
      arangodb::velocypack::Builder&, size_t& scannedIndex, size_t& filtered);
  bool parseDirection(TRI_edge_direction_e& direction);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Validate that the collection exists and is an edge collection
  //////////////////////////////////////////////////////////////////////////////

  bool validateCollection(std::string const& name);
};
}
@@ -34,9 +34,7 @@
#include "GeneralServer/GeneralServer.h"
#include "Indexes/Index.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesEdgeIndex.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesPrimaryIndex.h"
#include "Replication/InitialSyncer.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
@@ -104,14 +104,6 @@ class TransactionState {
  /// TODO: implement this in base class
  virtual bool hasFailedOperations() const = 0;

  /// @brief get the transaction id for usage in a marker
  TRI_voc_tid_t idForMarker() {
    if (isSingleOperation()) {
      return 0;
    }
    return _id;
  }

 protected:
  /// @brief find a collection in the transaction's list of collections
  TransactionCollection* findCollection(TRI_voc_cid_t cid, size_t& position) const;
@@ -73,11 +73,6 @@ std::vector<std::vector<std::string>> Transaction::IndexHandle::fieldNames() con
  return _index->fieldNames();
}

/// @brief Only required by traversal should be removed ASAP
bool Transaction::IndexHandle::isMMFilesEdgeIndex() const {
  return _index->type() == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX;
}

/// @brief IndexHandle getter method
std::shared_ptr<arangodb::Index> Transaction::IndexHandle::getIndex() const {
  return _index;

@@ -1218,8 +1213,8 @@ OperationResult Transaction::anyLocal(std::string const& collectionName,
  ManagedDocumentResult mmdr;

  std::unique_ptr<OperationCursor> cursor =
      indexScan(collectionName, Transaction::CursorType::ANY, IndexHandle(),
                {}, &mmdr, skip, limit, 1000, false);
      indexScan(collectionName, Transaction::CursorType::ANY, &mmdr, skip,
                limit, 1000, false);

  LogicalCollection* collection = cursor->collection();
  auto cb = [&] (DocumentIdentifierToken const& token) {

@@ -1321,20 +1316,6 @@ std::string Transaction::collectionName(TRI_voc_cid_t cid) {
  return resolver()->getCollectionName(cid);
}

/// @brief return the edge index handle of collection
Transaction::IndexHandle Transaction::edgeIndexHandle(std::string const& collectionName) {
  if (!isEdgeCollection(collectionName)) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID);
  }
  auto indexes = indexesForCollection(collectionName);
  for (auto idx : indexes) {
    if (idx->type() == Index::TRI_IDX_TYPE_EDGE_INDEX) {
      return IndexHandle(idx);
    }
  }
  THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID);
}

/// @brief Iterate over all elements of the collection.
void Transaction::invokeOnAllElements(std::string const& collectionName,
                                      std::function<bool(DocumentIdentifierToken const&)> callback) {

@@ -2543,8 +2524,8 @@ OperationResult Transaction::allLocal(std::string const& collectionName,
  ManagedDocumentResult mmdr;

  std::unique_ptr<OperationCursor> cursor =
      indexScan(collectionName, Transaction::CursorType::ALL, IndexHandle(),
                {}, &mmdr, skip, limit, 1000, false);
      indexScan(collectionName, Transaction::CursorType::ALL, &mmdr, skip,
                limit, 1000, false);

  if (cursor->failed()) {
    return OperationResult(cursor->code);

@@ -2974,7 +2955,6 @@ OperationCursor* Transaction::indexScanForCondition(
/// calling this method
std::unique_ptr<OperationCursor> Transaction::indexScan(
    std::string const& collectionName, CursorType cursorType,
    IndexHandle const& indexId, VPackSlice const search,
    ManagedDocumentResult* mmdr,
    uint64_t skip, uint64_t limit, uint64_t batchSize, bool reverse) {
  // For now we assume indexId is the iid part of the index.

@@ -2998,11 +2978,6 @@ std::unique_ptr<OperationCursor> Transaction::indexScan(

  switch (cursorType) {
    case CursorType::ANY: {
      // We do not need search values
      TRI_ASSERT(search.isNone());
      // We do not need an index either
      TRI_ASSERT(nullptr == indexId.getIndex());

      arangodb::MMFilesPrimaryIndex* idx = document->primaryIndex();

      if (idx == nullptr) {

@@ -3015,11 +2990,6 @@ std::unique_ptr<OperationCursor> Transaction::indexScan(
      break;
    }
    case CursorType::ALL: {
      // We do not need search values
      TRI_ASSERT(search.isNone());
      // We do not need an index either
      TRI_ASSERT(nullptr == indexId.getIndex());

      arangodb::MMFilesPrimaryIndex* idx = document->primaryIndex();

      if (idx == nullptr) {

@@ -3031,19 +3001,6 @@ std::unique_ptr<OperationCursor> Transaction::indexScan(
      iterator.reset(idx->allIterator(this, mmdr, reverse));
      break;
    }
    case CursorType::INDEX: {
      auto idx = indexId.getIndex();
      if (nullptr == idx) {
        THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER,
                                       "The index id cannot be empty.");
      }
      // Normalize the search values
      // VPackBuilder expander;
      // idx->expandInSearchValues(search, expander);

      // Now collect the Iterator
      iterator.reset(idx->iteratorForSlice(this, mmdr, search, reverse));
    }
  }
  if (iterator == nullptr) {
    // We could not create an ITERATOR and it did not throw an error itself
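With CursorType::INDEX gone, Transaction::indexScan() only serves full and random scans over the primary index, and every index-driven read goes through indexScanForCondition() with an AST condition, as the RestEdgesHandler and EdgeCollectionInfo hunks show. The call-site migration side by side (a sketch built from the signatures in this diff):

    // Before: one entry point; the IndexHandle and search slice were dead
    // weight for ALL/ANY scans.
    //   trx.indexScan(name, Transaction::CursorType::ALL,
    //                 Transaction::IndexHandle(), {}, &mmdr,
    //                 skip, limit, 1000, false);

    // After: ALL/ANY drop the unused parameters entirely; condition-driven
    // reads use indexScanForCondition() instead.
    std::unique_ptr<OperationCursor> cursor =
        trx.indexScan(name, Transaction::CursorType::ALL, &mmdr,
                      skip, limit, 1000, false);
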
@@ -142,8 +142,6 @@ class Transaction {
  }
  std::vector<std::vector<std::string>> fieldNames() const;

  bool isMMFilesEdgeIndex() const;

 public:
  std::shared_ptr<arangodb::Index> getIndex() const;
};

@@ -177,8 +175,7 @@ class Transaction {
  /// @brief Type of cursor
  enum class CursorType {
    ALL = 0,
    ANY,
    INDEX
    ANY
  };

  /// @brief return database of transaction

@@ -319,9 +316,6 @@ class Transaction {
  /// @brief return the name of a collection
  std::string collectionName(TRI_voc_cid_t cid);

  /// @brief return the edge index handle of collection
  IndexHandle edgeIndexHandle(std::string const&);

  /// @brief Iterate over all elements of the collection.
  void invokeOnAllElements(std::string const& collectionName,
                           std::function<bool(arangodb::DocumentIdentifierToken const&)>);

@@ -451,8 +445,6 @@ class Transaction {
  /// calling this method
  std::unique_ptr<OperationCursor> indexScan(std::string const& collectionName,
                                             CursorType cursorType,
                                             IndexHandle const& indexId,
                                             VPackSlice const search,
                                             ManagedDocumentResult*,
                                             uint64_t skip, uint64_t limit,
                                             uint64_t batchSize, bool reverse);
@@ -223,8 +223,8 @@ static void JS_AllQuery(v8::FunctionCallbackInfo<v8::Value> const& args) {

  // We directly read the entire cursor. so batchsize == limit
  std::unique_ptr<OperationCursor> opCursor =
      trx.indexScan(collectionName, Transaction::CursorType::ALL,
                    Transaction::IndexHandle(), {}, nullptr, skip, limit, limit, false);
      trx.indexScan(collectionName, Transaction::CursorType::ALL, nullptr, skip,
                    limit, limit, false);

  if (opCursor->failed()) {
    TRI_V8_THROW_EXCEPTION(opCursor->code);
@@ -22,7 +22,6 @@

#include "EdgeCollectionInfo.h"
#include "Cluster/ClusterMethods.h"
#include "MMFiles/MMFilesEdgeIndex.h"
#include "Utils/OperationCursor.h"

using namespace arangodb::traverser;

@@ -34,26 +33,37 @@ EdgeCollectionInfo::EdgeCollectionInfo(arangodb::Transaction* trx,
                                       double defaultWeight)
    : _trx(trx),
      _collectionName(collectionName),
      _searchBuilder(),
      _weightAttribute(weightAttribute),
      _defaultWeight(defaultWeight),
      _forwardDir(direction) {

  switch (direction) {
    case TRI_EDGE_OUT:
      _backwardDir = TRI_EDGE_IN;
      break;
    case TRI_EDGE_IN:
      _backwardDir = TRI_EDGE_OUT;
      break;
    case TRI_EDGE_ANY:
      _backwardDir = TRI_EDGE_ANY;
      break;
  }
      _dir(direction) {
  TRI_ASSERT(_dir == TRI_EDGE_OUT || _dir == TRI_EDGE_IN);

  if (!trx->isEdgeCollection(collectionName)) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_TYPE_INVALID);
  }
  _indexId = trx->edgeIndexHandle(collectionName);

  auto var = _searchBuilder.getVariable();
  if (_dir == TRI_EDGE_OUT) {
    auto cond = _searchBuilder.getOutboundCondition();
    bool worked = _trx->getBestIndexHandleForFilterCondition(
        _collectionName, cond, var, 1000, _forwardIndexId);
    TRI_ASSERT(worked);  // We always have an edge Index
    cond = _searchBuilder.getInboundCondition();
    worked = _trx->getBestIndexHandleForFilterCondition(
        _collectionName, cond, var, 1000,
        _backwardIndexId);
    TRI_ASSERT(worked);  // We always have an edge Index
  } else {
    auto cond = _searchBuilder.getInboundCondition();
    bool worked = _trx->getBestIndexHandleForFilterCondition(
        _collectionName, cond, var, 1000, _forwardIndexId);
    TRI_ASSERT(worked);  // We always have an edge Index
    cond = _searchBuilder.getOutboundCondition();
    worked = _trx->getBestIndexHandleForFilterCondition(
        _collectionName, cond, var, 1000, _backwardIndexId);
    TRI_ASSERT(worked);  // We always have an edge Index
  }
}

////////////////////////////////////////////////////////////////////////////////

@@ -63,21 +73,18 @@ EdgeCollectionInfo::EdgeCollectionInfo(arangodb::Transaction* trx,
std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getEdges(
    std::string const& vertexId,
    arangodb::ManagedDocumentResult* mmdr) {
  _searchBuilder.clear();
  MMFilesEdgeIndex::buildSearchValue(_forwardDir, vertexId, _searchBuilder);
  return _trx->indexScan(_collectionName,
                         arangodb::Transaction::CursorType::INDEX, _indexId,
                         _searchBuilder.slice(), mmdr, 0, UINT64_MAX, 1000, false);
}

std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getEdges(
    VPackSlice const& vertexId,
    arangodb::ManagedDocumentResult* mmdr) {
  _searchBuilder.clear();
  MMFilesEdgeIndex::buildSearchValue(_forwardDir, vertexId, _searchBuilder);
  return _trx->indexScan(_collectionName,
                         arangodb::Transaction::CursorType::INDEX, _indexId,
                         _searchBuilder.slice(), mmdr, 0, UINT64_MAX, 1000, false);
  _searchBuilder.setVertexId(vertexId);
  std::unique_ptr<arangodb::OperationCursor> res;
  if (_dir == TRI_EDGE_OUT) {
    res.reset(_trx->indexScanForCondition(
        _forwardIndexId, _searchBuilder.getOutboundCondition(),
        _searchBuilder.getVariable(), mmdr, UINT64_MAX, 1000, false));
  } else {
    res.reset(_trx->indexScanForCondition(
        _forwardIndexId, _searchBuilder.getInboundCondition(),
        _searchBuilder.getVariable(), mmdr, UINT64_MAX, 1000, false));
  }
  return res;
}

////////////////////////////////////////////////////////////////////////////////

@@ -91,7 +98,7 @@ int EdgeCollectionInfo::getEdgesCoordinator(VPackSlice const& vertexId,
  result.openObject();
  int res = getFilteredEdgesOnCoordinator(
      _trx->vocbase()->name(), _collectionName, vertexId.copyString(),
      _forwardDir, responseCode, result);
      _dir, responseCode, result);
  result.close();
  return res;
}

@@ -103,21 +110,18 @@ int EdgeCollectionInfo::getEdgesCoordinator(VPackSlice const& vertexId,
std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getReverseEdges(
    std::string const& vertexId,
    arangodb::ManagedDocumentResult* mmdr) {
  _searchBuilder.clear();
  MMFilesEdgeIndex::buildSearchValue(_backwardDir, vertexId, _searchBuilder);
  return _trx->indexScan(_collectionName,
                         arangodb::Transaction::CursorType::INDEX, _indexId,
                         _searchBuilder.slice(), mmdr, 0, UINT64_MAX, 1000, false);
}

std::unique_ptr<arangodb::OperationCursor> EdgeCollectionInfo::getReverseEdges(
    VPackSlice const& vertexId,
    arangodb::ManagedDocumentResult* mmdr) {
  _searchBuilder.clear();
  MMFilesEdgeIndex::buildSearchValue(_backwardDir, vertexId, _searchBuilder);
  return _trx->indexScan(_collectionName,
                         arangodb::Transaction::CursorType::INDEX, _indexId,
                         _searchBuilder.slice(), mmdr, 0, UINT64_MAX, 1000, false);
  _searchBuilder.setVertexId(vertexId);
  std::unique_ptr<arangodb::OperationCursor> res;
  if (_dir == TRI_EDGE_OUT) {
    res.reset(_trx->indexScanForCondition(
        _backwardIndexId, _searchBuilder.getInboundCondition(),
        _searchBuilder.getVariable(), mmdr, UINT64_MAX, 1000, false));
  } else {
    res.reset(_trx->indexScanForCondition(
        _backwardIndexId, _searchBuilder.getOutboundCondition(),
        _searchBuilder.getVariable(), mmdr, UINT64_MAX, 1000, false));
  }
  return res;
}

////////////////////////////////////////////////////////////////////////////////

@@ -129,9 +133,13 @@ int EdgeCollectionInfo::getReverseEdgesCoordinator(VPackSlice const& vertexId,
  TRI_ASSERT(result.isEmpty());
  arangodb::rest::ResponseCode responseCode;
  result.openObject();
  TRI_edge_direction_e dir = TRI_EDGE_OUT;
  if (_dir == TRI_EDGE_OUT) {
    dir = TRI_EDGE_IN;
  }
  int res = getFilteredEdgesOnCoordinator(
      _trx->vocbase()->name(), _collectionName, vertexId.copyString(),
      _backwardDir, responseCode, result);
      dir, responseCode, result);
  result.close();
  return res;
}
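EdgeCollectionInfo no longer hand-builds edge-index search values; it resolves one index handle per direction up front and then only picks between the two precomputed conditions. Forward iteration over an outbound graph uses the outbound condition, and a reverse scan flips it, which is all getEdges()/getReverseEdges() above do. A compact restatement (a hypothetical helper, not code from this diff):

    // Sketch: choose the precomputed AST condition for a scan direction.
    // forward == true matches getEdges(), forward == false getReverseEdges().
    aql::AstNode* conditionFor(bool forward) {
      bool outbound = (forward == (_dir == TRI_EDGE_OUT));
      return outbound ? _searchBuilder.getOutboundCondition()
                      : _searchBuilder.getInboundCondition();
    }
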
@@ -24,6 +24,7 @@
#ifndef ARANGOD_EDGE_COLLECTION_INFO_H
#define ARANGOD_EDGE_COLLECTION_INFO_H 1

#include "Aql/Graphs.h"
#include "VocBase/Traverser.h"

namespace arangodb {

@@ -53,26 +54,24 @@ class EdgeCollectionInfo {

  std::string _collectionName;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief index id
  //////////////////////////////////////////////////////////////////////////////
  /// @brief index used for forward iteration
  arangodb::Transaction::IndexHandle _forwardIndexId;

  arangodb::Transaction::IndexHandle _indexId;
  /// @brief index used for backward iteration
  arangodb::Transaction::IndexHandle _backwardIndexId;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Temporary builder for index search values
  /// NOTE: Single search builder is NOT thread-safe
  //////////////////////////////////////////////////////////////////////////////

  VPackBuilder _searchBuilder;
  aql::EdgeConditionBuilderContainer _searchBuilder;

  std::string _weightAttribute;

  double _defaultWeight;

  TRI_edge_direction_e _forwardDir;

  TRI_edge_direction_e _backwardDir;
  TRI_edge_direction_e _dir;

 public:


@@ -88,8 +87,6 @@ class EdgeCollectionInfo {

  std::unique_ptr<arangodb::OperationCursor> getEdges(std::string const&, ManagedDocumentResult*);

  std::unique_ptr<arangodb::OperationCursor> getEdges(arangodb::velocypack::Slice const&, ManagedDocumentResult*);

  ////////////////////////////////////////////////////////////////////////////////
  /// @brief Get edges for the given direction and start vertex. On Coordinator.
  ////////////////////////////////////////////////////////////////////////////////

@@ -101,13 +98,13 @@ class EdgeCollectionInfo {
  /// @brief Get edges for the given direction and start vertex. Reverse version
  ////////////////////////////////////////////////////////////////////////////////

  std::unique_ptr<arangodb::OperationCursor> getReverseEdges(std::string const&, ManagedDocumentResult*);
  std::unique_ptr<arangodb::OperationCursor> getReverseEdges(
      std::string const&, ManagedDocumentResult*);

  std::unique_ptr<arangodb::OperationCursor> getReverseEdges(arangodb::velocypack::Slice const&, ManagedDocumentResult*);

  ////////////////////////////////////////////////////////////////////////////////
  /// @brief Get edges for the given direction and start vertex. Reverse version on Coordinator.
  ////////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////////////
  /// @brief Get edges for the given direction and start vertex. Reverse version
  /// on Coordinator.
  ////////////////////////////////////////////////////////////////////////////////

  int getReverseEdgesCoordinator(arangodb::velocypack::Slice const&,
                                 arangodb::velocypack::Builder&);
@@ -2031,7 +2031,7 @@ int LogicalCollection::insert(Transaction* trx, VPackSlice const slice,
  // create marker
  MMFilesCrudMarker insertMarker(
      TRI_DF_MARKER_VPACK_DOCUMENT,
      trx->state()->idForMarker(), newSlice);
      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(), newSlice);

  MMFilesWalMarker const* marker;
  if (options.recoveryMarker == nullptr) {

@@ -2227,7 +2227,7 @@ int LogicalCollection::update(Transaction* trx, VPackSlice const newSlice,
  // create marker
  MMFilesCrudMarker updateMarker(
      TRI_DF_MARKER_VPACK_DOCUMENT,
      trx->state()->idForMarker(), builder->slice());
      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(), builder->slice());

  MMFilesWalMarker const* marker;
  if (options.recoveryMarker == nullptr) {

@@ -2389,7 +2389,7 @@ int LogicalCollection::replace(Transaction* trx, VPackSlice const newSlice,
  // create marker
  MMFilesCrudMarker replaceMarker(
      TRI_DF_MARKER_VPACK_DOCUMENT,
      trx->state()->idForMarker(), builder->slice());
      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(), builder->slice());

  MMFilesWalMarker const* marker;
  if (options.recoveryMarker == nullptr) {

@@ -2486,7 +2486,7 @@ int LogicalCollection::remove(arangodb::Transaction* trx,

  // create marker
  MMFilesCrudMarker removeMarker(
      TRI_DF_MARKER_VPACK_REMOVE, trx->state()->idForMarker(),
      TRI_DF_MARKER_VPACK_REMOVE, static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(),
      builder->slice());

  MMFilesWalMarker const* marker;

@@ -2617,7 +2617,7 @@ int LogicalCollection::remove(arangodb::Transaction* trx,

  // create marker
  MMFilesCrudMarker removeMarker(
      TRI_DF_MARKER_VPACK_REMOVE, trx->state()->idForMarker(),
      TRI_DF_MARKER_VPACK_REMOVE, static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(),
      builder->slice());

  MMFilesWalMarker const* marker = &removeMarker;
@@ -32,7 +32,6 @@

namespace arangodb {

class MMFilesEdgeIndex;
class LogicalCollection;
class ManagedDocumentResult;
@@ -97,6 +97,52 @@ else ()
  add_dependencies(arangodump zlibstatic)
endif ()

################################################################################
## arangoexport
################################################################################

if (MSVC)
  generate_product_version(ProductVersionFiles_arangoexport
    NAME arangoexport
    FILE_DESCRIPTION ${ARANGO_EXPORT_FRIENDLY_STRING}
    ICON ${ARANGO_ICON}
    VERSION_MAJOR ${CPACK_PACKAGE_VERSION_MAJOR}
    VERSION_MINOR ${CPACK_PACKAGE_VERSION_MINOR}
    VERSION_PATCH ${CPACK_PACKAGE_VERSION_PATCH}
    VERSION_REVISION ${BUILD_ID}
  )
endif ()

add_executable(${BIN_ARANGOEXPORT}
  ${ProductVersionFiles_arangoexport}
  ${PROJECT_SOURCE_DIR}/lib/Basics/WorkMonitorDummy.cpp
  Export/ExportFeature.cpp
  Export/arangoexport.cpp
  Shell/ClientFeature.cpp
  Shell/ConsoleFeature.cpp
  V8Client/ArangoClientHelper.cpp
)

target_link_libraries(${BIN_ARANGOEXPORT}
  ${LIB_ARANGO}
  ${MSVC_LIBS}
  ${SYSTEM_LIBRARIES}
  boost_system
  boost_boost
)

install(
  TARGETS ${BIN_ARANGOEXPORT}
  RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})

install_config(arangoexport)

if (NOT USE_PRECOMPILED_V8)
  add_dependencies(arangoexport zlibstatic v8_build)  # v8_build includes ICU build
else ()
  add_dependencies(arangoexport zlibstatic)  # v8_build includes ICU build
endif ()

################################################################################
## arangoimp
################################################################################
@@ -0,0 +1,601 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2016 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Manuel Baesler
////////////////////////////////////////////////////////////////////////////////

#include "ExportFeature.h"

#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/FileUtils.h"
#include "Basics/StringUtils.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Shell/ClientFeature.h"
#include "SimpleHttpClient/GeneralClientConnection.h"
#include "SimpleHttpClient/SimpleHttpClient.h"
#include "SimpleHttpClient/SimpleHttpResult.h"

#include <boost/property_tree/detail/xml_parser_utils.hpp>

using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::httpclient;
using namespace arangodb::options;
using namespace boost::property_tree::xml_parser;

ExportFeature::ExportFeature(application_features::ApplicationServer* server,
                             int* result)
    : ApplicationFeature(server, "Export"),
      _collections(),
      _graphName(),
      _xgmmlLabelAttribute("label"),
      _typeExport("json"),
      _xgmmlLabelOnly(false),
      _outputDirectory(),
      _overwrite(false),
      _progress(true),
      _firstLine(true),
      _skippedDeepNested(0),
      _httpRequestsDone(0),
      _currentCollection(),
      _currentGraph(),
      _result(result) {
  requiresElevatedPrivileges(false);
  setOptional(false);
  startsAfter("Client");
  startsAfter("Config");
  startsAfter("Logger");

  _outputDirectory =
      FileUtils::buildFilename(FileUtils::currentDirectory(), "export");
}

void ExportFeature::collectOptions(
    std::shared_ptr<options::ProgramOptions> options) {
  options->addOption(
      "--collection",
      "restrict to collection name (can be specified multiple times)",
      new VectorParameter<StringParameter>(&_collections));

  options->addOption("--graph-name", "name of a graph to export",
                     new StringParameter(&_graphName));

  options->addOption("--xgmml-label-only", "export only xgmml label",
                     new BooleanParameter(&_xgmmlLabelOnly));

  options->addOption("--xgmml-label-attribute", "specify document attribute that will be the xgmml label",
                     new StringParameter(&_xgmmlLabelAttribute));

  options->addOption("--output-directory", "output directory",
                     new StringParameter(&_outputDirectory));

  options->addOption("--overwrite", "overwrite data in output directory",
                     new BooleanParameter(&_overwrite));

  options->addOption("--progress", "show progress",
                     new BooleanParameter(&_progress));

  std::unordered_set<std::string> exportsWithUpperCase = {"json", "jsonl", "xgmml",
                                                          "JSON", "JSONL", "XGMML"};
  std::unordered_set<std::string> exports = {"json", "jsonl", "xgmml"};
  std::vector<std::string> exportsVector(exports.begin(), exports.end());
  std::string exportsJoined = StringUtils::join(exportsVector, ", ");
  options->addOption(
      "--type", "type of export (" + exportsJoined + ")",
      new DiscreteValuesParameter<StringParameter>(&_typeExport, exportsWithUpperCase));
}

void ExportFeature::validateOptions(
    std::shared_ptr<options::ProgramOptions> options) {
  auto const& positionals = options->processingResult()._positionals;
  size_t n = positionals.size();

  if (1 == n) {
    _outputDirectory = positionals[0];
  } else if (1 < n) {
    LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at most one directory, got " +
                                        StringUtils::join(positionals, ", ");
    FATAL_ERROR_EXIT();
  }

  // trim trailing slash from path because it may cause problems on ...
  // Windows
  if (!_outputDirectory.empty() &&
      _outputDirectory.back() == TRI_DIR_SEPARATOR_CHAR) {
    TRI_ASSERT(_outputDirectory.size() > 0);
    _outputDirectory.pop_back();
  }

  if (_graphName.empty() && _collections.empty()) {
    LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting at least one collection or one graph name";
    FATAL_ERROR_EXIT();
  }

  std::transform(_typeExport.begin(), _typeExport.end(), _typeExport.begin(), ::tolower);

  if (_typeExport == "xgmml" && _graphName.empty()) {
    LOG_TOPIC(FATAL, Logger::CONFIG) << "expecting a graph name to dump a graph";
    FATAL_ERROR_EXIT();
  }
}

void ExportFeature::prepare() {
  bool isDirectory = false;
  bool isEmptyDirectory = false;

  if (!_outputDirectory.empty()) {
    isDirectory = TRI_IsDirectory(_outputDirectory.c_str());

    if (isDirectory) {
      std::vector<std::string> files(TRI_FullTreeDirectory(_outputDirectory.c_str()));
      // we don't care if the target directory is empty
      isEmptyDirectory = (files.size() <= 1);  // TODO: TRI_FullTreeDirectory always returns at least one element (""), even if directory is empty?
    }
  }

  if (_outputDirectory.empty() ||
      (TRI_ExistsFile(_outputDirectory.c_str()) && !isDirectory)) {
    LOG_TOPIC(FATAL, Logger::SYSCALL) << "cannot write to output directory '" << _outputDirectory
                                      << "'";
    FATAL_ERROR_EXIT();
  }

  if (isDirectory && !isEmptyDirectory && !_overwrite) {
    LOG_TOPIC(FATAL, Logger::SYSCALL) << "output directory '" << _outputDirectory
                                      << "' already exists. use \"--overwrite true\" to "
                                         "overwrite data in it";
    FATAL_ERROR_EXIT();
  }

  if (!isDirectory) {
    long systemError;
    std::string errorMessage;
    int res = TRI_CreateDirectory(_outputDirectory.c_str(), systemError,
                                  errorMessage);

    if (res != TRI_ERROR_NO_ERROR) {
      LOG_TOPIC(ERR, Logger::SYSCALL) << "unable to create output directory '" << _outputDirectory
                                      << "': " << errorMessage;
      FATAL_ERROR_EXIT();
    }
  }
}

void ExportFeature::start() {
  ClientFeature* client = application_features::ApplicationServer::getFeature<ClientFeature>("Client");

  int ret = EXIT_SUCCESS;
  *_result = ret;

  std::unique_ptr<SimpleHttpClient> httpClient;

  try {
    httpClient = client->createHttpClient();
  } catch (...) {
    LOG_TOPIC(FATAL, Logger::COMMUNICATION) << "cannot create server connection, giving up!";
    FATAL_ERROR_EXIT();
  }

  httpClient->setLocationRewriter(static_cast<void*>(client), &rewriteLocation);
  httpClient->setUserNamePassword("/", client->username(), client->password());

  // must stay here in order to establish the connection
  httpClient->getServerVersion();

  if (!httpClient->isConnected()) {
    LOG_TOPIC(ERR, Logger::COMMUNICATION) << "Could not connect to endpoint '" << client->endpoint()
                                          << "', database: '" << client->databaseName() << "', username: '"
                                          << client->username() << "'";
    LOG_TOPIC(FATAL, Logger::COMMUNICATION) << httpClient->getErrorMessage() << "'";
    FATAL_ERROR_EXIT();
  }

  // successfully connected
  std::cout << "Connected to ArangoDB '"
            << httpClient->getEndpointSpecification() << "', version "
            << httpClient->getServerVersion() << ", database: '"
            << client->databaseName() << "', username: '" << client->username()
            << "'" << std::endl;

  uint64_t exportedSize = 0;

  if (_typeExport == "json" || _typeExport == "jsonl") {
    if (_collections.size()) {
      collectionExport(httpClient.get());

      for (auto const& collection : _collections) {
        std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR + collection + "." + _typeExport;
        int64_t fileSize = TRI_SizeFile(filePath.c_str());

        if (0 < fileSize) {
          exportedSize += fileSize;
        }
      }
    }
  } else if (_typeExport == "xgmml" && _graphName.size()) {
    graphExport(httpClient.get());
    std::string filePath = _outputDirectory + TRI_DIR_SEPARATOR_STR + _graphName + "." + _typeExport;
    int64_t fileSize = TRI_SizeFile(filePath.c_str());

    if (0 < fileSize) {
      exportedSize += fileSize;
    }
  }

  std::cout << "Processed " << _collections.size() << " collection(s), wrote " << exportedSize << " Byte(s), " << _httpRequestsDone << " HTTP request(s)" << std::endl;

  *_result = ret;
}

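collectionExport() below drives the whole export through the regular AQL cursor API: one POST to _api/cursor creates the cursor, and as long as the response still carries an "id", further batches are fetched with PUT /_api/cursor/<id>. The loop, isolated from the file handling (a sketch; handleBatch() is a hypothetical consumer, httpCall() and the `post` body are the helpers defined in this file):

    // Sketch: cursor pagination as used by collectionExport() below.
    std::shared_ptr<VPackBuilder> parsed =
        httpCall(httpClient, "_api/cursor", rest::RequestType::POST, post.toJson());
    VPackSlice body = parsed->slice();
    handleBatch(VPackArrayIterator(body.get("result")));  // hypothetical consumer
    while (body.hasKey("id")) {  // "id" present => more batches on the server
      parsed = httpCall(httpClient, "/_api/cursor/" + body.get("id").copyString(),
                        rest::RequestType::PUT);
      body = parsed->slice();
      handleBatch(VPackArrayIterator(body.get("result")));
    }
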
void ExportFeature::collectionExport(SimpleHttpClient* httpClient) {
|
||||
std::string errorMsg;
|
||||
|
||||
for (auto const& collection : _collections) {
|
||||
if (_progress) {
|
||||
std::cout << "# Exporting collection '" << collection << "'..." << std::endl;
|
||||
}
|
||||
|
||||
_currentCollection = collection;
|
||||
|
||||
std::string fileName =
|
||||
_outputDirectory + TRI_DIR_SEPARATOR_STR + collection + "." + _typeExport;
|
||||
|
||||
// remove an existing file first
|
||||
if (TRI_ExistsFile(fileName.c_str())) {
|
||||
TRI_UnlinkFile(fileName.c_str());
|
||||
}
|
||||
|
||||
int fd = -1;
|
||||
TRI_DEFER(TRI_CLOSE(fd));
|
||||
|
||||
std::string const url = "_api/cursor";
|
||||
|
||||
VPackBuilder post;
|
||||
post.openObject();
|
||||
post.add("query", VPackValue("FOR doc IN @@collection RETURN doc"));
|
||||
post.add("bindVars", VPackValue(VPackValueType::Object));
|
||||
post.add("@collection", VPackValue(collection));
|
||||
post.close();
|
||||
post.close();
|
||||
|
||||
std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
|
||||
VPackSlice body = parsedBody->slice();
|
||||
|
||||
fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC,
|
||||
S_IRUSR | S_IWUSR);
|
||||
|
||||
if (fd < 0) {
|
||||
errorMsg = "cannot write to file '" + fileName + "'";
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
|
||||
}
|
||||
|
||||
_firstLine = true;
|
||||
if (_typeExport == "json") {
|
||||
std::string openingBracket = "[\n";
|
||||
writeToFile(fd, openingBracket, fileName);
|
||||
}
|
||||
|
||||
writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);
|
||||
|
||||
while (body.hasKey("id")) {
|
||||
std::string const url = "/_api/cursor/"+body.get("id").copyString();
|
||||
parsedBody = httpCall(httpClient, url, rest::RequestType::PUT);
|
||||
body = parsedBody->slice();
|
||||
|
||||
writeCollectionBatch(fd, VPackArrayIterator(body.get("result")), fileName);
|
||||
}
|
||||
if (_typeExport == "json") {
|
||||
std::string closingBracket = "]\n";
|
||||
writeToFile(fd, closingBracket , fileName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ExportFeature::writeCollectionBatch(int fd, VPackArrayIterator it, std::string const& fileName) {
|
||||
std::string line;
|
||||
|
||||
for (auto const& doc : it) {
|
||||
line.clear();
|
||||
|
||||
if (_firstLine && _typeExport == "json") {
|
||||
_firstLine = false;
|
||||
} else if(!_firstLine && _typeExport == "json") {
|
||||
line.push_back(',');
|
||||
}
|
||||
|
||||
line += doc.toJson();
|
||||
line.push_back('\n');
|
||||
writeToFile(fd, line, fileName);
|
||||
}
|
||||
}
|
||||
|
||||
void ExportFeature::writeToFile(int fd, std::string& line, std::string const& fileName) {
|
||||
if (!TRI_WritePointer(fd, line.c_str(), line.size())) {
|
||||
std::string errorMsg = "cannot write to file '" + fileName + "'";
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<VPackBuilder> ExportFeature::httpCall(SimpleHttpClient* httpClient, std::string const& url, rest::RequestType requestType, std::string postBody) {
  std::string errorMsg;

  std::unique_ptr<SimpleHttpResult> response(
      httpClient->request(requestType, url, postBody.c_str(), postBody.size()));
  _httpRequestsDone++;

  if (response == nullptr || !response->isComplete()) {
    errorMsg =
        "got invalid response from server: " + httpClient->getErrorMessage();
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
  }

  std::shared_ptr<VPackBuilder> parsedBody;

  if (response->wasHttpError()) {
    if (response->getHttpReturnCode() == 404) {
      if (_currentGraph.size()) {
        LOG_TOPIC(FATAL, Logger::CONFIG) << "Graph '" << _currentGraph << "' not found.";
      } else if (_currentCollection.size()) {
        LOG_TOPIC(FATAL, Logger::CONFIG) << "Collection '" << _currentCollection << "' not found.";
      }

      FATAL_ERROR_EXIT();
    } else {
      parsedBody = response->getBodyVelocyPack();
      std::cout << parsedBody->toJson() << std::endl;
      errorMsg = "got invalid response from server: HTTP " +
                 StringUtils::itoa(response->getHttpReturnCode()) + ": " +
                 response->getHttpReturnMessage();
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
    }
  }

  try {
    parsedBody = response->getBodyVelocyPack();
  } catch (...) {
    errorMsg = "got malformed JSON response from server";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
  }

  VPackSlice body = parsedBody->slice();

  if (!body.isObject()) {
    errorMsg = "got malformed JSON response from server";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, errorMsg);
  }

  return parsedBody;
}

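// Exports a named graph to XGMML. If no collections were specified
// explicitly, the graph's edge and vertex collections are first resolved
// from its edge definitions via GET /_api/gharial/<graph>.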
void ExportFeature::graphExport(SimpleHttpClient* httpClient) {
  std::string errorMsg;

  _currentGraph = _graphName;

  if (_collections.empty()) {
    if (_progress) {
      std::cout << "# Export graph '" << _graphName << "'" << std::endl;
    }
    std::string const url = "/_api/gharial/" + _graphName;
    std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::GET);
    VPackSlice body = parsedBody->slice();

    std::unordered_set<std::string> collections;

    for (auto const& edgeDefs : VPackArrayIterator(body.get("graph").get("edgeDefinitions"))) {
      collections.insert(edgeDefs.get("collection").copyString());

      for (auto const& from : VPackArrayIterator(edgeDefs.get("from"))) {
        collections.insert(from.copyString());
      }

      for (auto const& to : VPackArrayIterator(edgeDefs.get("to"))) {
        collections.insert(to.copyString());
      }
    }

    for (auto const& cn : collections) {
      _collections.push_back(cn);
    }
  } else {
    if (_progress) {
      std::cout << "# Export graph with collections " << StringUtils::join(_collections, ", ") << " as '" << _graphName << "'" << std::endl;
    }
  }

  std::string fileName = _outputDirectory + TRI_DIR_SEPARATOR_STR + _graphName + "." + _typeExport;

  // remove an existing file first
  if (TRI_ExistsFile(fileName.c_str())) {
    TRI_UnlinkFile(fileName.c_str());
  }

  int fd = TRI_CREATE(fileName.c_str(), O_CREAT | O_EXCL | O_RDWR | TRI_O_CLOEXEC, S_IRUSR | S_IWUSR);

  if (fd < 0) {
    errorMsg = "cannot write to file '" + fileName + "'";
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CANNOT_WRITE_FILE, errorMsg);
  }
  TRI_DEFER(TRI_CLOSE(fd));

  std::string xmlHeader = R"(<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<graph label=")";
  writeToFile(fd, xmlHeader, fileName);
  writeToFile(fd, _graphName, fileName);

  xmlHeader = R"("
xmlns="http://www.cs.rpi.edu/XGMML"
directed="1">
)";
  writeToFile(fd, xmlHeader, fileName);

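  // at this point the file begins like this (for a graph named "myGraph"):
  //   <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
  //   <graph label="myGraph"
  //   xmlns="http://www.cs.rpi.edu/XGMML"
  //   directed="1">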
  for (auto const& collection : _collections) {
    if (_progress) {
      std::cout << "# Exporting collection '" << collection << "'..." << std::endl;
    }

    std::string const url = "_api/cursor";

    VPackBuilder post;
    post.openObject();
    post.add("query", VPackValue("FOR doc IN @@collection RETURN doc"));
    post.add("bindVars", VPackValue(VPackValueType::Object));
    post.add("@collection", VPackValue(collection));
    post.close();  // closes the bindVars object
    post.close();  // closes the top-level object

    std::shared_ptr<VPackBuilder> parsedBody = httpCall(httpClient, url, rest::RequestType::POST, post.toJson());
    VPackSlice body = parsedBody->slice();

    writeGraphBatch(fd, VPackArrayIterator(body.get("result")), fileName);

    while (body.hasKey("id")) {
      std::string const url = "/_api/cursor/" + body.get("id").copyString();
      parsedBody = httpCall(httpClient, url, rest::RequestType::PUT);
      body = parsedBody->slice();

      writeGraphBatch(fd, VPackArrayIterator(body.get("result")), fileName);
    }
  }
  std::string closingGraphTag = "</graph>\n";
  writeToFile(fd, closingGraphTag, fileName);

  if (_skippedDeepNested) {
    std::cout << "skipped " << _skippedDeepNested << " deep nested objects / arrays" << std::endl;
  }
}

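// Writes one cursor batch as XGMML. Documents that have a "_from" attribute
// become <edge> elements (source/target taken from _from/_to), all others
// become <node> elements. The label falls back to "Default-Label" if the
// configured label attribute is missing or not a string; unless label-only
// output was requested (_xgmmlLabelOnly), the remaining attributes are
// emitted as nested <att> elements.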
void ExportFeature::writeGraphBatch(int fd, VPackArrayIterator it, std::string const& fileName) {
  std::string xmlTag;

  for (auto const& doc : it) {
    if (doc.hasKey("_from")) {
      xmlTag = "<edge label=\"" + encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) && doc.get(_xgmmlLabelAttribute).isString() ? doc.get(_xgmmlLabelAttribute).copyString() : "Default-Label") +
               "\" source=\"" + encode_char_entities(doc.get("_from").copyString()) + "\" target=\"" + encode_char_entities(doc.get("_to").copyString()) + "\"";
      writeToFile(fd, xmlTag, fileName);
      if (!_xgmmlLabelOnly) {
        xmlTag = ">\n";
        writeToFile(fd, xmlTag, fileName);

        for (auto const& it : VPackObjectIterator(doc)) {
          xmlTag = encode_char_entities(it.key.copyString());
          xgmmlWriteOneAtt(fd, fileName, it.value, xmlTag);
        }

        xmlTag = "</edge>\n";
        writeToFile(fd, xmlTag, fileName);
      } else {
        xmlTag = " />\n";
        writeToFile(fd, xmlTag, fileName);
      }
    } else {
      xmlTag = "<node label=\"" + encode_char_entities(doc.hasKey(_xgmmlLabelAttribute) && doc.get(_xgmmlLabelAttribute).isString() ? doc.get(_xgmmlLabelAttribute).copyString() : "Default-Label") +
               "\" id=\"" + encode_char_entities(doc.get("_id").copyString()) + "\"";
      writeToFile(fd, xmlTag, fileName);
      if (!_xgmmlLabelOnly) {
        xmlTag = ">\n";
        writeToFile(fd, xmlTag, fileName);

        for (auto const& it : VPackObjectIterator(doc)) {
          xmlTag = encode_char_entities(it.key.copyString());
          xgmmlWriteOneAtt(fd, fileName, it.value, xmlTag);
        }

        xmlTag = "</node>\n";
        writeToFile(fd, xmlTag, fileName);
      } else {
        xmlTag = " />\n";
        writeToFile(fd, xmlTag, fileName);
      }
    }
  }
}

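// Emits one XGMML <att> element for a single document attribute. Scalars map
// to the XGMML types integer/real/boolean/string; top-level arrays and
// objects become <att type="list"> whose members are written recursively.
// Anything nested more than one level deep is counted and skipped. The system
// attributes _id, _key, _rev, _from and _to are omitted at the top level.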
void ExportFeature::xgmmlWriteOneAtt(int fd, std::string const& fileName, VPackSlice const& slice, std::string& name, int deep) {
  std::string value, type, xmlTag;

  if (deep == 0 &&
      (name == "_id" || name == "_key" || name == "_rev" || name == "_from" || name == "_to")) {
    return;
  }

  if (slice.isInteger()) {
    type = "integer";
    value = slice.toString();
  } else if (slice.isDouble()) {
    type = "real";
    value = slice.toString();
  } else if (slice.isBool()) {
    type = "boolean";
    value = slice.toString();
  } else if (slice.isString()) {
    type = "string";
    value = slice.copyString();
  } else if (slice.isArray() || slice.isObject()) {
    if (0 < deep) {
      if (_skippedDeepNested == 0) {
        std::cout << "Warning: skip deep nested objects / arrays" << std::endl;
      }
      _skippedDeepNested++;
      return;
    }
  } else {
    xmlTag = " <att name=\"" + name + "\" type=\"string\" value=\"" + encode_char_entities(slice.toString()) + "\"/>\n";
    writeToFile(fd, xmlTag, fileName);
    return;
  }

  if (!type.empty()) {
    xmlTag = " <att name=\"" + name + "\" type=\"" + type + "\" value=\"" + encode_char_entities(value) + "\"/>\n";
    writeToFile(fd, xmlTag, fileName);
  } else if (slice.isArray()) {
    xmlTag = " <att name=\"" + name + "\" type=\"list\">\n";
    writeToFile(fd, xmlTag, fileName);

    for (auto const& val : VPackArrayIterator(slice)) {
      xgmmlWriteOneAtt(fd, fileName, val, name, deep + 1);
    }

    xmlTag = " </att>\n";
    writeToFile(fd, xmlTag, fileName);
  } else if (slice.isObject()) {
    xmlTag = " <att name=\"" + name + "\" type=\"list\">\n";
    writeToFile(fd, xmlTag, fileName);

    for (auto const& it : VPackObjectIterator(slice)) {
      std::string name = encode_char_entities(it.key.copyString());
      xgmmlWriteOneAtt(fd, fileName, it.value, name, deep + 1);
    }

    xmlTag = " </att>\n";
    writeToFile(fd, xmlTag, fileName);
  }
}

@@ -0,0 +1,83 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2016 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Manuel Baesler
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGODB_EXPORT_EXPORT_FEATURE_H
#define ARANGODB_EXPORT_EXPORT_FEATURE_H 1

#include "ApplicationFeatures/ApplicationFeature.h"
#include "V8Client/ArangoClientHelper.h"
#include "lib/Rest/CommonDefines.h"
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

namespace arangodb {
namespace httpclient {
class GeneralClientConnection;
class SimpleHttpClient;
class SimpleHttpResult;
}

class ExportFeature final : public application_features::ApplicationFeature,
                            public ArangoClientHelper {
 public:
  ExportFeature(application_features::ApplicationServer* server,
                int* result);

 public:
  void collectOptions(std::shared_ptr<options::ProgramOptions>) override;
  void validateOptions(
      std::shared_ptr<options::ProgramOptions> options) override;
  void prepare() override final;
  void start() override final;

 private:
  void collectionExport(httpclient::SimpleHttpClient* httpClient);
  void writeCollectionBatch(int fd, VPackArrayIterator it, std::string const& fileName);
  void graphExport(httpclient::SimpleHttpClient* httpClient);
  void writeGraphBatch(int fd, VPackArrayIterator it, std::string const& fileName);
  void xgmmlWriteOneAtt(int fd, std::string const& fileName, VPackSlice const& slice, std::string& name, int deep = 0);

  void writeToFile(int fd, std::string& string, std::string const& fileName);
  std::shared_ptr<VPackBuilder> httpCall(httpclient::SimpleHttpClient* httpClient, std::string const& url, arangodb::rest::RequestType, std::string postBody = "");

 private:
  std::vector<std::string> _collections;
  std::string _graphName;
  std::string _xgmmlLabelAttribute;
  std::string _typeExport;
  bool _xgmmlLabelOnly;

  std::string _outputDirectory;
  bool _overwrite;
  bool _progress;

  bool _firstLine;
  uint64_t _skippedDeepNested;
  uint64_t _httpRequestsDone;
  std::string _currentCollection;
  std::string _currentGraph;

  int* _result;
};
}

#endif

@@ -0,0 +1,83 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////

#include "Basics/Common.h"
#include "Basics/directories.h"

#include "ApplicationFeatures/ConfigFeature.h"
#include "ApplicationFeatures/GreetingsFeature.h"
#include "ApplicationFeatures/ShutdownFeature.h"
#include "ApplicationFeatures/TempFeature.h"
#include "ApplicationFeatures/VersionFeature.h"
#include "Basics/ArangoGlobalContext.h"
#include "Export/ExportFeature.h"
#include "Logger/Logger.h"
#include "Logger/LoggerFeature.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Random/RandomFeature.h"
#include "Shell/ClientFeature.h"
#include "Ssl/SslFeature.h"

using namespace arangodb;
using namespace arangodb::application_features;

int main(int argc, char* argv[]) {
  ArangoGlobalContext context(argc, argv, BIN_DIRECTORY);
  context.installHup();

  std::shared_ptr<options::ProgramOptions> options(new options::ProgramOptions(
      argv[0], "Usage: arangoexport [<options>]", "For more information use:", BIN_DIRECTORY));

  ApplicationServer server(options, BIN_DIRECTORY);

  int ret = EXIT_SUCCESS;

  server.addFeature(new ClientFeature(&server));
  server.addFeature(new ConfigFeature(&server, "arangoexport"));
  server.addFeature(new GreetingsFeature(&server, "arangoexport"));
  server.addFeature(new ExportFeature(&server, &ret));
  server.addFeature(new LoggerFeature(&server, false));
  server.addFeature(new RandomFeature(&server));
  server.addFeature(new ShutdownFeature(&server, {"Export"}));
  server.addFeature(new SslFeature(&server));
  server.addFeature(new TempFeature(&server, "arangoexport"));
  server.addFeature(new VersionFeature(&server));

  try {
    server.run(argc, argv);
    if (server.helpShown()) {
      // --help was displayed
      ret = EXIT_SUCCESS;
    }
  } catch (std::exception const& ex) {
    LOG_TOPIC(ERR, Logger::STARTUP) << "arangoexport terminated because of an unhandled exception: "
                                    << ex.what();
    ret = EXIT_FAILURE;
  } catch (...) {
    LOG_TOPIC(ERR, Logger::STARTUP) << "arangoexport terminated because of an unhandled exception of "
                                       "unknown type";
    ret = EXIT_FAILURE;
  }

  return context.exit(ret);
}

@@ -24,6 +24,11 @@ install_debinfo(
  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}"
  "${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"
  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}"
  "${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}")
install_debinfo(
  "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip"
  "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}"

@@ -61,6 +61,20 @@ install(
install_config(arangorestore)


set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)
  execute_process(COMMAND "rm" -f ${STRIP_FILE})
  execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR})
  execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE})
  set(FILE ${STRIP_FILE})
endif()
install(
  PROGRAMS ${FILE}
  DESTINATION ${CMAKE_INSTALL_BINDIR})
install_config(arangoexport)


set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX})
if (NOT MSVC AND CMAKE_STRIP)

@@ -0,0 +1,10 @@
# config file for arangoexport

[server]
endpoint = tcp://127.0.0.1:8529
authentication = true
# username = root
# password =

[log]
file = -

@@ -0,0 +1,7 @@
[server]
authentication = false
# username = root
# password =

[log]
file = -

@@ -177,13 +177,13 @@ function delete_api_index (req, res) {
     return;
   }
 
-  var iid = parseInt(decodeURIComponent(req.suffix[1]), 10);
-  var dropped = collection.dropIndex(name + '/' + iid);
+  var id = name + '/' + req.suffix[1];
+  var dropped = collection.dropIndex(id);
 
   if (dropped) {
-    actions.resultOk(req, res, actions.HTTP_OK, { id: name + '/' + iid });
+    actions.resultOk(req, res, actions.HTTP_OK, { id });
   } else {
-    actions.indexNotFound(req, res, collection, name + '/' + iid);
+    actions.indexNotFound(req, res, collection, id);
   }
 }
 
@@ -230,8 +230,7 @@ actions.defineHttp({
       actions.resultUnsupported(req, res);
     } else {
       var name = body.collection;
-      var id = parseInt(name, 10) || name;
-      var collection = db._collection(id);
+      var collection = db._collection(name);
 
       if (collection === null) {
         actions.collectionNotFound(req, res, name);

@@ -38,6 +38,7 @@ const functionsDocumentation = {
  'cluster_sync': 'cluster sync tests',
  'dump': 'dump tests',
  'dump_authentication': 'dump tests with authentication',
  'export': 'export formats tests',
  'dfdb': 'start test',
  'endpoints': 'endpoints tests',
  'foxx_manager': 'foxx manager tests',

@@ -246,6 +247,7 @@ let ARANGODUMP_BIN;
let ARANGOD_BIN;
let ARANGOIMP_BIN;
let ARANGORESTORE_BIN;
let ARANGOEXPORT_BIN;
let ARANGOSH_BIN;
let CONFIG_ARANGODB_DIR;
let CONFIG_RELATIVE_DIR;

@@ -1994,6 +1996,7 @@ let allTests = [
  'config',
  'dump',
  'dump_authentication',
  'export',
  'dfdb',
  'endpoints',
  'http_server',

@@ -2706,6 +2709,7 @@ testFuncs.config = function (options) {
    'arangodump',
    'arangoimp',
    'arangorestore',
    'arangoexport',
    'arangosh',
    'arango-dfdb',
    'foxx-manager'

@@ -2877,6 +2881,98 @@ testFuncs.dump = function (options) {
  return results;
};

// //////////////////////////////////////////////////////////////////////////////
// / @brief TEST: export
// //////////////////////////////////////////////////////////////////////////////

testFuncs.export = function (options) {
  const cluster = options.cluster ? '-cluster' : '';

  print(CYAN + 'export tests...' + RESET);

  const instanceInfo = startInstance('tcp', options, {}, 'export');

  if (instanceInfo === false) {
    return {
      export: {
        status: false,
        message: 'failed to start server!'
      }
    };
  }

  print(CYAN + Date() + ': Setting up' + RESET);

  const results = {};

  function shutdown() {
    print(CYAN + 'Shutting down...' + RESET);
    shutdownInstance(instanceInfo, options);
    print(CYAN + 'done.' + RESET);

    print();

    return results;
  }

  results.setup = runInArangosh(options, instanceInfo, makePathUnix('js/server/tests/export/export-setup' + cluster + '.js'));
  if (!checkInstanceAlive(instanceInfo, options) || true !== results.setup.status) {
    return shutdown();
  }

  print(CYAN + Date() + ': Export data' + RESET);

  results.export = (() => {
    const args = {
      'configuration': fs.join(CONFIG_DIR, 'arangoexport.conf'),
      'server.username': options.username,
      'server.password': options.password,
      'server.endpoint': instanceInfo.endpoint,
      'server.database': 'UnitTestsExport',
      'collection': 'UnitTestsExport',
      'type': 'json',
      'overwrite': true,
      'output-directory': 'export'
    };

    return executeAndWait(ARANGOEXPORT_BIN, toArgv(args), options);
  })();

  /*
  results.dump = runArangoDumpRestore(options, instanceInfo, 'dump',
    'UnitTestsDumpSrc');

  if (checkInstanceAlive(instanceInfo, options) &&
      (results.dump.status === true)) {
    print(CYAN + Date() + ': Dump and Restore - restore' + RESET);

    results.restore = runArangoDumpRestore(options, instanceInfo, 'restore',
      'UnitTestsDumpDst');

    if (checkInstanceAlive(instanceInfo, options) &&
        (results.restore.status === true)) {
      print(CYAN + Date() + ': Dump and Restore - dump after restore' + RESET);

      results.test = runInArangosh(options, instanceInfo,
        makePathUnix('js/server/tests/dump/dump' + cluster + '.js'), {
          'server.database': 'UnitTestsDumpDst'
        });

      if (checkInstanceAlive(instanceInfo, options) &&
          (results.test.status === true)) {
        print(CYAN + Date() + ': Dump and Restore - teardown' + RESET);

        results.tearDown = runInArangosh(options, instanceInfo,
          makePathUnix('js/server/tests/dump/dump-teardown' + cluster + '.js'));
      }
    }
  }*/

  return shutdown();
};

// //////////////////////////////////////////////////////////////////////////////
// / @brief TEST: dump_authentication
// //////////////////////////////////////////////////////////////////////////////

@@ -4270,6 +4366,7 @@ function unitTest (cases, options) {
  ARANGOD_BIN = fs.join(BIN_DIR, 'arangod' + executable_ext);
  ARANGOIMP_BIN = fs.join(BIN_DIR, 'arangoimp' + executable_ext);
  ARANGORESTORE_BIN = fs.join(BIN_DIR, 'arangorestore' + executable_ext);
  ARANGOEXPORT_BIN = fs.join(BIN_DIR, 'arangoexport' + executable_ext);
  ARANGOSH_BIN = fs.join(BIN_DIR, 'arangosh' + executable_ext);

  CONFIG_ARANGODB_DIR = fs.join(TOP_DIR, builddir, 'etc', 'arangodb3');

@@ -4287,6 +4384,7 @@ function unitTest (cases, options) {
    ARANGOD_BIN,
    ARANGOIMP_BIN,
    ARANGORESTORE_BIN,
    ARANGOEXPORT_BIN,
    ARANGOSH_BIN];
  for (let b = 0; b < checkFiles.length; ++b) {
    if (!fs.isFile(checkFiles[b])) {

@@ -66,7 +66,7 @@ function DatabaseSuite () {
 ////////////////////////////////////////////////////////////////////////////////
 
     testVersion : function () {
-      assertMatch(/(^3\.[1])|(devel$)/, internal.db._version());
+      assertMatch(/(^@ARANGODB_VERSION_MAJOR@\.@ARANGODB_VERSION_MINOR@)|(devel$)/, internal.db._version());
     },
 
 ////////////////////////////////////////////////////////////////////////////////

@@ -0,0 +1,51 @@
/*jshint globalstrict:false, strict:false, maxlen:4000, unused:false */

////////////////////////////////////////////////////////////////////////////////
/// @brief setup collections for export tests
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Manuel Baesler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

'use strict';

{
  const db = require("@arangodb").db;

  try {
    db._dropDatabase("UnitTestsExport");
  } catch (e) {}

  db._createDatabase("UnitTestsExport");

  db._useDatabase("UnitTestsExport");

  const col = db._create("UnitTestsExport");
  for (let i = 0; i < 100; ++i) {
    col.save({ _key: "export" + i, value1: i, value2: "this is export", value3: "export" + i });
  }
}

return {
  status: true
};

@@ -0,0 +1,51 @@
/*jshint globalstrict:false, strict:false, maxlen:4000, unused:false */

////////////////////////////////////////////////////////////////////////////////
/// @brief setup collections for export tests
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Manuel Baesler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

'use strict';

{
  const db = require("@arangodb").db;

  try {
    db._dropDatabase("UnitTestsExport");
  } catch (e) {}

  db._createDatabase("UnitTestsExport");

  db._useDatabase("UnitTestsExport");

  const col = db._create("UnitTestsExport");
  for (let i = 0; i < 100; ++i) {
    col.save({ _key: "export" + i, value1: i, value2: "this is export", value3: "export" + i });
  }
}

return {
  status: true
};