mirror of https://gitee.com/bigwinds/arangodb
fix dump&restore of smart graphs (#5065)
* Added tests for dump-restore of SmartGraphs
* Arangosh will now expose isSmart and the smartGraphAttribute on properties
* RestReplicationHandler will now ignore smart-graph collections unless you execute it with force
* Added changelog
* Reactivated original mmfiles dump/restore test
* Skip hidden smart graph collections in arangodump
* Do not dump shadowCollections metadata of smart edge collections
* Cluster optimizer rules for sorted gather nodes now handle virtual edge collections correctly
* Added a dump/restore test for smartgraphs in rocksdb as well
* Deactivated checks for writesExecuted statistics in dump/restore tests for smartgraphs mmfiles
* Really exclude shadowCollections
* Reduced loglevel
* Added tests
* Don't change single-server behaviour
* Fix tests for omitted shadowCollections and hidden collections
* Activated statistics in MMFiles dump test again and included isEnterprise in rocksdb dump test
* A modification node can now disableStatistics, which means it does not contribute to query->extras(); this is only relevant in the SmartGraph case so far
* Added a test to dump & restore satellite collections
* Bugfix: restore satellite collections properly
* Added regression test for internal issue #2237
* Fix bug #2237
* Updated CHANGELOG
* Copied dump/restore tests to rocksdb
* Removed enterprise test
* Added inline comment for smart-edge collections in optimizer rules
* Removed duplicate CHANGELOG entry
* Simplified removal of shadowCollections
This commit is contained in:
parent c07a706948
commit 5decb66d01

CHANGELOG: 17 changed lines
@@ -1,12 +1,25 @@
 v3.3.6.1 (XXXX-XX-XX)
 ---------------------
 
+* Fixed internal issue #2237: AQL queries on collections with replicationFactor:
+  "satellite" crashed arangod in single server mode
+
+* Fixed restore of satellite collections: replicationFactor was set to 1 during
+  restore
+
+* Fixed dump and restore of smart graphs:
+  a) The dump will not include the hidden shadow collections anymore; they were
+     dumped accidentally and only contain duplicated data.
+  b) Restore will now ignore hidden shadow collections as all data is contained
+     in the smart-edge collection. You can manually include these collections from
+     an old dump (3.3.5 or earlier) by using `--force`.
+  c) Restore of a smart graph will now create smart collections properly instead
+     of running into `TIMEOUT_IN_CLUSTER_OPERATION`.
+
 * fixed issue in AQL query optimizer rule "restrict-to-single-shard", which
   may have sent documents to a wrong shard in AQL INSERT queries that specified
   the value for `_key` using an expression (and not a constant value)
 
-* fix restoring of smart graph edge collections (may have run into timeout before)
-
 * added /_admin/status for debugging
 
 * added ArangoShell helper function for packaging all information about an
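For reference, a minimal arangosh sketch of the satellite behaviour the entries above describe (assumes an enterprise cluster; the collection name "exampleSat" is illustrative):

// Sketch: a satellite collection reports replicationFactor "satellite".
// Before this fix, arangorestore recreated such collections with
// replicationFactor 1.
const db = require("internal").db;
db._create("exampleSat", { replicationFactor: "satellite" });
db.exampleSat.properties().replicationFactor; // "satellite"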
@@ -56,7 +56,7 @@ EnumerateCollectionBlock::EnumerateCollectionBlock(
 int EnumerateCollectionBlock::initialize() {
   DEBUG_BEGIN_BLOCK();
 
-  if (_collection->isSatellite()) {
+  if (ServerState::instance()->isRunningInCluster() && _collection->isSatellite()) {
     auto logicalCollection = _collection->getCollection();
     auto cid = logicalCollection->planId();
     auto dbName = logicalCollection->dbName();
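The added isRunningInCluster() guard is the fix for internal issue #2237: the satellite fast path must only be taken in a cluster. A hedged regression sketch for single-server mode (collection name illustrative):

// Regression sketch for internal issue #2237 (single server).
// A collection can carry replicationFactor "satellite" even outside a
// cluster, where the attribute has no effect.
const db = require("internal").db;
db._create("regression2237", { replicationFactor: "satellite" });
// Before the fix, this query crashed arangod in single-server mode:
db._query("FOR x IN regression2237 RETURN x").toArray();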
@@ -46,7 +46,8 @@ ModificationBlock::ModificationBlock(ExecutionEngine* engine,
       _outRegNew(ExecutionNode::MaxRegisterId),
       _collection(ep->_collection),
       _isDBServer(false),
-      _usesDefaultSharding(true) {
+      _usesDefaultSharding(true),
+      _countStats(ep->countStats()) {
 
   _trx->pinData(_collection->cid());
 
@@ -183,13 +184,17 @@ void ModificationBlock::handleResult(int code, bool ignoreErrors,
                                      std::string const* errorMessage) {
   if (code == TRI_ERROR_NO_ERROR) {
     // update the success counter
-    ++_engine->_stats.writesExecuted;
+    if (_countStats) {
+      ++_engine->_stats.writesExecuted;
+    }
     return;
   }
 
   if (ignoreErrors) {
     // update the ignored counter
-    ++_engine->_stats.writesIgnored;
+    if (_countStats) {
+      ++_engine->_stats.writesIgnored;
+    }
     return;
   }
 
@@ -209,18 +214,24 @@ void ModificationBlock::handleBabyResult(std::unordered_map<int, size_t> const&
   if (errorCounter.empty()) {
     // update the success counter
     // All successful.
-    _engine->_stats.writesExecuted += numBabies;
+    if (_countStats) {
+      _engine->_stats.writesExecuted += numBabies;
+    }
     return;
   }
   if (ignoreAllErrors) {
     for (auto const& pair : errorCounter) {
       // update the ignored counter
-      _engine->_stats.writesIgnored += pair.second;
+      if (_countStats) {
+        _engine->_stats.writesIgnored += pair.second;
+      }
       numBabies -= pair.second;
     }
 
     // update the success counter
-    _engine->_stats.writesExecuted += numBabies;
+    if (_countStats) {
+      _engine->_stats.writesExecuted += numBabies;
+    }
     return;
   }
   auto first = errorCounter.begin();
@@ -229,10 +240,14 @@ void ModificationBlock::handleBabyResult(std::unordered_map<int, size_t> const&
   if (errorCounter.size() == 1) {
     // We only have Document not found. Fix statistics and ignore
     // update the ignored counter
-    _engine->_stats.writesIgnored += first->second;
+    if (_countStats) {
+      _engine->_stats.writesIgnored += first->second;
+    }
     numBabies -= first->second;
     // update the success counter
-    _engine->_stats.writesExecuted += numBabies;
+    if (_countStats) {
+      _engine->_stats.writesExecuted += numBabies;
+    }
     return;
   }
@@ -73,6 +73,10 @@ class ModificationBlock : public ExecutionBlock {
 
   /// @brief whether or not the collection uses the default sharding attributes
   bool _usesDefaultSharding;
 
+  /// @brief whether this block contributes to statistics.
+  /// Will only be disabled in SmartGraph case.
+  bool _countStats;
 };
 
 class RemoveBlock : public ModificationBlock {
@@ -42,7 +42,8 @@ ModificationNode::ModificationNode(ExecutionPlan* plan,
       _outVariableOld(
           Variable::varFromVPack(plan->getAst(), base, "outVariableOld", Optional)),
       _outVariableNew(
-          Variable::varFromVPack(plan->getAst(), base, "outVariableNew", Optional)) {
+          Variable::varFromVPack(plan->getAst(), base, "outVariableNew", Optional)),
+      _countStats(base.get("countStats").getBool()) {
   TRI_ASSERT(_vocbase != nullptr);
   TRI_ASSERT(_collection != nullptr);
 }
@@ -55,6 +56,7 @@ void ModificationNode::toVelocyPackHelper(VPackBuilder& builder,
   // Now put info about vocbase and cid in there
   builder.add("database", VPackValue(_vocbase->name()));
   builder.add("collection", VPackValue(_collection->getName()));
+  builder.add("countStats", VPackValue(_countStats));
 
   // add out variables
   if (_outVariableOld != nullptr) {
@@ -66,6 +68,7 @@ void ModificationNode::toVelocyPackHelper(VPackBuilder& builder,
     _outVariableNew->toVelocyPack(builder);
   }
   builder.add(VPackValue("modificationFlags"));
+
   _options.toVelocyPack(builder);
 }
 
@@ -114,6 +117,9 @@ ExecutionNode* RemoveNode::clone(ExecutionPlan* plan, bool withDependencies,
 
   auto c = new RemoveNode(plan, _id, _vocbase, _collection, _options,
                           inVariable, outVariableOld);
+  if (!_countStats) {
+    c->disableStatistics();
+  }
 
   cloneHelper(c, withDependencies, withProperties);
 
@@ -153,6 +159,9 @@ ExecutionNode* InsertNode::clone(ExecutionPlan* plan, bool withDependencies,
 
   auto c = new InsertNode(plan, _id, _vocbase, _collection, _options,
                           inVariable, outVariableNew);
+  if (!_countStats) {
+    c->disableStatistics();
+  }
 
   cloneHelper(c, withDependencies, withProperties);
 
@@ -209,6 +218,9 @@ ExecutionNode* UpdateNode::clone(ExecutionPlan* plan, bool withDependencies,
   auto c =
       new UpdateNode(plan, _id, _vocbase, _collection, _options, inDocVariable,
                      inKeyVariable, outVariableOld, outVariableNew);
+  if (!_countStats) {
+    c->disableStatistics();
+  }
 
   cloneHelper(c, withDependencies, withProperties);
 
@@ -266,6 +278,9 @@ ExecutionNode* ReplaceNode::clone(ExecutionPlan* plan, bool withDependencies,
   auto c =
       new ReplaceNode(plan, _id, _vocbase, _collection, _options, inDocVariable,
                       inKeyVariable, outVariableOld, outVariableNew);
+  if (!_countStats) {
+    c->disableStatistics();
+  }
 
   cloneHelper(c, withDependencies, withProperties);
 
@@ -319,6 +334,9 @@ ExecutionNode* UpsertNode::clone(ExecutionPlan* plan, bool withDependencies,
   auto c = new UpsertNode(plan, _id, _vocbase, _collection, _options,
                           inDocVariable, insertVariable, updateVariable,
                           outVariableNew, _isReplace);
+  if (!_countStats) {
+    c->disableStatistics();
+  }
 
   cloneHelper(c, withDependencies, withProperties);
 
@@ -55,7 +55,8 @@ class ModificationNode : public ExecutionNode {
         _collection(collection),
         _options(options),
         _outVariableOld(outVariableOld),
-        _outVariableNew(outVariableNew) {
+        _outVariableNew(outVariableNew),
+        _countStats(true) {
     TRI_ASSERT(_vocbase != nullptr);
     TRI_ASSERT(_collection != nullptr);
   }
@@ -127,6 +128,12 @@ class ModificationNode : public ExecutionNode {
   /// @brief whether or not the node is a data modification node
   bool isModificationNode() const override { return true; }
 
+  /// @brief whether this node contributes to statistics. Only disabled in SmartGraph case
+  bool countStats() const { return _countStats; }
+
+  /// @brief Disable that this node is contributing to statistics. Only disabled in SmartGraph case
+  void disableStatistics() { _countStats = false; }
+
  protected:
   /// @brief _vocbase, the database
   TRI_vocbase_t* _vocbase;
 
@@ -142,6 +149,9 @@ class ModificationNode : public ExecutionNode {
 
   /// @brief output variable ($NEW)
   Variable const* _outVariableNew;
 
+  /// @brief whether this node contributes to statistics. Only disabled in SmartGraph case
+  bool _countStats;
 };
 
 /// @brief class RemoveNode
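The observable effect of countStats()/disableStatistics(): in the SmartGraph case, the extra modification nodes working on hidden sub-collections no longer contribute to the query statistics, so each logical write is counted once. A sketch in arangosh (the collection name and _from/_to values are illustrative; real smart edge documents must reference existing smart vertex ids):

// Sketch: one logical INSERT per iteration shows up once in the stats,
// even though a smart edge write also touches hidden sub-collections
// whose modification nodes have statistics disabled.
const res = db._query(`
  FOR i IN 0..99
    INSERT {_from: CONCAT("verts/", i), _to: CONCAT("verts/", i),
            value: TO_STRING(i)} INTO edges`);
res.getExtra().stats.writesExecuted; // 100, not a multiple of it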
@@ -2916,7 +2916,9 @@ void arangodb::aql::scatterInClusterRule(Optimizer* opt,
     plan->registerNode(gatherNode);
     TRI_ASSERT(remoteNode);
     gatherNode->addDependency(remoteNode);
-    if (!elements.empty() && gatherNode->collection()->numberOfShards() > 1) {
+    // On SmartEdge collections we have 0 shards and we need the elements
+    // to be injected here as well. So do not replace it with > 1
+    if (!elements.empty() && gatherNode->collection()->numberOfShards() != 1) {
       gatherNode->setElements(elements);
     }
 
@@ -3443,7 +3445,9 @@ void arangodb::aql::distributeSortToClusterRule(
         if (thisSortNode->_reinsertInCluster) {
           plan->insertDependency(rn, inspectNode);
         }
-        if (gatherNode->collection()->numberOfShards() > 1) {
+        // On SmartEdge collections we have 0 shards and we need the elements
+        // to be injected here as well. So do not replace it with > 1
+        if (gatherNode->collection()->numberOfShards() != 1) {
           gatherNode->setElements(thisSortNode->getElements());
         }
         modified = true;
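The comparison change matters because a virtual smart edge collection reports zero shards, yet its gather node still has to merge the shard streams in sort order. A small arangosh illustration (collection name illustrative):

// Sketch: a sorted read over a smart edge collection. With the old
// `numberOfShards() > 1` check the gather node received no sort
// elements (0 shards reported), so merged results could come back unsorted.
db._query("FOR x IN edges SORT TO_NUMBER(x.value) RETURN x.value").toArray();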
@@ -68,6 +68,20 @@ using namespace arangodb::rest;
 uint64_t const RestReplicationHandler::_defaultChunkSize = 128 * 1024;
 uint64_t const RestReplicationHandler::_maxChunkSize = 128 * 1024 * 1024;
 
+static bool ignoreHiddenEnterpriseCollection(std::string const& name, bool force) {
+#ifdef USE_ENTERPRISE
+  if (!force && name[0] == '_') {
+    if (strncmp(name.c_str(), "_local_", 7) == 0 ||
+        strncmp(name.c_str(), "_from_", 6) == 0 ||
+        strncmp(name.c_str(), "_to_", 4) == 0) {
+      LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "Restore ignoring collection " << name << ". Will be created via SmartGraphs of a full dump. If you want to restore ONLY this collection use 'arangorestore --force'. However this is not recommended and you should restore the EdgeCollection of the SmartGraph instead.";
+      return true;
+    }
+  }
+#endif
+  return false;
+}
+
 static Result restoreDataParser(char const* ptr, char const* pos,
                                 std::string const& collectionName,
                                 std::string& key,
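The force override reaches this helper as a URL parameter of the restore endpoints. A hedged arangosh sketch of deliberately restoring one hidden collection anyway (payload shape follows the replication restore API; the collection name is illustrative, and this is exactly the path the log message discourages):

// Sketch: force-restoring a hidden smart-graph sub-collection.
// Without force=true the handler now answers with success but skips it.
const arango = require("internal").arango;
arango.PUT("/_api/replication/restore-collection?force=true",
           { parameters: { name: "_local_edges", type: 3 }, indexes: [] });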
@@ -969,6 +983,10 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
     return Result(TRI_ERROR_HTTP_BAD_PARAMETER, "collection name is missing");
   }
 
+  if (ignoreHiddenEnterpriseCollection(name, force)) {
+    return {TRI_ERROR_NO_ERROR};
+  }
+
   if (arangodb::basics::VelocyPackHelper::getBooleanValue(parameters, "deleted",
                                                           false)) {
     // we don't care about deleted collections
@@ -1042,7 +1060,10 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
 
   // Replication Factor. Will be overwritten if not existent
   VPackSlice const replFactorSlice = parameters.get("replicationFactor");
-  if (!replFactorSlice.isInteger()) {
+  bool isValidReplFactorSlice =
+      replFactorSlice.isInteger() ||
+      (replFactorSlice.isString() && replFactorSlice.isEqualString("satellite"));
+  if (!isValidReplFactorSlice) {
     if (replicationFactor == 0) {
       replicationFactor = 1;
     }
@@ -1057,6 +1078,11 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
     // system collection?
     toMerge.add("isSystem", VPackValue(true));
   }
 
+
+  // Always ignore `shadowCollections`; they were accidentally dumped in arangodb versions
+  // earlier than 3.3.6
+  toMerge.add("shadowCollections", arangodb::basics::VelocyPackHelper::NullValue());
   toMerge.close();  // TopLevel
 
   VPackSlice const type = parameters.get("type");
@@ -1068,7 +1094,7 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
 
   VPackSlice const sliceToMerge = toMerge.slice();
   VPackBuilder mergedBuilder =
-      VPackCollection::merge(parameters, sliceToMerge, false);
+      VPackCollection::merge(parameters, sliceToMerge, false, true);
   VPackSlice const merged = mergedBuilder.slice();
 
   try {
@@ -1107,6 +1133,21 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
 ////////////////////////////////////////////////////////////////////////////////
 
 Result RestReplicationHandler::processRestoreData(std::string const& colName) {
+#ifdef USE_ENTERPRISE
+  {
+    bool force = false;
+    bool found = false;
+    std::string const& forceVal = _request->value("force", found);
+
+    if (found) {
+      force = StringUtils::boolean(forceVal);
+    }
+    if (ignoreHiddenEnterpriseCollection(colName, force)) {
+      return {TRI_ERROR_NO_ERROR};
+    }
+  }
+#endif
+
   grantTemporaryRights();
 
   if (colName == "_users") {
@@ -1476,6 +1517,7 @@ int RestReplicationHandler::processRestoreIndexes(VPackSlice const& collection,
     return TRI_ERROR_HTTP_BAD_PARAMETER;
   }
 
+
   VPackSlice const parameters = collection.get("parameters");
 
   if (!parameters.isObject()) {
@@ -1613,6 +1655,10 @@ int RestReplicationHandler::processRestoreIndexesCoordinator(
     return TRI_ERROR_HTTP_BAD_PARAMETER;
   }
 
+  if (ignoreHiddenEnterpriseCollection(name, force)) {
+    return {TRI_ERROR_NO_ERROR};
+  }
+
   if (arangodb::basics::VelocyPackHelper::getBooleanValue(parameters, "deleted",
                                                           false)) {
     // we don't care about deleted collections
@@ -24,6 +24,7 @@
 
 #include <iostream>
 
+#include <velocypack/Collection.h>
 #include <velocypack/Iterator.h>
 #include <velocypack/velocypack-aliases.h>
 
@@ -903,6 +904,10 @@ int DumpFeature::runClusterDump(std::string& errorMsg) {
       continue;
     }
 
+    if (isIgnoredHiddenEnterpriseCollection(name)) {
+      continue;
+    }
+
     if (!_ignoreDistributeShardsLikeErrors) {
       std::string prototypeCollection =
           arangodb::basics::VelocyPackHelper::getStringValue(
@@ -954,7 +959,21 @@ int DumpFeature::runClusterDump(std::string& errorMsg) {
 
       beginEncryption(fd);
 
-      std::string const collectionInfo = collection.toJson();
+      VPackBuilder excludes;
+      {  // { parameters: { shadowCollections: null } }
+        excludes.add(VPackValue(VPackValueType::Object));
+        excludes.add("parameters", VPackValue(VPackValueType::Object));
+        excludes.add("shadowCollections", VPackSlice::nullSlice());
+        excludes.close();
+        excludes.close();
+      }
+
+      VPackBuilder collectionWithExcludedParametersBuilder
+          = VPackCollection::merge(collection, excludes.slice(), true, true);
+
+      std::string const collectionInfo =
+          collectionWithExcludedParametersBuilder.slice().toJson();
+
       bool result =
           writeData(fd, collectionInfo.c_str(), collectionInfo.size());
 
@@ -1237,3 +1256,23 @@ void DumpFeature::endEncryption(int fd) {
   }
 #endif
 }
+
+bool DumpFeature::isIgnoredHiddenEnterpriseCollection(
+    std::string const& name) const {
+#ifdef USE_ENTERPRISE
+  if (!_force && name[0] == '_') {
+    if (strncmp(name.c_str(), "_local_", 7) == 0 ||
+        strncmp(name.c_str(), "_from_", 6) == 0 ||
+        strncmp(name.c_str(), "_to_", 4) == 0) {
+      LOG_TOPIC(INFO, arangodb::Logger::FIXME)
+          << "Dump ignoring collection " << name
+          << ". Will be created via SmartGraphs of a full dump. If you want to "
+             "dump this collection anyway use 'arangodump --force'. "
+             "However this is not recommended and you should dump "
+             "the EdgeCollection of the SmartGraph instead.";
+      return true;
+    }
+  }
+#endif
+  return false;
+}
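The three prefixes checked above correspond to the hidden sub-collections the enterprise edition maintains per smart edge collection; their contents duplicate data already held by the visible edge collection. A sketch of the naming scheme (edge collection name taken from the tests below):

// For a smart edge collection named "UnitTestDumpSmartEdges", arangodump
// now skips these hidden companions unless --force is given:
const edges = "UnitTestDumpSmartEdges";
[`_local_${edges}`, `_from_${edges}`, `_to_${edges}`].forEach(function (name) {
  print("skipped by default:", name); // data is contained in `edges` itself
});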
@@ -78,6 +78,8 @@ class DumpFeature final : public application_features::ApplicationFeature,
   void beginEncryption(int fd);
   void endEncryption(int fd);
 
+  bool isIgnoredHiddenEnterpriseCollection(std::string const& name) const;
+
  private:
   int* _result;
   uint64_t _batchId;
@@ -91,6 +93,7 @@ class DumpFeature final : public application_features::ApplicationFeature,
     uint64_t _totalCollections;
     uint64_t _totalWritten;
   } _stats;
+
 };
 }
@@ -343,10 +343,12 @@ ArangoCollection.prototype.properties = function (properties) {
   var attributes = {
     'doCompact': true,
     'journalSize': true,
+    'isSmart': false,
     'isSystem': false,
     'isVolatile': false,
     'waitForSync': true,
     'shardKeys': false,
+    'smartGraphAttribute': false,
     'numberOfShards': false,
     'keyOptions': false,
     'indexBuckets': true,
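With isSmart and smartGraphAttribute whitelisted as read-only attributes, arangosh now reports smart-graph metadata via properties(); a short sketch using the fixture names from the tests below:

// Sketch: reading the newly exposed attributes of a smart vertex collection.
var p = db._collection("UnitTestDumpSmartVertices").properties();
p.isSmart;              // true
p.smartGraphAttribute;  // "value"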
@@ -28,8 +28,11 @@
 /// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
 ////////////////////////////////////////////////////////////////////////////////
 
-var internal = require("internal");
-var jsunity = require("jsunity");
+const fs = require('fs');
+const internal = require("internal");
+const jsunity = require("jsunity");
+const isEnterprise = internal.isEnterprise();
+const db = internal.db;
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief test suite
@@ -37,7 +40,6 @@
 
 function dumpTestSuite () {
   'use strict';
-  var db = internal.db;
 
   return {
 
@@ -320,11 +322,300 @@ function dumpTestSuite () {
   };
 }
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test suite for the enterprise mode
+////////////////////////////////////////////////////////////////////////////////
+
+function dumpTestEnterpriseSuite () {
+  const smartGraphName = "UnitTestDumpSmartGraph";
+  const edges = "UnitTestDumpSmartEdges";
+  const vertices = "UnitTestDumpSmartVertices";
+  const orphans = "UnitTestDumpSmartOrphans";
+  const satellite = "UnitTestDumpSatelliteCollection";
+  const gm = require("@arangodb/smart-graph");
+  const instanceInfo = JSON.parse(require('internal').env.INSTANCEINFO);
+
+  return {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    /// @brief set up
+    ////////////////////////////////////////////////////////////////////////////////
+
+    setUp : function () {
+    },
+
+    ////////////////////////////////////////////////////////////////////////////////
+    /// @brief tear down
+    ////////////////////////////////////////////////////////////////////////////////
+
+    tearDown : function () {
+    },
+
+    testSatelliteCollections : function () {
+      let c = db._collection(satellite);
+      let p = c.properties();
+      assertEqual(2, c.type()); // Document
+      assertEqual(1, p.numberOfShards);
+      assertEqual("satellite", p.replicationFactor);
+      assertEqual(100, c.count());
+    },
+
+    testHiddenCollectionsOmitted : function () {
+      const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
+
+      const smartEdgeCollectionPath = fs.join(dumpDir, `${edges}.structure.json`);
+      const localEdgeCollectionPath = fs.join(dumpDir, `_local_${edges}.structure.json`);
+      const fromEdgeCollectionPath = fs.join(dumpDir, `_from_${edges}.structure.json`);
+      const toEdgeCollectionPath = fs.join(dumpDir, `_to_${edges}.structure.json`);
+
+      assertTrue(fs.exists(smartEdgeCollectionPath), 'Smart edge collection missing in dump!');
+      assertFalse(fs.exists(localEdgeCollectionPath), '_local edge collection should not have been dumped!');
+      assertFalse(fs.exists(fromEdgeCollectionPath), '_from edge collection should not have been dumped!');
+      assertFalse(fs.exists(toEdgeCollectionPath), '_to edge collection should not have been dumped!');
+    },
+
+    testShadowCollectionsOmitted : function () {
+      const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
+      const collStructure = JSON.parse(
+        fs.read(fs.join(dumpDir, `${edges}.structure.json`))
+      );
+
+      assertTrue(collStructure.hasOwnProperty('parameters'), collStructure);
+      const parameters = collStructure['parameters'];
+      assertFalse(parameters.hasOwnProperty('shadowCollections'),
+        `Property 'shadowCollections' should be hidden in collection ${edges}!`);
+    },
+
+    testVertices : function () {
+      let c = db._collection(vertices);
+      let p = c.properties();
+      assertEqual(2, c.type()); // Document
+      assertEqual(5, p.numberOfShards);
+      assertTrue(p.isSmart, p);
+      assertFalse(Object.hasOwnProperty(p, "distributeShardsLike"));
+      assertEqual(100, c.count());
+      assertEqual("value", p.smartGraphAttribute);
+    },
+
+    testVerticesAqlRead: function () {
+      let q1 = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x`;
+      let q2 = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x.value`;
+      // This query can be optimized to a single shard. Make sure that is still correct
+      let q3 = `FOR x IN ${vertices} FILTER x._key == @key RETURN x.value`;
+
+      let res1 = db._query(q1).toArray();
+      assertEqual(100, res1.length);
+      for (let i = 0; i < 100; ++i) {
+        assertEqual(String(i), res1[i].value);
+      }
+
+      let res2 = db._query(q2).toArray();
+      assertEqual(1, res2.length);
+      assertEqual("10", res2[0]);
+
+      for (let x of res1) {
+        let res3 = db._query(q3, {key: x._key}).toArray();
+        assertEqual(1, res3.length);
+        assertEqual(x.value, res3[0]);
+      }
+    },
+
+    testVerticesAqlInsert: function () {
+      // Precondition
+      assertEqual(100, db[vertices].count());
+      let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${vertices}`;
+      let update = `FOR x IN ${vertices} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${vertices}`;
+      let remove = `FOR x IN ${vertices} FILTER x.needRemove REMOVE x INTO ${vertices}`;
+      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
+      let resIns = db._query(insert);
+      assertEqual(100, resIns.getExtra().stats.writesExecuted);
+      assertEqual(0, resIns.getExtra().stats.writesIgnored);
+      assertEqual(200, db[vertices].count());
+
+      let resUp = db._query(update);
+      assertEqual(100, resUp.getExtra().stats.writesExecuted);
+      assertEqual(0, resUp.getExtra().stats.writesIgnored);
+      assertEqual(200, db[vertices].count());
+
+      let resRem = db._query(remove);
+      assertEqual(100, resRem.getExtra().stats.writesExecuted);
+      assertEqual(0, resRem.getExtra().stats.writesIgnored);
+      assertEqual(100, db[vertices].count());
+    },
+
+    testOrphans : function () {
+      let c = db._collection(orphans);
+      let p = c.properties();
+      assertEqual(2, c.type()); // Document
+      assertEqual(5, p.numberOfShards);
+      assertTrue(p.isSmart);
+      assertEqual(vertices, p.distributeShardsLike);
+      assertEqual(100, c.count());
+      assertEqual("value", p.smartGraphAttribute);
+    },
+
+    testOrphansAqlRead: function () {
+      let q1 = `FOR x IN ${orphans} SORT TO_NUMBER(x.value) RETURN x`;
+      let q2 = `FOR x IN ${orphans} FILTER x.value == "10" RETURN x.value`;
+      // This query can be optimized to a single shard. Make sure that is still correct
+      let q3 = `FOR x IN ${orphans} FILTER x._key == @key RETURN x.value`;
+
+      let res1 = db._query(q1).toArray();
+      assertEqual(100, res1.length);
+      for (let i = 0; i < 100; ++i) {
+        assertEqual(String(i), res1[i].value);
+      }
+
+      let res2 = db._query(q2).toArray();
+      assertEqual(1, res2.length);
+      assertEqual("10", res2[0]);
+
+      for (let x of res1) {
+        let res3 = db._query(q3, {key: x._key}).toArray();
+        assertEqual(1, res3.length);
+        assertEqual(x.value, res3[0]);
+      }
+    },
+
+    testOrphansAqlInsert: function () {
+      // Precondition
+      let c = db[orphans];
+      assertEqual(100, c.count());
+      let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${orphans}`;
+      let update = `FOR x IN ${orphans} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${orphans}`;
+      let remove = `FOR x IN ${orphans} FILTER x.needRemove REMOVE x INTO ${orphans}`;
+      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
+      let resIns = db._query(insert);
+      assertEqual(100, resIns.getExtra().stats.writesExecuted);
+      assertEqual(0, resIns.getExtra().stats.writesIgnored);
+      assertEqual(200, c.count());
+
+      let resUp = db._query(update);
+      assertEqual(100, resUp.getExtra().stats.writesExecuted);
+      assertEqual(0, resUp.getExtra().stats.writesIgnored);
+      assertEqual(200, c.count());
+
+      let resRem = db._query(remove);
+      assertEqual(100, resRem.getExtra().stats.writesExecuted);
+      assertEqual(0, resRem.getExtra().stats.writesIgnored);
+      assertEqual(100, c.count());
+    },
+
+    testEdges : function () {
+      let c = db._collection(edges);
+      let p = c.properties();
+      assertEqual(3, c.type()); // Edges
+      //assertEqual(5, p.numberOfShards);
+      assertTrue(p.isSmart);
+      assertEqual(vertices, p.distributeShardsLike);
+      assertEqual(300, c.count());
+    },
+
+    testEdgesAqlRead: function () {
+      let q1 = `FOR x IN ${edges} SORT TO_NUMBER(x.value) RETURN x`;
+      let q2 = `FOR x IN ${edges} FILTER x.value == "10" RETURN x.value`;
+      // This query can be optimized to a single shard. Make sure that is still correct
+      let q3 = `FOR x IN ${edges} FILTER x._key == @key RETURN x.value`;
+
+      let res1 = db._query(q1).toArray();
+      assertEqual(300, res1.length);
+      for (let i = 0; i < 100; ++i) {
+        // We have three edges per value
+        assertEqual(String(i), res1[3*i].value);
+        assertEqual(String(i), res1[3*i+1].value);
+        assertEqual(String(i), res1[3*i+2].value);
+      }
+
+      let res2 = db._query(q2).toArray();
+      assertEqual(3, res2.length);
+      assertEqual("10", res2[0]);
+
+      for (let x of res1) {
+        let res3 = db._query(q3, {key: x._key}).toArray();
+        assertEqual(1, res3.length);
+        assertEqual(x.value, res3[0]);
+      }
+    },
+
+    testEdgesAqlInsert: function () {
+      // Precondition
+      let c = db[edges];
+      assertEqual(300, c.count());
+
+      // We first need the vertices
+      let vC = db[vertices];
+      assertEqual(100, vC.count());
+      let vQ = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x._id`;
+      let verticesList = db._query(vQ).toArray();
+      let insertSameValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[i], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
+      let insertOtherValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[(i + 1) % 100], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
+      let update = `FOR x IN ${edges} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${edges}`;
+      let remove = `FOR x IN ${edges} FILTER x.needRemove REMOVE x INTO ${edges}`;
+      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
+      let resInsSame = db._query(insertSameValue, {vertices: verticesList});
+      assertEqual(100, resInsSame.getExtra().stats.writesExecuted);
+      assertEqual(0, resInsSame.getExtra().stats.writesIgnored);
+      assertEqual(400, c.count());
+
+      let resInsOther = db._query(insertOtherValue, {vertices: verticesList});
+      assertEqual(100, resInsOther.getExtra().stats.writesExecuted);
+      assertEqual(0, resInsOther.getExtra().stats.writesIgnored);
+      assertEqual(500, c.count());
+
+      let resUp = db._query(update);
+      assertEqual(200, resUp.getExtra().stats.writesExecuted);
+      assertEqual(0, resUp.getExtra().stats.writesIgnored);
+      assertEqual(500, c.count());
+
+      let resRem = db._query(remove);
+      assertEqual(200, resRem.getExtra().stats.writesExecuted);
+      assertEqual(0, resRem.getExtra().stats.writesIgnored);
+      assertEqual(300, c.count());
+    },
+
+    testAqlGraphQuery: function() {
+      // Precondition
+      let c = db[edges];
+      assertEqual(300, c.count());
+      // We first need the vertices
+      let vC = db[vertices];
+      assertEqual(100, vC.count());
+
+      let vertexQuery = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x._id`;
+      let vertex = db._query(vertexQuery).toArray();
+      assertEqual(1, vertex.length);
+
+      let q = `FOR v IN 1..2 ANY "${vertex[0]}" GRAPH "${smartGraphName}" OPTIONS {uniqueVertices: 'path'} SORT TO_NUMBER(v.value) RETURN v`;
+      /* We expect the following result:
+       * 10 <- 9 <- 8
+       * 10 <- 9
+       * 10 -> 11
+       * 10 -> 11 -> 12
+       */
+
+      // Validate that everything is wired to a smart graph correctly
+      let res = db._query(q).toArray();
+      assertEqual(4, res.length);
+      assertEqual("8", res[0].value);
+      assertEqual("9", res[1].value);
+      assertEqual("11", res[2].value);
+      assertEqual("12", res[3].value);
+    }
+
+  };
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief executes the test suite
 ////////////////////////////////////////////////////////////////////////////////
 
 jsunity.run(dumpTestSuite);
+if (isEnterprise) {
+  jsunity.run(dumpTestEnterpriseSuite);
+}
 
 return jsunity.done();
@@ -28,8 +28,11 @@
 /// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
 ////////////////////////////////////////////////////////////////////////////////
 
-var internal = require("internal");
-var jsunity = require("jsunity");
+const fs = require('fs');
+const internal = require("internal");
+const jsunity = require("jsunity");
+const isEnterprise = internal.isEnterprise();
+const db = internal.db;
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief test suite
@@ -37,7 +40,6 @@
 
 function dumpTestSuite () {
   'use strict';
-  var db = internal.db;
 
   return {
 
@@ -302,11 +304,300 @@ function dumpTestSuite () {
   };
 }
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test suite for the enterprise mode
+////////////////////////////////////////////////////////////////////////////////
+
+function dumpTestEnterpriseSuite () {
+  const smartGraphName = "UnitTestDumpSmartGraph";
+  const edges = "UnitTestDumpSmartEdges";
+  const vertices = "UnitTestDumpSmartVertices";
+  const orphans = "UnitTestDumpSmartOrphans";
+  const satellite = "UnitTestDumpSatelliteCollection";
+  const gm = require("@arangodb/smart-graph");
+  const instanceInfo = JSON.parse(require('internal').env.INSTANCEINFO);
+
+  return {
+
+    ////////////////////////////////////////////////////////////////////////////////
+    /// @brief set up
+    ////////////////////////////////////////////////////////////////////////////////
+
+    setUp : function () {
+    },
+
+    ////////////////////////////////////////////////////////////////////////////////
+    /// @brief tear down
+    ////////////////////////////////////////////////////////////////////////////////
+
+    tearDown : function () {
+    },
+
+    testSatelliteCollections : function () {
+      let c = db._collection(satellite);
+      let p = c.properties();
+      assertEqual(2, c.type()); // Document
+      assertEqual(1, p.numberOfShards);
+      assertEqual("satellite", p.replicationFactor);
+      assertEqual(100, c.count());
+    },
+
+    testHiddenCollectionsOmitted : function () {
+      const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
+
+      const smartEdgeCollectionPath = fs.join(dumpDir, `${edges}.structure.json`);
+      const localEdgeCollectionPath = fs.join(dumpDir, `_local_${edges}.structure.json`);
+      const fromEdgeCollectionPath = fs.join(dumpDir, `_from_${edges}.structure.json`);
+      const toEdgeCollectionPath = fs.join(dumpDir, `_to_${edges}.structure.json`);
+
+      assertTrue(fs.exists(smartEdgeCollectionPath), 'Smart edge collection missing in dump!');
+      assertFalse(fs.exists(localEdgeCollectionPath), '_local edge collection should not have been dumped!');
+      assertFalse(fs.exists(fromEdgeCollectionPath), '_from edge collection should not have been dumped!');
+      assertFalse(fs.exists(toEdgeCollectionPath), '_to edge collection should not have been dumped!');
+    },
+
+    testShadowCollectionsOmitted : function () {
+      const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
+      const collStructure = JSON.parse(
+        fs.read(fs.join(dumpDir, `${edges}.structure.json`))
+      );
+
+      assertTrue(collStructure.hasOwnProperty('parameters'), collStructure);
+      const parameters = collStructure['parameters'];
+      assertFalse(parameters.hasOwnProperty('shadowCollections'),
+        `Property 'shadowCollections' should be hidden in collection ${edges}!`);
+    },
+
+    testVertices : function () {
+      let c = db._collection(vertices);
+      let p = c.properties();
+      assertEqual(2, c.type()); // Document
+      assertEqual(5, p.numberOfShards);
+      assertTrue(p.isSmart, p);
+      assertFalse(Object.hasOwnProperty(p, "distributeShardsLike"));
+      assertEqual(100, c.count());
+      assertEqual("value", p.smartGraphAttribute);
+    },
+
+    testVerticesAqlRead: function () {
+      let q1 = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x`;
+      let q2 = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x.value`;
+      // This query can be optimized to a single shard. Make sure that is still correct
+      let q3 = `FOR x IN ${vertices} FILTER x._key == @key RETURN x.value`;
+
+      let res1 = db._query(q1).toArray();
+      assertEqual(100, res1.length);
+      for (let i = 0; i < 100; ++i) {
+        assertEqual(String(i), res1[i].value);
+      }
+
+      let res2 = db._query(q2).toArray();
+      assertEqual(1, res2.length);
+      assertEqual("10", res2[0]);
+
+      for (let x of res1) {
+        let res3 = db._query(q3, {key: x._key}).toArray();
+        assertEqual(1, res3.length);
+        assertEqual(x.value, res3[0]);
+      }
+    },
+
+    testVerticesAqlInsert: function () {
+      // Precondition
+      assertEqual(100, db[vertices].count());
+      let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${vertices}`;
+      let update = `FOR x IN ${vertices} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${vertices}`;
+      let remove = `FOR x IN ${vertices} FILTER x.needRemove REMOVE x INTO ${vertices}`;
+      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
+      let resIns = db._query(insert);
+      assertEqual(100, resIns.getExtra().stats.writesExecuted);
+      assertEqual(0, resIns.getExtra().stats.writesIgnored);
+      assertEqual(200, db[vertices].count());
+
+      let resUp = db._query(update);
+      assertEqual(100, resUp.getExtra().stats.writesExecuted);
+      assertEqual(0, resUp.getExtra().stats.writesIgnored);
+      assertEqual(200, db[vertices].count());
+
+      let resRem = db._query(remove);
+      assertEqual(100, resRem.getExtra().stats.writesExecuted);
+      assertEqual(0, resRem.getExtra().stats.writesIgnored);
+      assertEqual(100, db[vertices].count());
+    },
+
+    testOrphans : function () {
+      let c = db._collection(orphans);
+      let p = c.properties();
+      assertEqual(2, c.type()); // Document
+      assertEqual(5, p.numberOfShards);
+      assertTrue(p.isSmart);
+      assertEqual(vertices, p.distributeShardsLike);
+      assertEqual(100, c.count());
+      assertEqual("value", p.smartGraphAttribute);
+    },
+
+    testOrphansAqlRead: function () {
+      let q1 = `FOR x IN ${orphans} SORT TO_NUMBER(x.value) RETURN x`;
+      let q2 = `FOR x IN ${orphans} FILTER x.value == "10" RETURN x.value`;
+      // This query can be optimized to a single shard. Make sure that is still correct
+      let q3 = `FOR x IN ${orphans} FILTER x._key == @key RETURN x.value`;
+
+      let res1 = db._query(q1).toArray();
+      assertEqual(100, res1.length);
+      for (let i = 0; i < 100; ++i) {
+        assertEqual(String(i), res1[i].value);
+      }
+
+      let res2 = db._query(q2).toArray();
+      assertEqual(1, res2.length);
+      assertEqual("10", res2[0]);
+
+      for (let x of res1) {
+        let res3 = db._query(q3, {key: x._key}).toArray();
+        assertEqual(1, res3.length);
+        assertEqual(x.value, res3[0]);
+      }
+    },
+
+    testOrphansAqlInsert: function () {
+      // Precondition
+      let c = db[orphans];
+      assertEqual(100, c.count());
+      let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${orphans}`;
+      let update = `FOR x IN ${orphans} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${orphans}`;
+      let remove = `FOR x IN ${orphans} FILTER x.needRemove REMOVE x INTO ${orphans}`;
+      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
+      let resIns = db._query(insert);
+      assertEqual(100, resIns.getExtra().stats.writesExecuted);
+      assertEqual(0, resIns.getExtra().stats.writesIgnored);
+      assertEqual(200, c.count());
+
+      let resUp = db._query(update);
+      assertEqual(100, resUp.getExtra().stats.writesExecuted);
+      assertEqual(0, resUp.getExtra().stats.writesIgnored);
+      assertEqual(200, c.count());
+
+      let resRem = db._query(remove);
+      assertEqual(100, resRem.getExtra().stats.writesExecuted);
+      assertEqual(0, resRem.getExtra().stats.writesIgnored);
+      assertEqual(100, c.count());
+    },
+
+    testEdges : function () {
+      let c = db._collection(edges);
+      let p = c.properties();
+      assertEqual(3, c.type()); // Edges
+      //assertEqual(5, p.numberOfShards);
+      assertTrue(p.isSmart);
+      assertEqual(vertices, p.distributeShardsLike);
+      assertEqual(300, c.count());
+    },
+
+    testEdgesAqlRead: function () {
+      let q1 = `FOR x IN ${edges} SORT TO_NUMBER(x.value) RETURN x`;
+      let q2 = `FOR x IN ${edges} FILTER x.value == "10" RETURN x.value`;
+      // This query can be optimized to a single shard. Make sure that is still correct
+      let q3 = `FOR x IN ${edges} FILTER x._key == @key RETURN x.value`;
+
+      let res1 = db._query(q1).toArray();
+      assertEqual(300, res1.length);
+      for (let i = 0; i < 100; ++i) {
+        // We have three edges per value
+        assertEqual(String(i), res1[3*i].value);
+        assertEqual(String(i), res1[3*i+1].value);
+        assertEqual(String(i), res1[3*i+2].value);
+      }
+
+      let res2 = db._query(q2).toArray();
+      assertEqual(3, res2.length);
+      assertEqual("10", res2[0]);
+
+      for (let x of res1) {
+        let res3 = db._query(q3, {key: x._key}).toArray();
+        assertEqual(1, res3.length);
+        assertEqual(x.value, res3[0]);
+      }
+    },
+
+    testEdgesAqlInsert: function () {
+      // Precondition
+      let c = db[edges];
+      assertEqual(300, c.count());
+
+      // We first need the vertices
+      let vC = db[vertices];
+      assertEqual(100, vC.count());
+      let vQ = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x._id`;
+      let verticesList = db._query(vQ).toArray();
+      let insertSameValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[i], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
+      let insertOtherValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[(i + 1) % 100], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
+      let update = `FOR x IN ${edges} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${edges}`;
+      let remove = `FOR x IN ${edges} FILTER x.needRemove REMOVE x INTO ${edges}`;
+      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
+      let resInsSame = db._query(insertSameValue, {vertices: verticesList});
+      assertEqual(100, resInsSame.getExtra().stats.writesExecuted);
+      assertEqual(0, resInsSame.getExtra().stats.writesIgnored);
+      assertEqual(400, c.count());
+
+      let resInsOther = db._query(insertOtherValue, {vertices: verticesList});
+      assertEqual(100, resInsOther.getExtra().stats.writesExecuted);
+      assertEqual(0, resInsOther.getExtra().stats.writesIgnored);
+      assertEqual(500, c.count());
+
+      let resUp = db._query(update);
+      assertEqual(200, resUp.getExtra().stats.writesExecuted);
+      assertEqual(0, resUp.getExtra().stats.writesIgnored);
+      assertEqual(500, c.count());
+
+      let resRem = db._query(remove);
+      assertEqual(200, resRem.getExtra().stats.writesExecuted);
+      assertEqual(0, resRem.getExtra().stats.writesIgnored);
+      assertEqual(300, c.count());
+    },
+
+    testAqlGraphQuery: function() {
+      // Precondition
+      let c = db[edges];
+      assertEqual(300, c.count());
+      // We first need the vertices
+      let vC = db[vertices];
+      assertEqual(100, vC.count());
+
+      let vertexQuery = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x._id`;
+      let vertex = db._query(vertexQuery).toArray();
+      assertEqual(1, vertex.length);
+
+      let q = `FOR v IN 1..2 ANY "${vertex[0]}" GRAPH "${smartGraphName}" OPTIONS {uniqueVertices: 'path'} SORT TO_NUMBER(v.value) RETURN v`;
+      /* We expect the following result:
+       * 10 <- 9 <- 8
+       * 10 <- 9
+       * 10 -> 11
+       * 10 -> 11 -> 12
+       */
+
+      // Validate that everything is wired to a smart graph correctly
+      let res = db._query(q).toArray();
+      assertEqual(4, res.length);
+      assertEqual("8", res[0].value);
+      assertEqual("9", res[1].value);
+      assertEqual("11", res[2].value);
+      assertEqual("12", res[3].value);
+    }
+
+  };
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief executes the test suite
 ////////////////////////////////////////////////////////////////////////////////
 
 jsunity.run(dumpTestSuite);
 
+if (isEnterprise) {
+  jsunity.run(dumpTestEnterpriseSuite);
+}
+
 return jsunity.done();
@@ -27,9 +27,75 @@
 /// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
 ////////////////////////////////////////////////////////////////////////////////
 
+'use strict';
+const db = require("@arangodb").db;
+const isEnterprise = require("internal").isEnterprise();
+
+/**
+ * @brief Only if enterprise mode:
+ *        Creates a smart graph sharded by `value`
+ *        That has 100 vertices (value 0 -> 99)
+ *        That has 100 orphans (value 0 -> 99)
+ *        That has 300 edges, for each value i:
+ *          Connect i -> i
+ *          Connect i - 1 -> i
+ *          Connect i -> i + 1
+ */
+const setupSmartGraph = function () {
+  if (!isEnterprise) {
+    return;
+  }
+
+  const smartGraphName = "UnitTestDumpSmartGraph";
+  const edges = "UnitTestDumpSmartEdges";
+  const vertices = "UnitTestDumpSmartVertices";
+  const orphans = "UnitTestDumpSmartOrphans";
+  const gm = require("@arangodb/smart-graph");
+  if (gm._exists(smartGraphName)) {
+    gm._drop(smartGraphName, true);
+  }
+  db._drop(edges);
+  db._drop(vertices);
+
+  gm._create(smartGraphName, [gm._relation(edges, vertices, vertices)],
+             [orphans], {numberOfShards: 5, smartGraphAttribute: "value"});
+
+  let vDocs = [];
+  for (let i = 0; i < 100; ++i) {
+    vDocs.push({value: String(i)});
+  }
+  let saved = db[vertices].save(vDocs).map(v => v._id);
+  let eDocs = [];
+  for (let i = 0; i < 100; ++i) {
+    eDocs.push({_from: saved[(i+1) % 100], _to: saved[i], value: String(i)});
+    eDocs.push({_from: saved[i], _to: saved[i], value: String(i)});
+    eDocs.push({_from: saved[i], _to: saved[(i+1) % 100], value: String(i)});
+  }
+  db[edges].save(eDocs);
+  db[orphans].save(vDocs);
+};
+
+/**
+ * @brief Only if enterprise mode:
+ *        Creates a satellite collection with 100 documents
+ */
+function setupSatelliteCollections() {
+  if (!isEnterprise) {
+    return;
+  }
+
+  const satelliteCollectionName = "UnitTestDumpSatelliteCollection";
+  db._drop(satelliteCollectionName);
+  db._create(satelliteCollectionName, {"replicationFactor": "satellite"});
+
+  let vDocs = [];
+  for (let i = 0; i < 100; ++i) {
+    vDocs.push({value: String(i)});
+  }
+  db[satelliteCollectionName].save(vDocs);
+}
+
 (function () {
-  'use strict';
-  var db = require("@arangodb").db;
   var i, c;
 
   try {
@@ -153,6 +219,8 @@
       c.save({ _key: "text" + i, value: t });
     });
 
+    setupSmartGraph();
+    setupSatelliteCollections();
 })();
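A quick arangosh sanity check of the fixtures this setup creates (enterprise mode; counts mirror the assertions in the dump tests above):

// Sketch: verify the fixtures before running arangodump.
const gm = require("@arangodb/smart-graph");
gm._exists("UnitTestDumpSmartGraph");         // true
db.UnitTestDumpSmartVertices.count();         // 100
db.UnitTestDumpSmartOrphans.count();          // 100
db.UnitTestDumpSmartEdges.count();            // 300
db.UnitTestDumpSatelliteCollection.count();   // 100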