1
0
Fork 0

fix dump&restore of smart graphs (#5093)

* fix dump&restore of smart graphs (#5093)

* Added tests for dump-restore of SmartGraphs

* Arangosh will now expose isSmart and the smartGraphAttribute on properties

* RestReplicationHandler will now ignore smart-graph collections unless you execute it with force

* Added changelog

* Reactivated original mmfiles dump/restore test

* Skip hidden smart graph collections in arangodump

* Do not dump shadowCollections metadata of smart edge collections

* Cluster optimizer rules for sorted gather nodes now handle virtual edge collections correctly

* Added a dump/restore tests for smartgraphs in rocksdb as well

* Deactivated checks for writesExecuted statistics in dump/restore tests for smartgraphs mmfiles

* Really exclude shadowCollections

* Reduced loglevel

* Added tests

* Don't change single-server behaviour

* Fix tests for omitted shadowCollections and hidden collections

* Activated statistics in MMFIles dump test again and included isEnterprise in rocksdb dump test

* A modification node can now disableStatistics, which means it does not contribute to query->extras(); this is only relevant in the SmartGraph case so far.

* Added a test to dump&restore satellite collections

* Bugfix: restore satellite collections properly

* Added regression test for internal issue #2237

* Fix bug #2237

* Updated CHANGELOG

* Copied dump/restore tests to rocksdb

* Removed enterprise test

* Added inline comment for smart-edge collections in optimizer rules

* Removed duplicate CHANGELOG entry

* Simplified removal of shadowCollections

* fix jslint

* Made internal.isEnterprise() available in js client context. Taken from 3.3/59f655fa

* fix broken dump_encrypted test

* Removed accidentally copied CHANGELOG entry
This commit is contained in:
Tobias Gödderz 2018-04-16 12:28:13 +02:00 committed by Michael Hackstein
parent 7d70ee2ca0
commit 089b21e3de
18 changed files with 862 additions and 44 deletions

View File

@ -1,6 +1,21 @@
devel
-----
* Fixed internal issue #2237: AQL queries on collections with replicationFactor:
"satellite" crashed arangod in single server mode
* Fixed restore of satellite collections: replicationFactor was set to 1 during
restore
* Fixed dump and restore of smart graphs:
a) The dump will not include the hidden shadow collections anymore, they were dumped
accidentally and only contain duplicated data.
b) Restore will now ignore hidden shadow collections as all data is contained
in the smart-edge collection. You can manually include these collections from an
old dump (3.3.5 or earlier) by using `--force`.
c) Restore of a smart-graph will now create smart collections properly instead
of getting into `TIMEOUT_IN_CLUSTER_OPERATION`
* fixed internal issue #2147 - fixed database filter in UI
* fixed internal issue #2149: number of documents in the UI is not adjusted after moving them

View File

@ -54,7 +54,7 @@ EnumerateCollectionBlock::EnumerateCollectionBlock(
int EnumerateCollectionBlock::initialize() {
DEBUG_BEGIN_BLOCK();
if (_collection->isSatellite()) {
if (ServerState::instance()->isRunningInCluster() && _collection->isSatellite()) {
auto logicalCollection = _collection->getCollection();
auto cid = logicalCollection->planId();
auto& dbName = logicalCollection->vocbase().name();

View File

@ -46,7 +46,8 @@ ModificationBlock::ModificationBlock(ExecutionEngine* engine,
_outRegNew(ExecutionNode::MaxRegisterId),
_collection(ep->_collection),
_isDBServer(false),
_usesDefaultSharding(true) {
_usesDefaultSharding(true),
_countStats(ep->countStats()) {
_trx->pinData(_collection->cid());
@ -182,13 +183,17 @@ void ModificationBlock::handleResult(int code, bool ignoreErrors,
std::string const* errorMessage) {
if (code == TRI_ERROR_NO_ERROR) {
// update the success counter
++_engine->_stats.writesExecuted;
if (_countStats) {
++_engine->_stats.writesExecuted;
}
return;
}
if (ignoreErrors) {
// update the ignored counter
++_engine->_stats.writesIgnored;
if (_countStats) {
++_engine->_stats.writesIgnored;
}
return;
}
@ -208,18 +213,24 @@ void ModificationBlock::handleBabyResult(std::unordered_map<int, size_t> const&
if (errorCounter.empty()) {
// update the success counter
// All successful.
_engine->_stats.writesExecuted += numBabies;
if (_countStats) {
_engine->_stats.writesExecuted += numBabies;
}
return;
}
if (ignoreAllErrors) {
for (auto const& pair : errorCounter) {
// update the ignored counter
_engine->_stats.writesIgnored += pair.second;
if (_countStats) {
_engine->_stats.writesIgnored += pair.second;
}
numBabies -= pair.second;
}
// update the success counter
_engine->_stats.writesExecuted += numBabies;
if (_countStats) {
_engine->_stats.writesExecuted += numBabies;
}
return;
}
auto first = errorCounter.begin();
@ -228,10 +239,14 @@ void ModificationBlock::handleBabyResult(std::unordered_map<int, size_t> const&
if (errorCounter.size() == 1) {
// We only have Document not found. Fix statistics and ignore
// update the ignored counter
_engine->_stats.writesIgnored += first->second;
if (_countStats) {
_engine->_stats.writesIgnored += first->second;
}
numBabies -= first->second;
// update the success counter
_engine->_stats.writesExecuted += numBabies;
if (_countStats) {
_engine->_stats.writesExecuted += numBabies;
}
return;
}

View File

@ -73,6 +73,10 @@ class ModificationBlock : public ExecutionBlock {
/// @brief whether or not the collection uses the default sharding attributes
bool _usesDefaultSharding;
/// @brief whether this block contributes to statistics.
/// Will only be disabled in SmartGraphCase.
bool _countStats;
protected:
/// @brief a Builder object, reused for various tasks to save a few memory allocations

View File

@ -43,7 +43,8 @@ ModificationNode::ModificationNode(ExecutionPlan* plan,
_outVariableOld(
Variable::varFromVPack(plan->getAst(), base, "outVariableOld", Optional)),
_outVariableNew(
Variable::varFromVPack(plan->getAst(), base, "outVariableNew", Optional)) {
Variable::varFromVPack(plan->getAst(), base, "outVariableNew", Optional)),
_countStats(base.get("countStats").getBool()) {
TRI_ASSERT(_vocbase != nullptr);
TRI_ASSERT(_collection != nullptr);
}
@ -56,6 +57,7 @@ void ModificationNode::toVelocyPackHelper(VPackBuilder& builder,
// Now put info about vocbase and cid in there
builder.add("database", VPackValue(_vocbase->name()));
builder.add("collection", VPackValue(_collection->getName()));
builder.add("countStats", VPackValue(_countStats));
// add out variables
if (_outVariableOld != nullptr) {
@ -67,6 +69,7 @@ void ModificationNode::toVelocyPackHelper(VPackBuilder& builder,
_outVariableNew->toVelocyPack(builder);
}
builder.add(VPackValue("modificationFlags"));
_options.toVelocyPack(builder);
}
@ -124,6 +127,9 @@ ExecutionNode* RemoveNode::clone(ExecutionPlan* plan, bool withDependencies,
auto c = new RemoveNode(plan, _id, _vocbase, _collection, _options,
inVariable, outVariableOld);
if (!_countStats) {
c->disableStatistics();
}
cloneHelper(c, withDependencies, withProperties);
@ -172,6 +178,9 @@ ExecutionNode* InsertNode::clone(ExecutionPlan* plan, bool withDependencies,
auto c = new InsertNode(plan, _id, _vocbase, _collection, _options,
inVariable, outVariableNew);
if (!_countStats) {
c->disableStatistics();
}
cloneHelper(c, withDependencies, withProperties);
@ -237,6 +246,9 @@ ExecutionNode* UpdateNode::clone(ExecutionPlan* plan, bool withDependencies,
auto c =
new UpdateNode(plan, _id, _vocbase, _collection, _options, inDocVariable,
inKeyVariable, outVariableOld, outVariableNew);
if (!_countStats) {
c->disableStatistics();
}
cloneHelper(c, withDependencies, withProperties);
@ -303,6 +315,9 @@ ExecutionNode* ReplaceNode::clone(ExecutionPlan* plan, bool withDependencies,
auto c =
new ReplaceNode(plan, _id, _vocbase, _collection, _options, inDocVariable,
inKeyVariable, outVariableOld, outVariableNew);
if (!_countStats) {
c->disableStatistics();
}
cloneHelper(c, withDependencies, withProperties);
@ -365,6 +380,9 @@ ExecutionNode* UpsertNode::clone(ExecutionPlan* plan, bool withDependencies,
auto c = new UpsertNode(plan, _id, _vocbase, _collection, _options,
inDocVariable, insertVariable, updateVariable,
outVariableNew, _isReplace);
if (!_countStats) {
c->disableStatistics();
}
cloneHelper(c, withDependencies, withProperties);

View File

@ -55,7 +55,8 @@ class ModificationNode : public ExecutionNode {
_collection(collection),
_options(options),
_outVariableOld(outVariableOld),
_outVariableNew(outVariableNew) {
_outVariableNew(outVariableNew),
_countStats(true) {
TRI_ASSERT(_vocbase != nullptr);
TRI_ASSERT(_collection != nullptr);
}
@ -127,6 +128,12 @@ class ModificationNode : public ExecutionNode {
/// @brief whether or not the node is a data modification node
bool isModificationNode() const override { return true; }
/// @brief whether this node contributes to statistics. Only disabled in SmartGraph case
bool countStats() const { return _countStats; }
/// @brief Disable that this node is contributing to statistics. Only disabled in SmartGraph case
void disableStatistics() { _countStats = false; }
protected:
/// @brief _vocbase, the database
TRI_vocbase_t* _vocbase;
@ -142,6 +149,9 @@ class ModificationNode : public ExecutionNode {
/// @brief output variable ($NEW)
Variable const* _outVariableNew;
/// @brief whether this node contributes to statistics. Only disabled in SmartGraph case
bool _countStats;
};
/// @brief class RemoveNode

View File

@ -2809,7 +2809,9 @@ void arangodb::aql::scatterInClusterRule(Optimizer* opt,
plan->registerNode(gatherNode);
TRI_ASSERT(remoteNode);
gatherNode->addDependency(remoteNode);
if (!elements.empty() && gatherNode->collection()->numberOfShards() > 1) {
// On SmartEdge collections we have 0 shards and we need the elements
// to be injected here as well. So do not replace it with > 1
if (!elements.empty() && gatherNode->collection()->numberOfShards() != 1) {
gatherNode->setElements(elements);
}
@ -3335,7 +3337,9 @@ void arangodb::aql::distributeSortToClusterRule(
if (thisSortNode->_reinsertInCluster) {
plan->insertDependency(rn, inspectNode);
}
if (gatherNode->collection()->numberOfShards() > 1) {
// On SmartEdge collections we have 0 shards and we need the elements
// to be injected here as well. So do not replace it with > 1
if (gatherNode->collection()->numberOfShards() != 1) {
gatherNode->setElements(thisSortNode->getElements());
}
modified = true;

View File

@ -68,6 +68,20 @@ using namespace arangodb::rest;
uint64_t const RestReplicationHandler::_defaultChunkSize = 128 * 1024;
uint64_t const RestReplicationHandler::_maxChunkSize = 128 * 1024 * 1024;
// Decides whether a restore should skip the collection `name`.
// In enterprise builds the hidden SmartGraph helper collections
// (prefixed _local_, _from_, _to_) are skipped unless the user explicitly
// passed --force; community builds (no USE_ENTERPRISE) never skip anything.
static bool ignoreHiddenEnterpriseCollection(std::string const& name, bool force) {
#ifdef USE_ENTERPRISE
  if (!force && name.compare(0, 1, "_") == 0) {
    // hidden SmartGraph helper collection prefixes
    for (char const* prefix : {"_local_", "_from_", "_to_"}) {
      if (name.compare(0, strlen(prefix), prefix) == 0) {
        LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "Restore ignoring collection " << name << ". Will be created via SmartGraphs of a full dump. If you want to restore ONLY this collection use 'arangorestore --force'. However this is not recommended and you should instead restore the EdgeCollection of the SmartGraph instead.";
        return true;
      }
    }
  }
#endif
  return false;
}
static Result restoreDataParser(char const* ptr, char const* pos,
std::string const& collectionName,
std::string& key,
@ -976,6 +990,10 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
return Result(TRI_ERROR_HTTP_BAD_PARAMETER, "collection name is missing");
}
if (ignoreHiddenEnterpriseCollection(name, force)) {
return {TRI_ERROR_NO_ERROR};
}
if (arangodb::basics::VelocyPackHelper::getBooleanValue(parameters, "deleted",
false)) {
// we don't care about deleted collections
@ -1050,7 +1068,10 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
// Replication Factor. Will be overwritten if not existent
VPackSlice const replFactorSlice = parameters.get("replicationFactor");
if (!replFactorSlice.isInteger()) {
bool isValidReplFactorSlice =
replFactorSlice.isInteger() ||
(replFactorSlice.isString() && replFactorSlice.isEqualString("satellite"));
if (!isValidReplFactorSlice) {
if (replicationFactor == 0) {
replicationFactor = 1;
}
@ -1065,6 +1086,11 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
// system collection?
toMerge.add("isSystem", VPackValue(true));
}
// Always ignore `shadowCollections`; they were accidentally dumped in ArangoDB versions
// earlier than 3.3.6
toMerge.add("shadowCollections", arangodb::basics::VelocyPackHelper::NullValue());
toMerge.close(); // TopLevel
VPackSlice const type = parameters.get("type");
@ -1076,7 +1102,7 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
VPackSlice const sliceToMerge = toMerge.slice();
VPackBuilder mergedBuilder =
VPackCollection::merge(parameters, sliceToMerge, false);
VPackCollection::merge(parameters, sliceToMerge, false, true);
VPackSlice const merged = mergedBuilder.slice();
try {
@ -1120,6 +1146,21 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
////////////////////////////////////////////////////////////////////////////////
Result RestReplicationHandler::processRestoreData(std::string const& colName) {
#ifdef USE_ENTERPRISE
{
bool force = false;
bool found = false;
std::string const& forceVal = _request->value("force", found);
if (found) {
force = StringUtils::boolean(forceVal);
}
if (ignoreHiddenEnterpriseCollection(colName, force)) {
return {TRI_ERROR_NO_ERROR};
}
}
#endif
grantTemporaryRights();
if (colName == "_users") {
@ -1498,6 +1539,7 @@ int RestReplicationHandler::processRestoreIndexes(VPackSlice const& collection,
return TRI_ERROR_HTTP_BAD_PARAMETER;
}
VPackSlice const parameters = collection.get("parameters");
if (!parameters.isObject()) {
@ -1645,6 +1687,10 @@ int RestReplicationHandler::processRestoreIndexesCoordinator(
return TRI_ERROR_HTTP_BAD_PARAMETER;
}
if (ignoreHiddenEnterpriseCollection(name, force)) {
return {TRI_ERROR_NO_ERROR};
}
if (arangodb::basics::VelocyPackHelper::getBooleanValue(parameters, "deleted",
false)) {
// we don't care about deleted collections

View File

@ -1922,21 +1922,6 @@ static void JS_LdapEnabled(
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief check if we are in the enterprise edition
////////////////////////////////////////////////////////////////////////////////
// V8 callback backing the SYS_IS_ENTERPRISE global: reports whether this
// binary was compiled as the enterprise edition. The answer is fixed at
// compile time via the USE_ENTERPRISE define; the arguments are not read.
static void JS_IsEnterprise(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
#ifndef USE_ENTERPRISE
// community build: expose `false` to JavaScript
TRI_V8_RETURN(v8::False(isolate));
#else
// enterprise build: expose `true` to JavaScript
TRI_V8_RETURN(v8::True(isolate));
#endif
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief decode a _rev time stamp
////////////////////////////////////////////////////////////////////////////////
@ -2255,10 +2240,6 @@ void TRI_InitV8VocBridge(v8::Isolate* isolate, v8::Handle<v8::Context> context,
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING(isolate, "TRUSTED_PROXIES"),
JS_TrustedProxies, true);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING(isolate, "SYS_IS_ENTERPRISE"),
JS_IsEnterprise);
TRI_AddGlobalFunctionVocbase(isolate,
TRI_V8_ASCII_STRING(isolate, "DECODE_REV"),

View File

@ -27,6 +27,7 @@
#include <iostream>
#include <thread>
#include <velocypack/Collection.h>
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
#include <boost/algorithm/clamp.hpp>
@ -404,7 +405,21 @@ arangodb::Result processJob(arangodb::httpclient::SimpleHttpClient& client,
return ::fileError(file.get(), true);
}
std::string const collectionInfo = jobData.collectionInfo.toJson();
VPackBuilder excludes;
{ // { parameters: { shadowCollections: null } }
excludes.add(VPackValue(VPackValueType::Object));
excludes.add("parameters", VPackValue(VPackValueType::Object));
excludes.add("shadowCollections", VPackSlice::nullSlice());
excludes.close();
excludes.close();
}
VPackBuilder collectionWithExcludedParametersBuilder
= VPackCollection::merge(jobData.collectionInfo, excludes.slice(), true, true);
std::string const collectionInfo =
collectionWithExcludedParametersBuilder.slice().toJson();
file->write(collectionInfo.c_str(), collectionInfo.size());
if (file->status().fail()) {
// close file and bail out
@ -830,6 +845,10 @@ Result DumpFeature::runClusterDump(httpclient::SimpleHttpClient& client) {
continue;
}
if (isIgnoredHiddenEnterpriseCollection(name)) {
continue;
}
// verify distributeShardsLike info
if (!_ignoreDistributeShardsLikeErrors) {
std::string prototypeCollection =
@ -982,4 +1001,24 @@ void DumpFeature::start() {
}
}
// Returns true when `name` denotes one of the hidden SmartGraph helper
// collections (prefixed _local_, _from_, _to_) that arangodump skips unless
// --force was given. Community builds (no USE_ENTERPRISE) never skip.
bool DumpFeature::isIgnoredHiddenEnterpriseCollection(
    std::string const& name) const {
#ifdef USE_ENTERPRISE
  if (!_force && name.compare(0, 1, "_") == 0) {
    // hidden SmartGraph helper collection prefixes
    for (char const* prefix : {"_local_", "_from_", "_to_"}) {
      if (name.compare(0, strlen(prefix), prefix) == 0) {
        LOG_TOPIC(INFO, arangodb::Logger::FIXME)
            << "Dump ignoring collection " << name
            << ". Will be created via SmartGraphs of a full dump. If you want to "
               "dump this collection anyway use 'arangodump --force'. "
               "However this is not recommended and you should instead dump "
               "the EdgeCollection of the SmartGraph instead.";
        return true;
      }
    }
  }
#endif
  return false;
}
} // namespace arangodb

View File

@ -99,6 +99,8 @@ class DumpFeature : public application_features::ApplicationFeature {
Result runDump(httpclient::SimpleHttpClient& client, std::string& dbName);
Result runClusterDump(httpclient::SimpleHttpClient& client);
bool isIgnoredHiddenEnterpriseCollection(std::string const &name) const;
private:
int& _exitCode;

View File

@ -252,6 +252,15 @@
};
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief returns if we are in enterprise version or not
// //////////////////////////////////////////////////////////////////////////////
if (global.SYS_IS_ENTERPRISE) {
exports.isEnterprise = global.SYS_IS_ENTERPRISE;
delete global.SYS_IS_ENTERPRISE;
}
// //////////////////////////////////////////////////////////////////////////////
// / @brief log function
// //////////////////////////////////////////////////////////////////////////////

View File

@ -344,10 +344,12 @@ ArangoCollection.prototype.properties = function (properties) {
var attributes = {
'doCompact': true,
'journalSize': true,
'isSmart': false,
'isSystem': false,
'isVolatile': false,
'waitForSync': true,
'shardKeys': false,
'smartGraphAttribute': false,
'numberOfShards': false,
'keyOptions': false,
'indexBuckets': true,

View File

@ -28,8 +28,11 @@
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var internal = require("internal");
var jsunity = require("jsunity");
const fs = require('fs');
const internal = require("internal");
const jsunity = require("jsunity");
const isEnterprise = internal.isEnterprise();
const db = internal.db;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
@ -37,7 +40,6 @@ var jsunity = require("jsunity");
function dumpTestSuite () {
'use strict';
var db = internal.db;
return {
@ -320,11 +322,303 @@ function dumpTestSuite () {
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for the enterprise mode
////////////////////////////////////////////////////////////////////////////////
// Test suite verifying that dump/restore round-trips SmartGraphs and
// satellite collections correctly (enterprise edition only). It inspects
// both the restored collections and the dump files on disk.
function dumpTestEnterpriseSuite () {
// Fixture names; they must match the collections created by the dump setup.
const smartGraphName = "UnitTestDumpSmartGraph";
const edges = "UnitTestDumpSmartEdges";
const vertices = "UnitTestDumpSmartVertices";
const orphans = "UnitTestDumpSmartOrphans";
const satellite = "UnitTestDumpSatelliteCollection";
const gm = require("@arangodb/smart-graph");
// NOTE(review): assumes the test driver exports INSTANCEINFO (a JSON string)
// into the environment — confirm against the test framework.
const instanceInfo = JSON.parse(require('internal').env.INSTANCEINFO);
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
},
// Restored satellite collection must keep replicationFactor "satellite"
// and all of its documents.
testSatelliteCollections : function () {
let c = db._collection(satellite);
let p = c.properties();
assertEqual(2, c.type()); // Document
assertEqual(1, p.numberOfShards);
assertEqual("satellite", p.replicationFactor);
assertEqual(100, c.count());
},
// The hidden SmartGraph helper collections (_local_/_from_/_to_) must not
// appear as files in the dump directory; only the smart edge collection does.
testHiddenCollectionsOmitted : function () {
const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
const smartEdgeCollectionPath = fs.join(dumpDir, `${edges}.structure.json`);
const localEdgeCollectionPath = fs.join(dumpDir, `_local_${edges}.structure.json`);
const fromEdgeCollectionPath = fs.join(dumpDir, `_from_${edges}.structure.json`);
const toEdgeCollectionPath = fs.join(dumpDir, `_to_${edges}.structure.json`);
assertTrue(fs.exists(smartEdgeCollectionPath), 'Smart edge collection missing in dump!');
assertFalse(fs.exists(localEdgeCollectionPath), '_local edge collection should not have been dumped!');
assertFalse(fs.exists(fromEdgeCollectionPath), '_from edge collection should not have been dumped!');
assertFalse(fs.exists(toEdgeCollectionPath), '_to edge collection should not have been dumped!');
},
// The dumped structure file must not leak the internal `shadowCollections`
// attribute. Only checked for unencrypted dumps, where the file is readable.
testShadowCollectionsOmitted : function () {
const encryption = fs.read(fs.join(instanceInfo.rootDir, 'dump', 'ENCRYPTION'));
if (encryption === '' || encryption === 'none') {
const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
const collStructure = JSON.parse(
fs.read(fs.join(dumpDir, `${edges}.structure.json`))
);
assertTrue(collStructure.hasOwnProperty('parameters'), collStructure);
const parameters = collStructure['parameters'];
assertFalse(parameters.hasOwnProperty('shadowCollections'),
`Property 'shadowCollections' should be hidden in collection ${edges}!`);
}
},
// Restored smart vertex collection keeps shard count, smart properties
// and data.
testVertices : function () {
let c = db._collection(vertices);
let p = c.properties();
assertEqual(2, c.type()); // Document
assertEqual(5, p.numberOfShards);
assertTrue(p.isSmart, p);
// NOTE(review): Object.hasOwnProperty(p, ...) queries a property on the
// Object constructor itself, not on p — this likely was meant to be
// p.hasOwnProperty("distributeShardsLike"); confirm intent.
assertFalse(Object.hasOwnProperty(p, "distributeShardsLike"));
assertEqual(100, c.count());
assertEqual("value", p.smartGraphAttribute);
},
testVerticesAqlRead: function () {
let q1 = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x`;
let q2 = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x.value`;
// This query can be optimized to a single shard. Make sure that is still correct
let q3 = `FOR x IN ${vertices} FILTER x._key == @key RETURN x.value`;
let res1 = db._query(q1).toArray();
assertEqual(100, res1.length);
for (let i = 0; i < 100; ++i) {
assertEqual(String(i), res1[i].value);
}
let res2 = db._query(q2).toArray();
assertEqual(1, res2.length);
assertEqual("10", res2[0]);
for (let x of res1) {
let res3 = db._query(q3, {key: x._key}).toArray();
assertEqual(1, res3.length);
assertEqual(x.value, res3[0]);
}
},
// AQL modifications on a restored smart vertex collection must report
// correct write statistics.
testVerticesAqlInsert: function () {
// Precondition
assertEqual(100, db[vertices].count());
let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${vertices}`;
let update = `FOR x IN ${vertices} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${vertices}`;
let remove = `FOR x IN ${vertices} FILTER x.needRemove REMOVE x INTO ${vertices}`;
// Note: Order is important here, we first insert, then update those inserted docs, then remove them again
let resIns = db._query(insert);
assertEqual(100, resIns.getExtra().stats.writesExecuted);
assertEqual(0, resIns.getExtra().stats.writesIgnored);
assertEqual(200, db[vertices].count());
let resUp = db._query(update);
assertEqual(100, resUp.getExtra().stats.writesExecuted);
assertEqual(0, resUp.getExtra().stats.writesIgnored);
assertEqual(200, db[vertices].count());
let resRem = db._query(remove);
assertEqual(100, resRem.getExtra().stats.writesExecuted);
assertEqual(0, resRem.getExtra().stats.writesIgnored);
assertEqual(100, db[vertices].count());
},
// Orphan collection is smart, distributed like the vertices, with data intact.
testOrphans : function () {
let c = db._collection(orphans);
let p = c.properties();
assertEqual(2, c.type()); // Document
assertEqual(5, p.numberOfShards);
assertTrue(p.isSmart);
assertEqual(vertices, p.distributeShardsLike);
assertEqual(100, c.count());
assertEqual("value", p.smartGraphAttribute);
},
testOrphansAqlRead: function () {
let q1 = `FOR x IN ${orphans} SORT TO_NUMBER(x.value) RETURN x`;
let q2 = `FOR x IN ${orphans} FILTER x.value == "10" RETURN x.value`;
// This query can be optimized to a single shard. Make sure that is still correct
let q3 = `FOR x IN ${orphans} FILTER x._key == @key RETURN x.value`;
let res1 = db._query(q1).toArray();
assertEqual(100, res1.length);
for (let i = 0; i < 100; ++i) {
assertEqual(String(i), res1[i].value);
}
let res2 = db._query(q2).toArray();
assertEqual(1, res2.length);
assertEqual("10", res2[0]);
for (let x of res1) {
let res3 = db._query(q3, {key: x._key}).toArray();
assertEqual(1, res3.length);
assertEqual(x.value, res3[0]);
}
},
// AQL modifications on the orphan collection must report correct write stats.
testOrphansAqlInsert: function () {
// Precondition
let c = db[orphans];
assertEqual(100, c.count());
let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${orphans}`;
let update = `FOR x IN ${orphans} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${orphans}`;
let remove = `FOR x IN ${orphans} FILTER x.needRemove REMOVE x INTO ${orphans}`;
// Note: Order is important here, we first insert, then update those inserted docs, then remove them again
let resIns = db._query(insert);
assertEqual(100, resIns.getExtra().stats.writesExecuted);
assertEqual(0, resIns.getExtra().stats.writesIgnored);
assertEqual(200, c.count());
let resUp = db._query(update);
assertEqual(100, resUp.getExtra().stats.writesExecuted);
assertEqual(0, resUp.getExtra().stats.writesIgnored);
assertEqual(200, c.count());
let resRem = db._query(remove);
assertEqual(100, resRem.getExtra().stats.writesExecuted);
assertEqual(0, resRem.getExtra().stats.writesIgnored);
assertEqual(100, c.count());
},
// Smart edge collection survives restore with its data and smart layout.
testEdges : function () {
let c = db._collection(edges);
let p = c.properties();
assertEqual(3, c.type()); // Edges
//assertEqual(5, p.numberOfShards);
assertTrue(p.isSmart);
assertEqual(vertices, p.distributeShardsLike);
assertEqual(300, c.count());
},
testEdgesAqlRead: function () {
let q1 = `FOR x IN ${edges} SORT TO_NUMBER(x.value) RETURN x`;
let q2 = `FOR x IN ${edges} FILTER x.value == "10" RETURN x.value`;
// This query can be optimized to a single shard. Make sure that is still correct
let q3 = `FOR x IN ${edges} FILTER x._key == @key RETURN x.value`;
let res1 = db._query(q1).toArray();
assertEqual(300, res1.length);
for (let i = 0; i < 100; ++i) {
// We have three edges per value
assertEqual(String(i), res1[3*i].value);
assertEqual(String(i), res1[3*i+1].value);
assertEqual(String(i), res1[3*i+2].value);
}
let res2 = db._query(q2).toArray();
assertEqual(3, res2.length);
assertEqual("10", res2[0]);
for (let x of res1) {
let res3 = db._query(q3, {key: x._key}).toArray();
assertEqual(1, res3.length);
assertEqual(x.value, res3[0]);
}
},
// AQL modifications on the smart edge collection: inserts with same-shard and
// cross-shard endpoints, then update/remove, all with correct write stats.
testEdgesAqlInsert: function () {
// Precondition
let c = db[edges];
assertEqual(300, c.count());
// We first need the vertices
let vC = db[vertices];
assertEqual(100, vC.count());
let vQ = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x._id`;
let verticesList = db._query(vQ).toArray();
let insertSameValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[i], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
let insertOtherValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[(i + 1) % 100], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
let update = `FOR x IN ${edges} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${edges}`;
let remove = `FOR x IN ${edges} FILTER x.needRemove REMOVE x INTO ${edges}`;
// Note: Order is important here, we first insert, then update those inserted docs, then remove them again
let resInsSame = db._query(insertSameValue, {vertices: verticesList});
assertEqual(100, resInsSame.getExtra().stats.writesExecuted);
assertEqual(0, resInsSame.getExtra().stats.writesIgnored);
assertEqual(400, c.count());
let resInsOther = db._query(insertOtherValue, {vertices: verticesList});
assertEqual(100, resInsOther.getExtra().stats.writesExecuted);
assertEqual(0, resInsOther.getExtra().stats.writesIgnored);
assertEqual(500, c.count());
let resUp = db._query(update);
assertEqual(200, resUp.getExtra().stats.writesExecuted);
assertEqual(0, resUp.getExtra().stats.writesIgnored);
assertEqual(500, c.count());
let resRem = db._query(remove);
assertEqual(200, resRem.getExtra().stats.writesExecuted);
assertEqual(0, resRem.getExtra().stats.writesIgnored);
assertEqual(300, c.count());
},
// A graph traversal over the restored SmartGraph must return the expected
// neighborhood, proving the graph definition itself was restored correctly.
testAqlGraphQuery: function() {
// Precondition
let c = db[edges];
assertEqual(300, c.count());
// We first need the vertices
let vC = db[vertices];
assertEqual(100, vC.count());
let vertexQuery = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x._id`;
let vertex = db._query(vertexQuery).toArray();
assertEqual(1, vertex.length);
let q = `FOR v IN 1..2 ANY "${vertex[0]}" GRAPH "${smartGraphName}" OPTIONS {uniqueVertices: 'path'} SORT TO_NUMBER(v.value) RETURN v`;
/* We expect the following result:
* 10 <- 9 <- 8
* 10 <- 9
* 10 -> 11
* 10 -> 11 -> 12
*/
//Validate that everything is wired to a smart graph correctly
let res = db._query(q).toArray();
assertEqual(4, res.length);
assertEqual("8", res[0].value);
assertEqual("9", res[1].value);
assertEqual("11", res[2].value);
assertEqual("12", res[3].value);
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
// Always run the base dump checks; the enterprise suite (SmartGraphs,
// satellite collections) is only meaningful on an enterprise build.
jsunity.run(dumpTestSuite);
if (isEnterprise) {
jsunity.run(dumpTestEnterpriseSuite);
}
return jsunity.done();

View File

@ -28,8 +28,11 @@
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var internal = require("internal");
var jsunity = require("jsunity");
const fs = require('fs');
const internal = require("internal");
const jsunity = require("jsunity");
const isEnterprise = internal.isEnterprise();
const db = internal.db;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
@ -37,7 +40,6 @@ var jsunity = require("jsunity");
function dumpTestSuite () {
'use strict';
var db = internal.db;
return {
@ -302,11 +304,300 @@ function dumpTestSuite () {
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for the enterprise mode
////////////////////////////////////////////////////////////////////////////////
function dumpTestEnterpriseSuite () {
  // Names must match the collections created by the dump-setup script.
  const smartGraphName = "UnitTestDumpSmartGraph";
  const edges = "UnitTestDumpSmartEdges";
  const vertices = "UnitTestDumpSmartVertices";
  const orphans = "UnitTestDumpSmartOrphans";
  const satellite = "UnitTestDumpSatelliteCollection";
  const gm = require("@arangodb/smart-graph");
  // INSTANCEINFO is injected by the test driver; rootDir points at the
  // directory that contains the arangodump output inspected below.
  const instanceInfo = JSON.parse(require('internal').env.INSTANCEINFO);

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////

    setUp : function () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////

    tearDown : function () {
    },

    // A restored satellite collection must keep replicationFactor "satellite"
    // and a single shard, and all 100 documents must survive dump & restore.
    testSatelliteCollections : function () {
      let c = db._collection(satellite);
      let p = c.properties();
      assertEqual(2, c.type()); // Document
      assertEqual(1, p.numberOfShards);
      assertEqual("satellite", p.replicationFactor);
      assertEqual(100, c.count());
    },

    // arangodump must emit the user-facing smart edge collection but skip the
    // hidden _local_/_from_/_to_ shadow collections that back it.
    testHiddenCollectionsOmitted : function () {
      const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
      const smartEdgeCollectionPath = fs.join(dumpDir, `${edges}.structure.json`);
      const localEdgeCollectionPath = fs.join(dumpDir, `_local_${edges}.structure.json`);
      const fromEdgeCollectionPath = fs.join(dumpDir, `_from_${edges}.structure.json`);
      const toEdgeCollectionPath = fs.join(dumpDir, `_to_${edges}.structure.json`);

      assertTrue(fs.exists(smartEdgeCollectionPath), 'Smart edge collection missing in dump!');
      assertFalse(fs.exists(localEdgeCollectionPath), '_local edge collection should not have been dumped!');
      assertFalse(fs.exists(fromEdgeCollectionPath), '_from edge collection should not have been dumped!');
      assertFalse(fs.exists(toEdgeCollectionPath), '_to edge collection should not have been dumped!');
    },

    // The dumped structure file must not leak internal shadowCollections
    // metadata of the smart edge collection.
    testShadowCollectionsOmitted : function () {
      const dumpDir = fs.join(instanceInfo.rootDir, 'dump');
      const collStructure = JSON.parse(
        fs.read(fs.join(dumpDir, `${edges}.structure.json`))
      );

      assertTrue(collStructure.hasOwnProperty('parameters'), collStructure);
      const parameters = collStructure['parameters'];
      assertFalse(parameters.hasOwnProperty('shadowCollections'),
        `Property 'shadowCollections' should be hidden in collection ${edges}!`);
    },

    // The smart vertex collection keeps its sharding setup; it is the
    // prototype, so it must NOT carry distributeShardsLike itself.
    testVertices : function () {
      let c = db._collection(vertices);
      let p = c.properties();
      assertEqual(2, c.type()); // Document
      assertEqual(5, p.numberOfShards);
      assertTrue(p.isSmart, p);
      // BUG FIX: Object.hasOwnProperty(p, ...) tested whether the global
      // `Object` has a property named String(p) (the second argument is
      // ignored) and therefore never inspected `p`. Call hasOwnProperty on
      // `p` itself to actually verify the attribute is absent.
      assertFalse(p.hasOwnProperty("distributeShardsLike"));
      assertEqual(100, c.count());
      assertEqual("value", p.smartGraphAttribute);
    },

    testVerticesAqlRead: function () {
      let q1 = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x`;
      let q2 = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x.value`;
      // This query can be optimized to a single shard. Make sure that is still correct
      let q3 = `FOR x IN ${vertices} FILTER x._key == @key RETURN x.value`;

      let res1 = db._query(q1).toArray();
      assertEqual(100, res1.length);
      for (let i = 0; i < 100; ++i) {
        assertEqual(String(i), res1[i].value);
      }

      let res2 = db._query(q2).toArray();
      assertEqual(1, res2.length);
      assertEqual("10", res2[0]);

      for (let x of res1) {
        let res3 = db._query(q3, {key: x._key}).toArray();
        assertEqual(1, res3.length);
        assertEqual(x.value, res3[0]);
      }
    },

    // Writes into the restored smart vertex collection must work and report
    // correct statistics (regression check for disabled stats on smart graphs).
    testVerticesAqlInsert: function () {
      // Precondition
      assertEqual(100, db[vertices].count());
      let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${vertices}`;
      let update = `FOR x IN ${vertices} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${vertices}`;
      let remove = `FOR x IN ${vertices} FILTER x.needRemove REMOVE x INTO ${vertices}`;
      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
      let resIns = db._query(insert);
      assertEqual(100, resIns.getExtra().stats.writesExecuted);
      assertEqual(0, resIns.getExtra().stats.writesIgnored);
      assertEqual(200, db[vertices].count());

      let resUp = db._query(update);
      assertEqual(100, resUp.getExtra().stats.writesExecuted);
      assertEqual(0, resUp.getExtra().stats.writesIgnored);
      assertEqual(200, db[vertices].count());

      let resRem = db._query(remove);
      assertEqual(100, resRem.getExtra().stats.writesExecuted);
      assertEqual(0, resRem.getExtra().stats.writesIgnored);
      assertEqual(100, db[vertices].count());
    },

    // Orphan collections are sharded like the vertex prototype.
    testOrphans : function () {
      let c = db._collection(orphans);
      let p = c.properties();
      assertEqual(2, c.type()); // Document
      assertEqual(5, p.numberOfShards);
      assertTrue(p.isSmart);
      assertEqual(vertices, p.distributeShardsLike);
      assertEqual(100, c.count());
      assertEqual("value", p.smartGraphAttribute);
    },

    testOrphansAqlRead: function () {
      let q1 = `FOR x IN ${orphans} SORT TO_NUMBER(x.value) RETURN x`;
      let q2 = `FOR x IN ${orphans} FILTER x.value == "10" RETURN x.value`;
      // This query can be optimized to a single shard. Make sure that is still correct
      let q3 = `FOR x IN ${orphans} FILTER x._key == @key RETURN x.value`;

      let res1 = db._query(q1).toArray();
      assertEqual(100, res1.length);
      for (let i = 0; i < 100; ++i) {
        assertEqual(String(i), res1[i].value);
      }

      let res2 = db._query(q2).toArray();
      assertEqual(1, res2.length);
      assertEqual("10", res2[0]);

      for (let x of res1) {
        let res3 = db._query(q3, {key: x._key}).toArray();
        assertEqual(1, res3.length);
        assertEqual(x.value, res3[0]);
      }
    },

    testOrphansAqlInsert: function () {
      // Precondition
      let c = db[orphans];
      assertEqual(100, c.count());
      let insert = `FOR i IN 0..99 INSERT {value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${orphans}`;
      let update = `FOR x IN ${orphans} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${orphans}`;
      let remove = `FOR x IN ${orphans} FILTER x.needRemove REMOVE x INTO ${orphans}`;
      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
      let resIns = db._query(insert);
      assertEqual(100, resIns.getExtra().stats.writesExecuted);
      assertEqual(0, resIns.getExtra().stats.writesIgnored);
      assertEqual(200, c.count());

      let resUp = db._query(update);
      assertEqual(100, resUp.getExtra().stats.writesExecuted);
      assertEqual(0, resUp.getExtra().stats.writesIgnored);
      assertEqual(200, c.count());

      let resRem = db._query(remove);
      assertEqual(200, c.count() - resRem.getExtra().stats.writesExecuted + 100);
      assertEqual(100, resRem.getExtra().stats.writesExecuted);
      assertEqual(0, resRem.getExtra().stats.writesIgnored);
      assertEqual(100, c.count());
    },

    // The setup creates three edges per value (i->i, i-1->i, i->i+1) => 300.
    testEdges : function () {
      let c = db._collection(edges);
      let p = c.properties();
      assertEqual(3, c.type()); // Edges
      //assertEqual(5, p.numberOfShards);
      assertTrue(p.isSmart);
      assertEqual(vertices, p.distributeShardsLike);
      assertEqual(300, c.count());
    },

    testEdgesAqlRead: function () {
      let q1 = `FOR x IN ${edges} SORT TO_NUMBER(x.value) RETURN x`;
      let q2 = `FOR x IN ${edges} FILTER x.value == "10" RETURN x.value`;
      // This query can be optimized to a single shard. Make sure that is still correct
      let q3 = `FOR x IN ${edges} FILTER x._key == @key RETURN x.value`;

      let res1 = db._query(q1).toArray();
      assertEqual(300, res1.length);
      for (let i = 0; i < 100; ++i) {
        // We have three edges per value
        assertEqual(String(i), res1[3*i].value);
        assertEqual(String(i), res1[3*i+1].value);
        assertEqual(String(i), res1[3*i+2].value);
      }

      let res2 = db._query(q2).toArray();
      assertEqual(3, res2.length);
      assertEqual("10", res2[0]);

      for (let x of res1) {
        let res3 = db._query(q3, {key: x._key}).toArray();
        assertEqual(1, res3.length);
        assertEqual(x.value, res3[0]);
      }
    },

    // Insert both same-shard edges (i -> i) and cross-shard edges
    // (i -> i+1) to exercise the _local_ and _from_/_to_ code paths.
    testEdgesAqlInsert: function () {
      // Precondition
      let c = db[edges];
      assertEqual(300, c.count());
      // We first need the vertices
      let vC = db[vertices];
      assertEqual(100, vC.count());

      let vQ = `FOR x IN ${vertices} SORT TO_NUMBER(x.value) RETURN x._id`;
      let verticesList = db._query(vQ).toArray();
      let insertSameValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[i], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
      let insertOtherValue = `LET vs = @vertices FOR i IN 0..99 INSERT {_from: vs[i], _to: vs[(i + 1) % 100], value: TO_STRING(i), needUpdate: true, needRemove: true} INTO ${edges}`;
      let update = `FOR x IN ${edges} FILTER x.needUpdate UPDATE x WITH {needUpdate: false} INTO ${edges}`;
      let remove = `FOR x IN ${edges} FILTER x.needRemove REMOVE x INTO ${edges}`;
      // Note: Order is important here, we first insert, then update those inserted docs, then remove them again
      let resInsSame = db._query(insertSameValue, {vertices: verticesList});
      assertEqual(100, resInsSame.getExtra().stats.writesExecuted);
      assertEqual(0, resInsSame.getExtra().stats.writesIgnored);
      assertEqual(400, c.count());

      let resInsOther = db._query(insertOtherValue, {vertices: verticesList});
      assertEqual(100, resInsOther.getExtra().stats.writesExecuted);
      assertEqual(0, resInsOther.getExtra().stats.writesIgnored);
      assertEqual(500, c.count());

      let resUp = db._query(update);
      assertEqual(200, resUp.getExtra().stats.writesExecuted);
      assertEqual(0, resUp.getExtra().stats.writesIgnored);
      assertEqual(500, c.count());

      let resRem = db._query(remove);
      assertEqual(200, resRem.getExtra().stats.writesExecuted);
      assertEqual(0, resRem.getExtra().stats.writesIgnored);
      assertEqual(300, c.count());
    },

    // End-to-end check that the restored graph is wired as a real smart
    // graph: a 2-step ANY traversal from vertex "10" must reach 8,9,11,12.
    testAqlGraphQuery: function() {
      // Precondition
      let c = db[edges];
      assertEqual(300, c.count());
      // We first need the vertices
      let vC = db[vertices];
      assertEqual(100, vC.count());

      let vertexQuery = `FOR x IN ${vertices} FILTER x.value == "10" RETURN x._id`;
      let vertex = db._query(vertexQuery).toArray();
      assertEqual(1, vertex.length);

      let q = `FOR v IN 1..2 ANY "${vertex[0]}" GRAPH "${smartGraphName}" OPTIONS {uniqueVertices: 'path'} SORT TO_NUMBER(v.value) RETURN v`;
      /* We expect the following result:
       * 10 <- 9 <- 8
       * 10 <- 9
       * 10 -> 11
       * 10 -> 11 -> 12
       */

      //Validate that everything is wired to a smart graph correctly
      let res = db._query(q).toArray();
      assertEqual(4, res.length);
      assertEqual("8", res[0].value);
      assertEqual("9", res[1].value);
      assertEqual("11", res[2].value);
      assertEqual("12", res[3].value);
    }

  };
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
jsunity.run(dumpTestSuite);
return jsunity.done();
if (isEnterprise) {
jsunity.run(dumpTestEnterpriseSuite);
}
return jsunity.done();

View File

@ -27,9 +27,75 @@
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
'use strict';
const db = require("@arangodb").db;
const isEnterprise = require("internal").isEnterprise();
/**
* @brief Only if enterprise mode:
* Creates a smart graph sharded by `value`
* That has 100 vertices (value 0 -> 99)
* That has 100 orphans (value 0 -> 99)
* That has 300 edges, for each value i:
* Connect i -> i
* Connect i - 1 -> i
* Connect i -> i + 1
*/
/**
 * @brief Only if enterprise mode:
 *        Creates a smart graph sharded by `value`
 *        That has 100 vertices (value 0 -> 99)
 *        That has 100 orphans (value 0 -> 99)
 *        That has 300 edges, for each value i:
 *          Connect i -> i
 *          Connect i - 1 -> i
 *          Connect i -> i + 1
 */
const setupSmartGraph = function () {
  if (!isEnterprise) {
    return;
  }

  const smartGraphName = "UnitTestDumpSmartGraph";
  const edges = "UnitTestDumpSmartEdges";
  const vertices = "UnitTestDumpSmartVertices";
  const orphans = "UnitTestDumpSmartOrphans";
  const gm = require("@arangodb/smart-graph");

  // Start from a clean slate: drop a leftover graph and its collections.
  if (gm._exists(smartGraphName)) {
    gm._drop(smartGraphName, true);
  }
  db._drop(edges);
  db._drop(vertices);

  gm._create(smartGraphName, [gm._relation(edges, vertices, vertices)],
    [orphans], {numberOfShards: 5, smartGraphAttribute: "value"});

  // 100 vertices with values "0" .. "99"
  const vertexDocs = Array.from({length: 100}, (_, i) => ({value: String(i)}));
  const savedIds = db[vertices].save(vertexDocs).map(v => v._id);

  // Three edges per vertex i: (i+1)->i, i->i, i->(i+1), wrapping at 100.
  const edgeDocs = [];
  savedIds.forEach((id, i) => {
    const neighbor = savedIds[(i + 1) % 100];
    const value = String(i);
    edgeDocs.push({_from: neighbor, _to: id, value: value});
    edgeDocs.push({_from: id, _to: id, value: value});
    edgeDocs.push({_from: id, _to: neighbor, value: value});
  });
  db[edges].save(edgeDocs);

  // The orphan collection receives the same 100 value documents.
  db[orphans].save(vertexDocs);
};
/**
* @brief Only if enterprise mode:
* Creates a satellite collection with 100 documents
*/
/**
 * @brief Only if enterprise mode:
 *        Creates a satellite collection with 100 documents
 */
function setupSatelliteCollections() {
  if (!isEnterprise) {
    return;
  }

  const satelliteCollectionName = "UnitTestDumpSatelliteCollection";
  // Recreate from scratch so repeated setup runs stay deterministic.
  db._drop(satelliteCollectionName);
  db._create(satelliteCollectionName, {"replicationFactor": "satellite"});

  // 100 documents with values "0" .. "99"
  const docs = Array.from({length: 100}, (_, i) => ({value: String(i)}));
  db[satelliteCollectionName].save(docs);
}
(function () {
'use strict';
var db = require("@arangodb").db;
var i, c;
try {
@ -153,6 +219,8 @@
c.save({ _key: "text" + i, value: t });
});
setupSmartGraph();
setupSatelliteCollections();
})();
return {

View File

@ -268,5 +268,6 @@ target_link_libraries(${LIB_ARANGO_V8}
# Enterprise
if (USE_ENTERPRISE)
target_compile_definitions(${LIB_ARANGO_V8} PUBLIC "-DUSE_ENTERPRISE=1")
target_include_directories(${LIB_ARANGO_V8} PUBLIC "${PROJECT_SOURCE_DIR}/${ENTERPRISE_INCLUDE_DIR}")
endif()

View File

@ -4564,6 +4564,21 @@ void TRI_ClearObjectCacheV8(v8::Isolate* isolate) {
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief JS_IsEnterprise
///
/// Exposed to JavaScript as SYS_IS_ENTERPRISE; returns true when the binary
/// was compiled with USE_ENTERPRISE, false otherwise. The decision is made
/// entirely at compile time.
////////////////////////////////////////////////////////////////////////////////

static void JS_IsEnterprise(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
  v8::HandleScope scope(isolate);
#ifdef USE_ENTERPRISE
  TRI_V8_RETURN(v8::True(isolate));
#else
  TRI_V8_RETURN(v8::False(isolate));
#endif
  TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief stores the V8 utils functions inside the global variable
////////////////////////////////////////////////////////////////////////////////
@ -4787,6 +4802,10 @@ void TRI_InitV8Utils(v8::Isolate* isolate, v8::Handle<v8::Context> context,
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "VPACK_TO_V8"), JS_VPackToV8);
TRI_AddGlobalFunctionVocbase(
isolate, TRI_V8_ASCII_STRING(isolate, "SYS_IS_ENTERPRISE"),
JS_IsEnterprise);
// .............................................................................
// create the global variables
// .............................................................................