
Merge branch 'engine-api' of https://github.com/arangodb/arangodb into engine-api

jsteemann 2017-04-18 17:28:04 +02:00
commit dc5fb20b81
10 changed files with 266 additions and 141 deletions

View File

@@ -97,6 +97,8 @@ bool MultiIndexIterator::next(TokenCallback const& callback, size_t limit) {
if (_currentIdx >= _iterators.size()) {
_current = nullptr;
return false;
} else {
_current = _iterators.at(_currentIdx);
}
}
}

View File

@@ -331,8 +331,7 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(
application_features::ApplicationServer::getFeature<DatabaseFeature>(
"Database")
->forceSyncProperties();
VPackBuilder builder =
_logicalCollection->toVelocyPackIgnore({"path", "statusString"}, true);
VPackBuilder builder = _logicalCollection->toVelocyPackIgnore({"path", "statusString"}, true, /*forPersistence*/ false);
_logicalCollection->updateProperties(builder.slice(), doSync);
}
created = true;
@@ -493,20 +492,6 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
THROW_ARANGO_EXCEPTION(converted);
}
// report index key size
RocksDBOperationResult result = state->addOperation(
cid, /*ignored revisionId*/ 0, TRI_VOC_NOOP_OPERATION_UPDATE_SIZE, 0,
iter->key().size());
// transaction size limit reached -- fail
if (result.fail()) {
THROW_ARANGO_EXCEPTION(result);
}
// force intermediate commit
if (result.commitRequired()) {
// force commit
}
iter->Next();
}
}
@@ -516,7 +501,9 @@ int RocksDBCollection::read(transaction::Methods* trx,
arangodb::velocypack::Slice const key,
ManagedDocumentResult& result, bool) {
TRI_ASSERT(key.isString());
//LOG_TOPIC(ERR, Logger::FIXME) << "############### Key Slice: " << key.toString();
RocksDBToken token = primaryIndex()->lookupKey(trx, StringRef(key));
//LOG_TOPIC(ERR, Logger::FIXME) << "############### TOKEN ID: " << token.revisionId();
if (token.revisionId()) {
if (readDocument(trx, token, result)) {
@@ -529,14 +516,15 @@ int RocksDBCollection::read(transaction::Methods* trx,
return TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND;
}
// read using a token!
bool RocksDBCollection::readDocument(transaction::Methods* trx,
DocumentIdentifierToken const& token,
ManagedDocumentResult& result) {
// TODO: why do we have read(), readDocument() and lookupKey()?
auto tkn = static_cast<RocksDBToken const*>(&token);
TRI_voc_rid_t revisionId = tkn->revisionId();
lookupRevisionVPack(revisionId, trx, result);
return !result.empty();
auto res = lookupRevisionVPack(revisionId, trx, result);
return res.ok();
}
bool RocksDBCollection::readDocumentConditional(

View File

@@ -503,7 +503,7 @@ std::string RocksDBEngine::createCollection(
TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
arangodb::LogicalCollection const* parameters) {
VPackBuilder builder =
parameters->toVelocyPackIgnore({"path", "statusString"}, true);
parameters->toVelocyPackIgnore({"path", "statusString"}, /*translate cid*/ true, /*for persistence*/ false);
int res = writeCreateCollectionMarker(vocbase->id(), id, builder.slice());
if (res != TRI_ERROR_NO_ERROR) {

View File

@@ -188,7 +188,6 @@ void RocksDBTransactionCollection::addOperation(
TRI_voc_document_operation_e operationType, uint64_t operationSize,
TRI_voc_rid_t revisionId) {
switch (operationType) {
case TRI_VOC_NOOP_OPERATION_UPDATE_SIZE:
case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN:
break;
case TRI_VOC_DOCUMENT_OPERATION_INSERT:

View File

@@ -298,7 +298,6 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
collection->addOperation(operationType, operationSize, revisionId);
switch (operationType) {
case TRI_VOC_NOOP_OPERATION_UPDATE_SIZE:
case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN:
break;
case TRI_VOC_DOCUMENT_OPERATION_INSERT:
@@ -343,7 +342,6 @@ void RocksDBTransactionState::reset() {
}
_nestingLevel = 0;
//
updateStatus(transaction::Status::CREATED);
// start new transaction

View File

@@ -73,7 +73,6 @@ TRI_voc_rid_t TRI_StringToRid(char const* p, size_t len, bool& isOld, bool warn)
/// @brief enum for write operations
enum TRI_voc_document_operation_e : uint8_t {
TRI_VOC_DOCUMENT_OPERATION_UNKNOWN = 0,
TRI_VOC_NOOP_OPERATION_UPDATE_SIZE,
TRI_VOC_DOCUMENT_OPERATION_INSERT,
TRI_VOC_DOCUMENT_OPERATION_UPDATE,
TRI_VOC_DOCUMENT_OPERATION_REPLACE,

View File

@@ -0,0 +1,220 @@
/*jshint esnext: true */
/*global assertEqual, fail, AQL_EXECUTE, AQL_EXPLAIN, AQL_EXECUTEJSON */
////////////////////////////////////////////////////////////////////////////////
/// @brief Spec for the AQL FOR x IN GRAPH name statement
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2014 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
/// @author Copyright 2015, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
"use strict";
const jsunity = require("jsunity");
const internal = require("internal");
const db = internal.db;
const errors = require("@arangodb").errors;
const gm = require("@arangodb/general-graph");
const vn = "UnitTestVertexCollection";
const en = "UnitTestEdgeCollection";
const isCluster = require("@arangodb/cluster").isCluster();
var _ = require("lodash");
var vertex = {};
var edge = {};
var vc;
var ec;
var cleanup = function () {
db._drop(vn);
db._drop(en);
vertex = {};
edge = {};
};
function optimizeNonVertexCentricIndexesSuite () {
let explain = function (query, params) {
return AQL_EXPLAIN(query, params, { optimizer: { rules: [ "+all" ] } });
};
let vertices = {};
let edges = {};
return {
setUpAll: () => {
cleanup();
vc = db._create(vn, {numberOfShards: 4});
ec = db._createEdgeCollection(en, {numberOfShards: 4});
vertices.A = vc.save({_key: "A"})._id;
vertices.B = vc.save({_key: "B"})._id;
vertices.C = vc.save({_key: "C"})._id;
vertices.D = vc.save({_key: "D"})._id;
vertices.E = vc.save({_key: "E"})._id;
vertices.F = vc.save({_key: "F"})._id;
vertices.G = vc.save({_key: "G"})._id;
vertices.FOO = vc.save({_key: "FOO"})._id;
vertices.BAR = vc.save({_key: "BAR"})._id;
edges.AB = ec.save({_key: "AB", _from: vertices.A, _to: vertices.B, foo: "A", bar: true})._id;
edges.BC = ec.save({_key: "BC", _from: vertices.B, _to: vertices.C, foo: "B", bar: true})._id;
edges.BD = ec.save({_key: "BD", _from: vertices.B, _to: vertices.D, foo: "C", bar: false})._id;
edges.AE = ec.save({_key: "AE", _from: vertices.A, _to: vertices.E, foo: "D", bar: true})._id;
edges.EF = ec.save({_key: "EF", _from: vertices.E, _to: vertices.F, foo: "E", bar: true})._id;
edges.EG = ec.save({_key: "EG", _from: vertices.E, _to: vertices.G, foo: "F", bar: false})._id;
// Adding these edges to make the estimate for the edge-index extremely bad
let badEdges = [];
for (let j = 0; j < 1000; ++j) {
badEdges.push({_from: vertices.FOO, _to: vertices.BAR, foo: "foo" + j, bar: j});
}
ec.save(badEdges);
},
tearDownAll: cleanup,
tearDown: () => {
// After each test get rid of all superfluous indexes.
var idxs = db[en].getIndexes();
for (let i = 2; i < idxs.length; ++i) {
db[en].dropIndex(idxs[i].id);
}
},
testHashIndex : () => {
var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[0].foo == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the hash index on level 0
let indexes = exp[0].indexes;
let found = indexes.levels["0"];
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testSkiplistIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[0].foo == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the skiplist index on level 0
let indexes = exp[0].indexes;
let found = indexes.levels["0"];
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testAllHashIndex : () => {
var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[*].foo ALL == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the hash index as the base index
let indexes = exp[0].indexes;
let found = indexes.base;
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testAllSkiplistIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[*].foo ALL == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the skiplist index as the base index
let indexes = exp[0].indexes;
let found = indexes.base;
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testAllSkiplistIncompleteIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo", "unknown", "_from"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, it does contain _from, but cannot use it.
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[*].foo ALL == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the skiplist index as the base index
let indexes = exp[0].indexes;
let found = indexes.base;
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
}
};
};
if (!isCluster) {
jsunity.run(optimizeNonVertexCentricIndexesSuite);
}
return jsunity.done();
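
Aside: the suite above reads the chosen index out of the explain output. Below is a minimal arangosh sketch of the same check, assuming the collections created in setUpAll exist; the statement/explain API is standard ArangoDB, but treat the exact shape of TraversalNode.indexes (base vs. levels) as an assumption taken from these tests:

// arangosh sketch: inspect which index a traversal picked for depth 0
var q = `FOR v, e, p IN OUTBOUND "UnitTestVertexCollection/A" UnitTestEdgeCollection
           FILTER p.edges[0].foo == "A"
           RETURN v._id`;
var plan = db._createStatement({ query: q }).explain().plan;
var trav = plan.nodes.filter(function (n) { return n.type === "TraversalNode"; })[0];
print(trav.indexes.levels["0"]); // index used for the first edge step
print(trav.indexes.base);        // indexes used for all other depths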

View File

@@ -3219,48 +3219,6 @@ function optimizeNonVertexCentricIndexesSuite () {
assertEqual(result[0], vertices.B);
},
testHashIndex : () => {
var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[0].foo == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the hash index on level 0
let indexes = exp[0].indexes;
let found = indexes.levels["0"];
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testSkiplistIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[0].foo == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the skiplist index on level 0
let indexes = exp[0].indexes;
let found = indexes.levels["0"];
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testUniqueSkiplistIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: true, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
@@ -3303,48 +3261,6 @@ function optimizeNonVertexCentricIndexesSuite () {
assertEqual(result[0], vertices.B);
},
testAllHashIndex : () => {
var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[*].foo ALL == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the hash index as the base index
let indexes = exp[0].indexes;
let found = indexes.base;
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testAllSkiplistIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[*].foo ALL == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the skiplist index as the base index
let indexes = exp[0].indexes;
let found = indexes.base;
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testAllUniqueSkiplistIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: true, sparse: false});
// This index is assumed to be better than edge-index, but does not contain _from/_to
@@ -3364,28 +3280,7 @@ function optimizeNonVertexCentricIndexesSuite () {
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
testAllSkiplistIncompleteIndex : () => {
var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo", "unknown", "_from"], unique: false, sparse: false});
// This index is assumed to be better than edge-index, it does contain _from, but cannot use it.
let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
FILTER p.edges[*].foo ALL == "A"
RETURN v._id`;
let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
assertEqual(1, exp.length);
// Check if we did use the skiplist index as the base index
let indexes = exp[0].indexes;
let found = indexes.base;
assertEqual(1, found.length);
found = found[0];
assertEqual(idx.type, found.type);
assertEqual(idx.fields, found.fields);
let result = db._query(q).toArray();
assertEqual(result[0], vertices.B);
},
}
};
};

View File

@@ -101,7 +101,9 @@ function transactionRevisionsSuite () {
} catch (err) {
}
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.count());
assertEqual(1, c.toArray().length);
assertEqual(1, c.document("test").value);
@@ -117,7 +119,9 @@ function transactionRevisionsSuite () {
} catch (err) {
}
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.count());
assertEqual(1, c.toArray().length);
assertEqual(1, c.document("test").value);
@@ -138,7 +142,9 @@ function transactionRevisionsSuite () {
}
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.document("test").value);
},
@@ -157,7 +163,9 @@ function transactionRevisionsSuite () {
}
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.document("test").value);
},
@@ -172,7 +180,9 @@ function transactionRevisionsSuite () {
});
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(2, c.document("test").value);
},
@@ -181,7 +191,9 @@ function transactionRevisionsSuite () {
c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true });
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(2, c.document("test").value);
},
@@ -195,7 +207,9 @@ function transactionRevisionsSuite () {
});
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(2, c.document("test").value);
},
@@ -214,7 +228,9 @@ function transactionRevisionsSuite () {
}
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.document("test").value);
},
@@ -233,7 +249,9 @@ function transactionRevisionsSuite () {
}
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.document("test").value);
},
@@ -253,7 +271,9 @@ function transactionRevisionsSuite () {
}
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.document("test").value);
},
@@ -268,7 +288,9 @@ function transactionRevisionsSuite () {
});
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(2, c.document("test").value);
},
@@ -288,7 +310,9 @@ function transactionRevisionsSuite () {
}
assertEqual(1, c.toArray().length);
if (db._engine().name === "mmfiles") {
assertEqual(1, c.figures().revisions.count);
}
assertEqual(1, c.document("test").value);
}
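
The recurring change in this file is a single pattern: revision figures are only reported by the mmfiles engine, so the corresponding assertion is guarded by engine name while the engine-independent checks keep running under RocksDB. A condensed sketch of the pattern, assuming a test collection c holding exactly one live document:

// figures().revisions is mmfiles-specific, so assert it only there
if (db._engine().name === "mmfiles") {
  assertEqual(1, c.figures().revisions.count);
}
// these checks hold for any storage engine
assertEqual(1, c.count());
assertEqual(1, c.toArray().length);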