From 01e27b803f98ebd8a8ebd7982eaf1d9de9c164ce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Simon=20Gra=CC=88tzer?=
Date: Tue, 18 Apr 2017 15:14:35 +0200
Subject: [PATCH 1/4] Starting AQL tests

---
 .../tests/aql/aql-graph-traverser-mmfiles.js | 220 ++++++++++++++++++
 js/server/tests/aql/aql-graph-traverser.js   | 107 +--------
 2 files changed, 221 insertions(+), 106 deletions(-)
 create mode 100644 js/server/tests/aql/aql-graph-traverser-mmfiles.js

diff --git a/js/server/tests/aql/aql-graph-traverser-mmfiles.js b/js/server/tests/aql/aql-graph-traverser-mmfiles.js
new file mode 100644
index 0000000000..e03e271029
--- /dev/null
+++ b/js/server/tests/aql/aql-graph-traverser-mmfiles.js
@@ -0,0 +1,220 @@
+/*jshint esnext: true */
+/*global assertEqual, fail, AQL_EXECUTE, AQL_EXPLAIN, AQL_EXECUTEJSON */
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief Spec for the AQL FOR x IN GRAPH name statement
+///
+/// @file
+///
+/// DISCLAIMER
+///
+/// Copyright 2014 ArangoDB GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Michael Hackstein
+/// @author Copyright 2015, ArangoDB GmbH, Cologne, Germany
+////////////////////////////////////////////////////////////////////////////////
+
+"use strict";
+
+const jsunity = require("jsunity");
+
+const internal = require("internal");
+const db = internal.db;
+const errors = require("@arangodb").errors;
+const gm = require("@arangodb/general-graph");
+const vn = "UnitTestVertexCollection";
+const en = "UnitTestEdgeCollection";
+const isCluster = require("@arangodb/cluster").isCluster();
+var _ = require("lodash");
+var vertex = {};
+var edge = {};
+var vc;
+var ec;
+
+var cleanup = function () {
+  db._drop(vn);
+  db._drop(en);
+  vertex = {};
+  edge = {};
+};
+
+function optimizeNonVertexCentricIndexesSuite () {
+
+  let explain = function (query, params) {
+    return AQL_EXPLAIN(query, params, { optimizer: { rules: [ "+all" ] } });
+  };
+
+  let vertices = {};
+  let edges = {};
+
+  return {
+    setUpAll: () => {
+      cleanup();
+      vc = db._create(vn, {numberOfShards: 4});
+      ec = db._createEdgeCollection(en, {numberOfShards: 4});
+      vertices.A = vc.save({_key: "A"})._id;
+      vertices.B = vc.save({_key: "B"})._id;
+      vertices.C = vc.save({_key: "C"})._id;
+      vertices.D = vc.save({_key: "D"})._id;
+      vertices.E = vc.save({_key: "E"})._id;
+      vertices.F = vc.save({_key: "F"})._id;
+      vertices.G = vc.save({_key: "G"})._id;
+
+      vertices.FOO = vc.save({_key: "FOO"})._id;
+      vertices.BAR = vc.save({_key: "BAR"})._id;
+
+      edges.AB = ec.save({_key: "AB", _from: vertices.A, _to: vertices.B, foo: "A", bar: true})._id;
+      edges.BC = ec.save({_key: "BC", _from: vertices.B, _to: vertices.C, foo: "B", bar: true})._id;
+      edges.BD = ec.save({_key: "BD", _from: vertices.B, _to: vertices.D, foo: "C", bar: false})._id;
+      edges.AE = ec.save({_key: "AE", _from: vertices.A, _to: vertices.E, foo: "D", bar: true})._id;
"EF", _from: vertices.E, _to: vertices.F, foo: "E", bar: true})._id; + edges.EG = ec.save({_key: "EG", _from: vertices.E, _to: vertices.G, foo: "F", bar: false})._id; + + + // Adding these edges to make the estimate for the edge-index extremly bad + let badEdges = []; + for (let j = 0; j < 1000; ++j) { + badEdges.push({_from: vertices.FOO, _to: vertices.BAR, foo: "foo" + j, bar: j}); + } + ec.save(badEdges); + }, + + tearDownAll: cleanup, + + tearDown: () => { + // After each test get rid of all superflous indexes. + var idxs = db[en].getIndexes(); + for (let i = 2; i < idxs.length; ++i) { + db[en].dropIndex(idxs[i].id); + } + }, + + + testHashIndex : () => { + var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false}); + // This index is assumed to be better than edge-index, but does not contain _from/_to + let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en} + FILTER p.edges[0].foo == "A" + RETURN v._id`; + + let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";}); + assertEqual(1, exp.length); + // Check if we did use the hash index on level 0 + let indexes = exp[0].indexes; + let found = indexes.levels["0"]; + assertEqual(1, found.length); + found = found[0]; + assertEqual(idx.type, found.type); + assertEqual(idx.fields, found.fields); + + let result = db._query(q).toArray(); + assertEqual(result[0], vertices.B); + }, + + testSkiplistIndex : () => { + var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false}); + // This index is assumed to be better than edge-index, but does not contain _from/_to + let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en} + FILTER p.edges[0].foo == "A" + RETURN v._id`; + + let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";}); + assertEqual(1, exp.length); + // Check if we did use the hash index on level 0 + let indexes = exp[0].indexes; + let found = indexes.levels["0"]; + assertEqual(1, found.length); + found = found[0]; + assertEqual(idx.type, found.type); + assertEqual(idx.fields, found.fields); + + let result = db._query(q).toArray(); + assertEqual(result[0], vertices.B); + }, + + + testAllHashIndex : () => { + var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false}); + // This index is assumed to be better than edge-index, but does not contain _from/_to + let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en} + FILTER p.edges[*].foo ALL == "A" + RETURN v._id`; + + let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";}); + assertEqual(1, exp.length); + // Check if we did use the hash index on level 0 + let indexes = exp[0].indexes; + let found = indexes.base; + assertEqual(1, found.length); + found = found[0]; + assertEqual(idx.type, found.type); + assertEqual(idx.fields, found.fields); + + let result = db._query(q).toArray(); + assertEqual(result[0], vertices.B); + }, + + testAllSkiplistIndex : () => { + var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false}); + // This index is assumed to be better than edge-index, but does not contain _from/_to + let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en} + FILTER p.edges[*].foo ALL == "A" + RETURN v._id`; + + let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";}); + assertEqual(1, exp.length); + // Check if we did use the hash index on level 0 + let indexes = exp[0].indexes; + let found = indexes.base; + assertEqual(1, 
+      assertEqual(1, found.length);
+      found = found[0];
+      assertEqual(idx.type, found.type);
+      assertEqual(idx.fields, found.fields);
+
+      let result = db._query(q).toArray();
+      assertEqual(result[0], vertices.B);
+    },
+
+    testAllSkiplistIncompleteIndex : () => {
+      var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo", "unknown", "_from"], unique: false, sparse: false});
+      // This index is assumed to be better than edge-index, it does contain _from, but cannot use it.
+      let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
+      FILTER p.edges[*].foo ALL == "A"
+      RETURN v._id`;
+
+      let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
+      assertEqual(1, exp.length);
+      // Check if we did use the skiplist index on all levels
+      let indexes = exp[0].indexes;
+      let found = indexes.base;
+      assertEqual(1, found.length);
+      found = found[0];
+      assertEqual(idx.type, found.type);
+      assertEqual(idx.fields, found.fields);
+
+      let result = db._query(q).toArray();
+      assertEqual(result[0], vertices.B);
+    }
+
+  };
+};
+
+if (!isCluster) {
+  jsunity.run(optimizeNonVertexCentricIndexesSuite);
+}
+
+return jsunity.done();
diff --git a/js/server/tests/aql/aql-graph-traverser.js b/js/server/tests/aql/aql-graph-traverser.js
index dd7b4516cc..93c4c87399 100644
--- a/js/server/tests/aql/aql-graph-traverser.js
+++ b/js/server/tests/aql/aql-graph-traverser.js
@@ -3219,48 +3219,6 @@ function optimizeNonVertexCentricIndexesSuite () {
       assertEqual(result[0], vertices.B);
     },
 
-    testHashIndex : () => {
-      var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false});
-      // This index is assumed to be better than edge-index, but does not contain _from/_to
-      let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
-      FILTER p.edges[0].foo == "A"
-      RETURN v._id`;
-
-      let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
-      assertEqual(1, exp.length);
-      // Check if we did use the hash index on level 0
-      let indexes = exp[0].indexes;
-      let found = indexes.levels["0"];
-      assertEqual(1, found.length);
-      found = found[0];
-      assertEqual(idx.type, found.type);
-      assertEqual(idx.fields, found.fields);
-
-      let result = db._query(q).toArray();
-      assertEqual(result[0], vertices.B);
-    },
-
-    testSkiplistIndex : () => {
-      var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false});
-      // This index is assumed to be better than edge-index, but does not contain _from/_to
-      let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
-      FILTER p.edges[0].foo == "A"
-      RETURN v._id`;
-
-      let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
-      assertEqual(1, exp.length);
-      // Check if we did use the hash index on level 0
-      let indexes = exp[0].indexes;
-      let found = indexes.levels["0"];
-      assertEqual(1, found.length);
-      found = found[0];
-      assertEqual(idx.type, found.type);
-      assertEqual(idx.fields, found.fields);
-
-      let result = db._query(q).toArray();
-      assertEqual(result[0], vertices.B);
-    },
-
     testUniqueSkiplistIndex : () => {
       var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: true, sparse: false});
       // This index is assumed to be better than edge-index, but does not contain _from/_to
@@ -3303,48 +3261,6 @@ function optimizeNonVertexCentricIndexesSuite () {
       assertEqual(result[0], vertices.B);
     },
 
-    testAllHashIndex : () => {
-      var idx = db[en].ensureIndex({type: "hash", fields: ["foo"], unique: false, sparse: false});
-      // This index is assumed to be better than edge-index, but does not contain _from/_to
-      let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
-      FILTER p.edges[*].foo ALL == "A"
-      RETURN v._id`;
-
-      let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
-      assertEqual(1, exp.length);
-      // Check if we did use the hash index on level 0
-      let indexes = exp[0].indexes;
-      let found = indexes.base;
-      assertEqual(1, found.length);
-      found = found[0];
-      assertEqual(idx.type, found.type);
-      assertEqual(idx.fields, found.fields);
-
-      let result = db._query(q).toArray();
-      assertEqual(result[0], vertices.B);
-    },
-
-    testAllSkiplistIndex : () => {
-      var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: false, sparse: false});
-      // This index is assumed to be better than edge-index, but does not contain _from/_to
-      let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
-      FILTER p.edges[*].foo ALL == "A"
-      RETURN v._id`;
-
-      let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
-      assertEqual(1, exp.length);
-      // Check if we did use the hash index on level 0
-      let indexes = exp[0].indexes;
-      let found = indexes.base;
-      assertEqual(1, found.length);
-      found = found[0];
-      assertEqual(idx.type, found.type);
-      assertEqual(idx.fields, found.fields);
-
-      let result = db._query(q).toArray();
-      assertEqual(result[0], vertices.B);
-    },
-
     testAllUniqueSkiplistIndex : () => {
       var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo"], unique: true, sparse: false});
       // This index is assumed to be better than edge-index, but does not contain _from/_to
@@ -3364,28 +3280,7 @@ function optimizeNonVertexCentricIndexesSuite () {
 
       let result = db._query(q).toArray();
       assertEqual(result[0], vertices.B);
-    },
-
-    testAllSkiplistIncompleteIndex : () => {
-      var idx = db[en].ensureIndex({type: "skiplist", fields: ["foo", "unknown", "_from"], unique: false, sparse: false});
-      // This index is assumed to be better than edge-index, it does contain _from, but cannot use it.
-      let q = `FOR v,e,p IN OUTBOUND "${vertices.A}" ${en}
-      FILTER p.edges[*].foo ALL == "A"
-      RETURN v._id`;
-
-      let exp = explain(q, {}).plan.nodes.filter(node => {return node.type === "TraversalNode";});
-      assertEqual(1, exp.length);
-      // Check if we did use the hash index on level 0
-      let indexes = exp[0].indexes;
-      let found = indexes.base;
-      assertEqual(1, found.length);
-      found = found[0];
-      assertEqual(idx.type, found.type);
-      assertEqual(idx.fields, found.fields);
-
-      let result = db._query(q).toArray();
-      assertEqual(result[0], vertices.B);
-    },
+    }
 
   };
 };

From c12bd6f2ed0b011fbbb4f7c08efdd1f1910c55f9 Mon Sep 17 00:00:00 2001
From: Jan Christoph Uhde
Date: Tue, 18 Apr 2017 16:58:44 +0200
Subject: [PATCH 2/4] remove code that tracks size of index operations in
 RocksDBTransactions

---
 arangod/RocksDBEngine/RocksDBCollection.cpp    | 26 +++++--------------
 arangod/RocksDBEngine/RocksDBEngine.cpp        |  2 +-
 .../RocksDBTransactionCollection.cpp           |  1 -
 .../RocksDBEngine/RocksDBTransactionState.cpp  |  2 --
 arangod/VocBase/voc-types.h                    |  1 -
 5 files changed, 8 insertions(+), 24 deletions(-)

diff --git a/arangod/RocksDBEngine/RocksDBCollection.cpp b/arangod/RocksDBEngine/RocksDBCollection.cpp
index 6a75f5d8f3..3a3c4606a3 100644
--- a/arangod/RocksDBEngine/RocksDBCollection.cpp
+++ b/arangod/RocksDBEngine/RocksDBCollection.cpp
@@ -329,8 +329,7 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(
         application_features::ApplicationServer::getFeature<DatabaseFeature>(
             "Database")
             ->forceSyncProperties();
-    VPackBuilder builder =
-        _logicalCollection->toVelocyPackIgnore({"path", "statusString"}, true);
+    VPackBuilder builder = _logicalCollection->toVelocyPackIgnore({"path", "statusString"}, true, /*forPersistence*/ false);
     _logicalCollection->updateProperties(builder.slice(), doSync);
   }
   created = true;
@@ -491,20 +490,6 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
      THROW_ARANGO_EXCEPTION(converted);
    }
 
-    // report index key size
-    RocksDBOperationResult result = state->addOperation(
-        cid, /*ignored revisionId*/ 0, TRI_VOC_NOOP_OPERATION_UPDATE_SIZE, 0,
-        iter->key().size());
-
-    // transaction size limit reached -- fail
-    if (result.fail()) {
-      THROW_ARANGO_EXCEPTION(result);
-    }
-
-    // force intermediate commit
-    if (result.commitRequired()) {
-      // force commit
-    }
     iter->Next();
   }
 }
@@ -514,7 +499,9 @@ int RocksDBCollection::read(transaction::Methods* trx,
                             arangodb::velocypack::Slice const key,
                             ManagedDocumentResult& result, bool) {
   TRI_ASSERT(key.isString());
+  //LOG_TOPIC(ERR, Logger::FIXME) << "############### Key Slice: " << key.toString();
   RocksDBToken token = primaryIndex()->lookupKey(trx, StringRef(key));
+  //LOG_TOPIC(ERR, Logger::FIXME) << "############### TOKEN ID: " << token.revisionId();
 
   if (token.revisionId()) {
     if (readDocument(trx, token, result)) {
@@ -526,7 +513,7 @@ int RocksDBCollection::read(transaction::Methods* trx,
                      << token.revisionId() << " for key " << key.copyString();*/
     }
   } else {
-    /*LOG_TOPIC(ERR, Logger::DEVEL) << "#" << trx->state()->id()
+    /*LOG_TOPIC(ERR, Logger::FIXME) << "#" << trx->state()->id()
        << " failed to find token for " << key.copyString() << " in read";*/
   }
 
@@ -535,14 +522,15 @@ int RocksDBCollection::read(transaction::Methods* trx,
   return TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND;
 }
 
+// read using a token!
 bool RocksDBCollection::readDocument(transaction::Methods* trx,
                                      DocumentIdentifierToken const& token,
                                      ManagedDocumentResult& result) {
   // TODO: why do we have read(), readDocument() and lookupKey()?
   auto tkn = static_cast<RocksDBToken const*>(&token);
   TRI_voc_rid_t revisionId = tkn->revisionId();
-  lookupRevisionVPack(revisionId, trx, result);
-  return !result.empty();
+  auto res = lookupRevisionVPack(revisionId, trx, result);
+  return res.ok();
 }
 
 bool RocksDBCollection::readDocumentConditional(
diff --git a/arangod/RocksDBEngine/RocksDBEngine.cpp b/arangod/RocksDBEngine/RocksDBEngine.cpp
index 0ccdfdd270..e74bda78fc 100644
--- a/arangod/RocksDBEngine/RocksDBEngine.cpp
+++ b/arangod/RocksDBEngine/RocksDBEngine.cpp
@@ -503,7 +503,7 @@ std::string RocksDBEngine::createCollection(
     TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
     arangodb::LogicalCollection const* parameters) {
   VPackBuilder builder =
-      parameters->toVelocyPackIgnore({"path", "statusString"}, true);
+      parameters->toVelocyPackIgnore({"path", "statusString"}, /*translate cid*/ true, /*for persistence*/ false);
   int res = writeCreateCollectionMarker(vocbase->id(), id, builder.slice());
 
   if (res != TRI_ERROR_NO_ERROR) {
diff --git a/arangod/RocksDBEngine/RocksDBTransactionCollection.cpp b/arangod/RocksDBEngine/RocksDBTransactionCollection.cpp
index 250cca030e..ab72c2be55 100644
--- a/arangod/RocksDBEngine/RocksDBTransactionCollection.cpp
+++ b/arangod/RocksDBEngine/RocksDBTransactionCollection.cpp
@@ -189,7 +189,6 @@ void RocksDBTransactionCollection::addOperation(
     TRI_voc_document_operation_e operationType, uint64_t operationSize,
     TRI_voc_rid_t revisionId) {
   switch (operationType) {
-    case TRI_VOC_NOOP_OPERATION_UPDATE_SIZE:
     case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN:
       break;
     case TRI_VOC_DOCUMENT_OPERATION_INSERT:
diff --git a/arangod/RocksDBEngine/RocksDBTransactionState.cpp b/arangod/RocksDBEngine/RocksDBTransactionState.cpp
index 9aebfd547d..5229a00dd0 100644
--- a/arangod/RocksDBEngine/RocksDBTransactionState.cpp
+++ b/arangod/RocksDBEngine/RocksDBTransactionState.cpp
@@ -302,7 +302,6 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
   collection->addOperation(operationType, operationSize, revisionId);
 
   switch (operationType) {
-    case TRI_VOC_NOOP_OPERATION_UPDATE_SIZE:
     case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN:
       break;
     case TRI_VOC_DOCUMENT_OPERATION_INSERT:
@@ -347,7 +346,6 @@ void RocksDBTransactionState::reset(){
   }
 
   _nestingLevel = 0;
-
   // updateStatus(transaction::Status::CREATED);
 
   // start new transaction
diff --git a/arangod/VocBase/voc-types.h b/arangod/VocBase/voc-types.h
index 3f55e49927..3fcc061a43 100644
--- a/arangod/VocBase/voc-types.h
+++ b/arangod/VocBase/voc-types.h
@@ -73,7 +73,6 @@ TRI_voc_rid_t TRI_StringToRid(char const* p, size_t len, bool& isOld, bool warn)
 /// @brief enum for write operations
 enum TRI_voc_document_operation_e : uint8_t {
   TRI_VOC_DOCUMENT_OPERATION_UNKNOWN = 0,
-  TRI_VOC_NOOP_OPERATION_UPDATE_SIZE,
   TRI_VOC_DOCUMENT_OPERATION_INSERT,
   TRI_VOC_DOCUMENT_OPERATION_UPDATE,
   TRI_VOC_DOCUMENT_OPERATION_REPLACE,

From c31d4c1590829b5b7e1f25b35111e494cda2541c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Simon=20Gra=CC=88tzer?=
Date: Tue, 18 Apr 2017 17:17:28 +0200
Subject: [PATCH 3/4] Fixed iterator

---
 arangod/Indexes/IndexIterator.cpp                               | 2 ++
 ...-optimizer-geoindex.js => aql-optimizer-geoindex-mmfiles.js} | 0
 2 files changed, 2 insertions(+)
 rename js/server/tests/aql/{aql-optimizer-geoindex.js => aql-optimizer-geoindex-mmfiles.js} (100%)

diff --git a/arangod/Indexes/IndexIterator.cpp b/arangod/Indexes/IndexIterator.cpp
index 641ab20fd8..7decd16401 100644
--- a/arangod/Indexes/IndexIterator.cpp
+++ b/arangod/Indexes/IndexIterator.cpp
@@ -97,6 +97,8 @@ bool MultiIndexIterator::next(TokenCallback const& callback, size_t limit) {
       if (_currentIdx >= _iterators.size()) {
         _current = nullptr;
         return false;
+      } else {
+        _current = _iterators.at(_currentIdx);
       }
     }
   }
diff --git a/js/server/tests/aql/aql-optimizer-geoindex.js b/js/server/tests/aql/aql-optimizer-geoindex-mmfiles.js
similarity index 100%
rename from js/server/tests/aql/aql-optimizer-geoindex.js
rename to js/server/tests/aql/aql-optimizer-geoindex-mmfiles.js

From 73f8c970efde6d4ac7bac4454f86f1750b5806c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Simon=20Gra=CC=88tzer?=
Date: Tue, 18 Apr 2017 17:26:37 +0200
Subject: [PATCH 4/4] Fixing shell-transaction

---
 .../shell/shell-transactions-noncluster.js | 48 ++++++++++++++-----
 1 file changed, 36 insertions(+), 12 deletions(-)

diff --git a/js/server/tests/shell/shell-transactions-noncluster.js b/js/server/tests/shell/shell-transactions-noncluster.js
index 0bd21d832f..2a69293e8f 100644
--- a/js/server/tests/shell/shell-transactions-noncluster.js
+++ b/js/server/tests/shell/shell-transactions-noncluster.js
@@ -101,7 +101,9 @@ function transactionRevisionsSuite () {
       } catch (err) {
       }
 
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
       assertEqual(1, c.count());
       assertEqual(1, c.toArray().length);
       assertEqual(1, c.document("test").value);
@@ -117,7 +119,9 @@ function transactionRevisionsSuite () {
       } catch (err) {
      }
 
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
       assertEqual(1, c.count());
       assertEqual(1, c.toArray().length);
       assertEqual(1, c.document("test").value);
@@ -138,7 +142,9 @@ function transactionRevisionsSuite () {
      }
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(1, c.document("test").value);
    },
 
@@ -157,7 +163,9 @@ function transactionRevisionsSuite () {
      }
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(1, c.document("test").value);
    },
 
@@ -172,7 +180,9 @@ function transactionRevisionsSuite () {
      });
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(2, c.document("test").value);
    },
 
@@ -181,7 +191,9 @@ function transactionRevisionsSuite () {
      c.update("test", { _key: "test", _rev: doc._rev, value: 2 }, { isRestore: true });
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(2, c.document("test").value);
    },
 
@@ -195,7 +207,9 @@ function transactionRevisionsSuite () {
      });
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(2, c.document("test").value);
    },
 
@@ -214,7 +228,9 @@ function transactionRevisionsSuite () {
      }
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(1, c.document("test").value);
    },
 
@@ -233,7 +249,9 @@ function transactionRevisionsSuite () {
      }
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(1, c.document("test").value);
    },
 
@@ -253,7 +271,9 @@ function transactionRevisionsSuite () {
      }
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(1, c.document("test").value);
    },
 
@@ -268,7 +288,9 @@ function transactionRevisionsSuite () {
      });
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(2, c.document("test").value);
    },
 
@@ -288,7 +310,9 @@ function transactionRevisionsSuite () {
      }
 
      assertEqual(1, c.toArray().length);
-      assertEqual(1, c.figures().revisions.count);
+      if (db._engine().name === "mmfiles") {
+        assertEqual(1, c.figures().revisions.count);
+      }
      assertEqual(1, c.document("test").value);
 }
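
Note on PATCH 4/4: the engine guard is repeated at every assertion site because figures().revisions is maintained by the mmfiles engine only. It could equally be factored into one shared helper; a minimal sketch, assuming the suite's db and assertEqual bindings are in scope (the helper name is hypothetical and not part of the patch):

    // Sketch only: figures().revisions is maintained by the mmfiles engine,
    // so the revision-count check must be skipped on other engines
    // (e.g. rocksdb). The helper name is hypothetical.
    function assertRevisionCountIfMmfiles (c, expected) {
      if (db._engine().name === "mmfiles") {
        assertEqual(expected, c.figures().revisions.count);
      }
    }

Each guarded block above would then collapse to a single call such as assertRevisionCountIfMmfiles(c, 1).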