mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'engine-api' of https://github.com/arangodb/arangodb into engine-api
* 'engine-api' of https://github.com/arangodb/arangodb:
  - slightly adjust error messages
  - Fixing jslint errors
  - Add changelog from 3.1
  - Properly generate an error message during import
  - Fixed scannedIndex values in traverser tests. It did not report the correct number of lookups required after modification for new Storage-Engine
  - fixed issue #2429
  - issue #2427: change while into an if
  - don't fail when non-array, but simply ignore it
commit 9e366d2193
@@ -178,6 +178,10 @@ v3.2.alpha1 (2017-02-05)
 v3.1.18 (2017-XX-XX)
 --------------------
 
+* better error messages during restore collection
 
 * Completely overhaul supervision. More detailed tests
 
+* Fixed a dead-lock situation in cluster traversers, it could happen in
+  rare cases if the computation on one DBServer could be completed much earlier
+  than the other server. It could also be restricted to SmartGraphs only.
@@ -344,17 +344,14 @@ int MMFilesPersistentIndex::insert(transaction::Methods* trx,
     auto& bound = bounds[i];
     iterator->Seek(rocksdb::Slice(bound.first.c_str(), bound.first.size()));
 
-    while (iterator->Valid()) {
+    if (iterator->Valid()) {
       int res = comparator->Compare(
           iterator->key(),
           rocksdb::Slice(bound.second.c_str(), bound.second.size()));
 
-      if (res > 0) {
-        break;
+      if (res <= 0) {
+        uniqueConstraintViolated = true;
       }
-
-      uniqueConstraintViolated = true;
-      break;
     }
 
     delete iterator;
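The change above (issue #2427) turns the loop into a single probe: after seeking to the range's lower bound, one valid key that compares less than or equal to the upper bound is enough to flag a unique-constraint violation. The following standalone sketch is not ArangoDB code; the database path, key, and bounds are illustrative assumptions. It only shows the same seek-and-compare pattern against a bare RocksDB instance.

#include <rocksdb/db.h>
#include <rocksdb/comparator.h>
#include <cassert>
#include <iostream>
#include <memory>
#include <string>

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::DB* raw = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/range-check-example", &raw);
  assert(s.ok());
  std::unique_ptr<rocksdb::DB> db(raw);

  // one key that happens to fall inside the probed range
  db->Put(rocksdb::WriteOptions(), "user/0002", "x");

  std::string lower = "user/0001";   // inclusive lower bound
  std::string upper = "user/0003";   // inclusive upper bound
  rocksdb::Comparator const* cmp = rocksdb::BytewiseComparator();

  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(rocksdb::ReadOptions()));
  it->Seek(rocksdb::Slice(lower));   // position on the first key >= lower bound

  bool found = false;
  if (it->Valid()) {                 // a single probe is enough, as in the hunk above
    if (cmp->Compare(it->key(), rocksdb::Slice(upper)) <= 0) {
      found = true;                  // key lies inside [lower, upper]
    }
  }
  std::cout << (found ? "key inside range\n" : "range is empty\n");
  return 0;
}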
@@ -57,6 +57,7 @@ class AggregatorHandler {
 
   /// return true if there are values in this Slice
   void setAggregatedValues(VPackSlice const& workerValues);
+  //void setAggregatedValue(std::string const& name, const void* valuePtr);
 
   /// get the pointer to an aggregator value
   const void* getAggregatedValue(std::string const& name);
@@ -1297,6 +1297,12 @@ void RestReplicationHandler::handleCommandRestoreCollection() {
 
   try {
     parsedRequest = _request->toVelocyPackBuilderPtr();
+  } catch(arangodb::velocypack::Exception const& e) {
+    std::string errorMsg = "invalid JSON: ";
+    errorMsg += e.what();
+    generateError(rest::ResponseCode::BAD, TRI_ERROR_HTTP_BAD_PARAMETER,
+                  errorMsg);
+    return;
   } catch (...) {
     generateError(rest::ResponseCode::BAD, TRI_ERROR_HTTP_BAD_PARAMETER,
                   "invalid JSON");
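The new catch block surfaces the parser's own diagnostics instead of the bare "invalid JSON". Below is a minimal sketch of that pattern using the public velocypack API directly; the handler, request object, and generateError call are left out, and the truncated input string is made up for illustration.

#include <velocypack/Builder.h>
#include <velocypack/Exception.h>
#include <velocypack/Parser.h>
#include <velocypack/Slice.h>
#include <iostream>
#include <memory>
#include <string>

int main() {
  std::string body = "{ \"name\": ";   // deliberately truncated JSON

  try {
    std::shared_ptr<arangodb::velocypack::Builder> parsed =
        arangodb::velocypack::Parser::fromJson(body);
    std::cout << "parsed " << parsed->slice().byteSize() << " bytes\n";
  } catch (arangodb::velocypack::Exception const& e) {
    // report the parser's own message instead of a generic complaint
    std::string errorMsg = "invalid JSON: ";
    errorMsg += e.what();
    std::cerr << errorMsg << "\n";
  } catch (...) {
    std::cerr << "invalid JSON\n";
  }
  return 0;
}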
@@ -631,18 +631,19 @@ int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
                                    newSlice.byteSize(), res.keySize());
 
   // transaction size limit reached -- fail
-  if (res.fail()) {
-    THROW_ARANGO_EXCEPTION(res);
+  if (result.fail()) {
+    THROW_ARANGO_EXCEPTION(result);
   }
 
-  guard.commit();
-
+  // force intermediate commit
+  if (result.commitRequired()) {
+    // force commit
+  }
+
+  guard.commit();
   }
 
 
   return res.errorNumber();
 }
 
@@ -95,7 +95,7 @@ void RocksDBEngine::collectOptions(
   // control transaction size for RocksDB engine
   _maxTransactionSize =
       std::numeric_limits<uint64_t>::max(); // set sensible default value here
-  options->addOption("--rocksdb.max-transaction-size", "transaction size limit",
+  options->addOption("--rocksdb.max-transaction-size", "transaction size limit (in bytes)",
                      new UInt64Parameter(&_maxTransactionSize));
 
   // control intermediate transactions in RocksDB
@@ -285,7 +285,7 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
   uint64_t newSize = _transactionSize + operationSize + keySize;
   if (_maxTransactionSize < newSize) {
     // we hit the transaction size limit
-    std::string message = "Maximal transaction size limit of " + std::to_string(_maxTransactionSize) + " Bytes reached!";
+    std::string message = "maximal transaction size limit of " + std::to_string(_maxTransactionSize) + " bytes reached!";
    res.reset(TRI_ERROR_RESOURCE_LIMIT, message);
    return res;
   }
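For illustration, here is a minimal, self-contained sketch of the size accounting this hunk adjusts: each operation's size is added to a running total and refused once a configured limit would be exceeded. The struct, member names, and the 64-byte limit are illustrative assumptions, not ArangoDB's actual types; only the default of std::numeric_limits<uint64_t>::max() mirrors the "effectively unlimited" default seen in RocksDBEngine::collectOptions above.

#include <cstdint>
#include <iostream>
#include <limits>
#include <string>

struct SizeLimitedTransaction {
  uint64_t maxSize = std::numeric_limits<uint64_t>::max();  // "unlimited" by default
  uint64_t currentSize = 0;

  // returns an error message, or an empty string on success
  std::string addOperation(uint64_t operationSize, uint64_t keySize) {
    uint64_t newSize = currentSize + operationSize + keySize;
    if (maxSize < newSize) {
      return "maximal transaction size limit of " + std::to_string(maxSize) +
             " bytes reached!";
    }
    currentSize = newSize;
    return "";
  }
};

int main() {
  SizeLimitedTransaction trx;
  trx.maxSize = 64;                               // e.g. a value set at startup
  std::cout << trx.addOperation(32, 8) << "\n";   // fits within the limit
  std::cout << trx.addOperation(32, 8) << "\n";   // exceeds the 64 byte limit
  return 0;
}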
@@ -294,7 +294,7 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
       static_cast<RocksDBTransactionCollection*>(findCollection(cid));
 
   if (collection == nullptr) {
-    std::string message = "Collection (" + collection->collectionName() + ") not found in transaction state";
+    std::string message = "collection '" + collection->collectionName() + "' not found in transaction state";
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message);
   }
 
@@ -26,12 +26,12 @@
 
 #include "Basics/Common.h"
 #include "Basics/SmallVector.h"
+#include "RocksDBEngine/RocksDBCommon.h"
 #include "StorageEngine/TransactionState.h"
 #include "Transaction/Hints.h"
 #include "Transaction/Methods.h"
 #include "VocBase/AccessMode.h"
 #include "VocBase/voc-types.h"
-#include "RocksDBEngine/RocksDBCommon.h"
 
 #include <rocksdb/options.h>
 #include <rocksdb/status.h>
@@ -83,7 +83,6 @@ aql::AqlValue TraverserCache::fetchAqlResult(StringRef idString) {
 }
 
 void TraverserCache::insertDocument(StringRef idString, arangodb::velocypack::Slice const& document) {
   ++_insertedDocuments;
-  return;
 }
 
@@ -291,7 +291,7 @@ function ClusterCollectionSuite () {
       assertTrue(c.hasOwnProperty("_id"));
       assertEqual([ "_key" ], c.properties().shardKeys);
       assertFalse(c.properties().waitForSync);
-      if (db._engine().name == "mmfiles") {
+      if (db._engine().name === "mmfiles") {
         assertEqual(1048576, c.properties().journalSize);
       }
     },
@@ -521,11 +521,11 @@ ArangoCollection.prototype.lookupFulltextIndex = function (field, minLength) {
 ArangoCollection.prototype.getIndexes = function (withFigures) {
   'use strict';
   var indexes = this.getIndexesPrivate(withFigures);
-  if (this.type() == 3) {
+  if (this.type() === 3) {
     var result = [];
     for (var i = 0; i < indexes.length; i++) {
-      if(indexes[i].type == "edge") {
-        if (indexes[i].fields.length == 1
+      if(indexes[i].type === "edge") {
+        if (indexes[i].fields.length === 1
           && indexes[i].fields[0] === "_from") {
           indexes[i].fields.push("_to");
           result.push(indexes[i]);
@@ -1789,7 +1789,7 @@ function complexFilteringSuite () {
         assertEqual(stats.scannedIndex, 2);
       }
       else {
-        assertEqual(stats.scannedIndex, 2);
+        assertEqual(stats.scannedIndex, 1);
       }
       assertEqual(stats.filtered, 1);
     },
@@ -1883,7 +1883,11 @@ function complexFilteringSuite () {
         // 1 Primary Lookups A -> B (B cached)
         // 1 Primary Lookups A -> B -> C (A, B cached)
         // 1 Primary Lookups A -> B -> F (A, B cached)
-        assertEqual(stats.scannedIndex, 9);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 18);
       }
       // 1 Filter On D
       assertEqual(stats.filtered, 1);

@@ -1920,7 +1924,11 @@ function complexFilteringSuite () {
         // 1 Primary Lookups A -> D (D)
         // 0 Primary Lookups A -> B -> C
         // 0 Primary Lookups A -> B -> F
-        assertEqual(stats.scannedIndex, 13);
+        // Without traverser-read-cache
+        // assertEqual(stats.scannedIndex, 13);
+
+        // With traverser-read-cache
+        assertEqual(stats.scannedIndex, 24);
       }
       // 2 Filter (B, C) too short
       // 2 Filter (E, G)

@@ -1956,7 +1964,11 @@ function complexFilteringSuite () {
         // 2 Edge Lookups (0 B) (2 D)
         // 2 Primary Lookups for Eval (E, G)
         // 1 Primary Lookups A -> D
-        assertEqual(stats.scannedIndex, 9);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 8);
       }
       // 2 Filter (B, D) too short
       // 2 Filter (E, G)

@@ -1991,7 +2003,11 @@ function complexFilteringSuite () {
         // 2 Primary Lookups A -> B
         // 1 Primary Lookups A -> B -> C
         // 1 Primary Lookups A -> B -> F
-        assertEqual(stats.scannedIndex, 8);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 8);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 16);
       }
       // 1 Filter (A->D)
       assertEqual(stats.filtered, 1);

@@ -2030,7 +2046,11 @@ function complexFilteringSuite () {
         // 1 Primary Lookups A -> D
         // 1 Primary Lookups A -> B -> C
         // 1 Primary Lookups A -> B -> F
-        assertEqual(stats.scannedIndex, 11);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 11);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 20);
       }
       // 2 Filter On (B, D) too short
       // 2 Filter On (D->E, D->G)

@@ -2082,7 +2102,11 @@ function complexFilteringSuite () {
         // 1 Primary Lookups A -> B
         // 1 Primary Lookups A -> B -> C
         // 1 Primary Lookups A -> B -> F
-        assertEqual(stats.scannedIndex, 9);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 18);
       }
       // 1 Filter On D
       assertEqual(stats.filtered, 1);

@@ -2134,7 +2158,11 @@ function complexFilteringSuite () {
         // 1 Primary Lookups A -> B
         // 1 Primary Lookups A -> B -> C
         // 1 Primary Lookups A -> B -> F
-        assertEqual(stats.scannedIndex, 9);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 18);
       }
       // 1 Filter On D
       assertEqual(stats.filtered, 1);
@@ -2789,7 +2817,15 @@ function optimizeQuantifierSuite() {
 
       let stats = cursor.getExtra().stats;
       assertEqual(stats.scannedFull, 0);
-      assertEqual(stats.scannedIndex, 9);
+      if (isCluster) {
+        assertEqual(stats.scannedIndex, 9);
+      } else {
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 23);
+      }
       assertEqual(stats.filtered, 1);
 
       query = `

@@ -2824,7 +2860,11 @@ function optimizeQuantifierSuite() {
       if (isCluster) {
         assertEqual(stats.scannedIndex, 7);
       } else {
-        assertEqual(stats.scannedIndex, 8);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 8);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 18);
       }
       assertEqual(stats.filtered, 2);
 

@@ -2844,7 +2884,11 @@ function optimizeQuantifierSuite() {
       if (isCluster) {
         assertEqual(stats.scannedIndex, 7);
       } else {
-        assertEqual(stats.scannedIndex, 8);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 8);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 18);
       }
       assertEqual(stats.filtered, 2);
     },

@@ -2863,7 +2907,15 @@ function optimizeQuantifierSuite() {
 
      let stats = cursor.getExtra().stats;
      assertEqual(stats.scannedFull, 0);
-      assertEqual(stats.scannedIndex, 9);
+      if (isCluster) {
+        assertEqual(stats.scannedIndex, 9);
+      } else {
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 23);
+      }
       assertEqual(stats.filtered, 1);
 
       query = `

@@ -2898,7 +2950,11 @@ function optimizeQuantifierSuite() {
       if (isCluster) {
         assertEqual(stats.scannedIndex, 7);
       } else {
-        assertEqual(stats.scannedIndex, 8);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 8);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 18);
       }
       assertEqual(stats.filtered, 1);
 

@@ -2918,7 +2974,11 @@ function optimizeQuantifierSuite() {
       if (isCluster) {
         assertEqual(stats.scannedIndex, 7);
       } else {
-        assertEqual(stats.scannedIndex, 8);
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 8);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 18);
       }
       assertEqual(stats.filtered, 1);
     },
@@ -2938,7 +2998,15 @@ function optimizeQuantifierSuite() {
 
       let stats = cursor.getExtra().stats;
       assertEqual(stats.scannedFull, 0);
-      assertEqual(stats.scannedIndex, 9);
+      if (isCluster) {
+        assertEqual(stats.scannedIndex, 9);
+      } else {
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 17);
+      }
       assertEqual(stats.filtered, 2);
     },
 

@@ -2960,7 +3028,11 @@ function optimizeQuantifierSuite() {
       if (isCluster) {
         assertEqual(stats.scannedIndex, 5);
       } else {
-        assertEqual(stats.scannedIndex, 7);
+        // With activated traverser-read-cache:
+        // assertEqual(stats.scannedIndex, 7);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 12);
       }
       assertEqual(stats.filtered, 3);
     },

@@ -2980,7 +3052,15 @@ function optimizeQuantifierSuite() {
 
       let stats = cursor.getExtra().stats;
       assertEqual(stats.scannedFull, 0);
-      assertEqual(stats.scannedIndex, 9);
+      if (isCluster) {
+        assertEqual(stats.scannedIndex, 9);
+      } else {
+        // With traverser-read-cache
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 17);
+      }
       assertEqual(stats.filtered, 2);
     },
 

@@ -3002,7 +3082,11 @@ function optimizeQuantifierSuite() {
       if (isCluster) {
         assertEqual(stats.scannedIndex, 5);
       } else {
-        assertEqual(stats.scannedIndex, 7);
+        // With activated traverser-read-cache:
+        // assertEqual(stats.scannedIndex, 7);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 12);
      }
      assertEqual(stats.filtered, 3);
    },

@@ -3022,7 +3106,15 @@ function optimizeQuantifierSuite() {
 
       let stats = cursor.getExtra().stats;
       assertEqual(stats.scannedFull, 0);
-      assertEqual(stats.scannedIndex, 9);
+      if (isCluster) {
+        assertEqual(stats.scannedIndex, 9);
+      } else {
+        // With activated traverser-read-cache:
+        // assertEqual(stats.scannedIndex, 9);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 17);
+      }
       assertEqual(stats.filtered, 4);
     },
 

@@ -3044,7 +3136,11 @@ function optimizeQuantifierSuite() {
       if (isCluster) {
         assertEqual(stats.scannedIndex, 5);
       } else {
-        assertEqual(stats.scannedIndex, 7);
+        // With activated traverser-read-cache:
+        // assertEqual(stats.scannedIndex, 7);
+
+        // Without traverser-read-cache
+        assertEqual(stats.scannedIndex, 12);
       }
       assertEqual(stats.filtered, 4);
     }
@@ -31,13 +31,13 @@
 var db = require('@arangodb').db;
 var internal = require('internal');
 var jsunity = require('jsunity');
-var engine = db._engine()["name"]
+var engine = db._engine()["name"];
 
 function runSetup () {
   'use strict';
   internal.debugClearFailAt();
 
-  if (engine == "mmfiles") {
+  if (engine === "mmfiles") {
     internal.debugSetFailAt('CreateCollection::tempDirectory');
     try {
       db._create('UnitTestsRecovery1');
@@ -82,7 +82,7 @@ function recoverySuite () {
   // //////////////////////////////////////////////////////////////////////////////
 
     testCreateCollectionFail: function () {
-      if (engine == "mmfiles") {
+      if (engine === "mmfiles") {
         assertNull(db._collection('UnitTestsRecovery1'));
         assertNull(db._collection('UnitTestsRecovery2'));
       }
@@ -36,59 +36,52 @@ function runSetup () {
   'use strict';
   internal.debugClearFailAt();
 
-  {
-    db._drop('UnitTestsRecovery1');
-    var c = db._create('UnitTestsRecovery1', {
-      waitForSync: true,
-      journalSize: 8 * 1024 * 1024,
-      doCompact: false
-    });
-    c.save({ value1: 1, value2: [ 'the',
-      'quick',
-      'brown',
-      'foxx',
-      'jumped',
-      'over',
-      'the',
-      'lazy',
-      'dog',
-      'xxxxxxxxxxx' ] });
-    c.ensureHashIndex('value1');
-    c.ensureSkiplist('value2');
-  }
+  db._drop('UnitTestsRecovery1');
+  var c = db._create('UnitTestsRecovery1', {
+    waitForSync: true,
+    journalSize: 8 * 1024 * 1024,
+    doCompact: false
+  });
+  c.save({ value1: 1, value2: [ 'the',
+    'quick',
+    'brown',
+    'foxx',
+    'jumped',
+    'over',
+    'the',
+    'lazy',
+    'dog',
+    'xxxxxxxxxxx' ] });
+  c.ensureHashIndex('value1');
+  c.ensureSkiplist('value2');
 
-  {
-    db._drop('UnitTestsRecovery2');
-    var c = db._create('UnitTestsRecovery2', {
-      waitForSync: false,
-      journalSize: 16 * 1024 * 1024,
-      doCompact: true,
-      isVolatile: true
-    });
-    c.save({ value1: { 'some': 'rubbish' } });
-    c.ensureSkiplist('value1');
-  }
+  db._drop('UnitTestsRecovery2');
+  c = db._create('UnitTestsRecovery2', {
+    waitForSync: false,
+    journalSize: 16 * 1024 * 1024,
+    doCompact: true,
+    isVolatile: true
+  });
+  c.save({ value1: { 'some': 'rubbish' } });
+  c.ensureSkiplist('value1');
 
-  {
-    db._drop('UnitTestsRecovery3');
-    var c = db._createEdgeCollection('UnitTestsRecovery3', {
-      waitForSync: false,
-      journalSize: 32 * 1024 * 1024,
-      doCompact: true
-    });
+  db._drop('UnitTestsRecovery3');
+  c = db._createEdgeCollection('UnitTestsRecovery3', {
+    waitForSync: false,
+    journalSize: 32 * 1024 * 1024,
+    doCompact: true
+  });
 
-    c.save('UnitTestsRecovery1/foo', 'UnitTestsRecovery2/bar', { value1: { 'some': 'rubbish' } });
-    c.ensureUniqueSkiplist('value1');
-  }
+  c.save('UnitTestsRecovery1/foo', 'UnitTestsRecovery2/bar', { value1: { 'some': 'rubbish' } });
+  c.ensureUniqueSkiplist('value1');
 
-  {
-    db._drop('_UnitTestsRecovery4');
-    var c = db._create('_UnitTestsRecovery4', { isSystem: true });
+  db._drop('_UnitTestsRecovery4');
+  c = db._create('_UnitTestsRecovery4', { isSystem: true });
 
-    c.save({ value42: 42 });
-    c.ensureUniqueConstraint('value42');
-    c.save({ _key: 'crashme' }, true);
-  }
+  c.save({ value42: 42 });
+  c.ensureUniqueConstraint('value42');
+  c.save({ _key: 'crashme' }, true);
 
   internal.debugSegfault('crashing server');
 }
 
@@ -116,7 +109,7 @@ function recoverySuite () {
       prop = c.properties();
       assertTrue(prop.waitForSync);
       assertEqual(2, c.type());
-      if (db._engine().name == "mmfiles") {
+      if (db._engine().name === "mmfiles") {
         assertEqual(8 * 1024 * 1024, prop.journalSize);
         assertFalse(prop.doCompact);
         assertFalse(prop.isVolatile);

@@ -127,11 +120,11 @@
 
       c = db._collection('UnitTestsRecovery2');
       // isVolatile has no effect on rocksdb
-      assertEqual(db._engine().name == "mmfiles" ? 0 : 1, c.count());
+      assertEqual(db._engine().name === "mmfiles" ? 0 : 1, c.count());
       prop = c.properties();
       assertFalse(prop.waitForSync);
       assertEqual(2, c.type());
-      if (db._engine().name == "mmfiles") {
+      if (db._engine().name === "mmfiles") {
         assertEqual(16 * 1024 * 1024, prop.journalSize);
         assertTrue(prop.doCompact);
         assertTrue(prop.isVolatile);

@@ -148,7 +141,7 @@ function recoverySuite () {
       prop = c.properties();
       assertFalse(prop.waitForSync);
       assertEqual(3, c.type());
-      if (db._engine().name == "mmfiles") {
+      if (db._engine().name === "mmfiles") {
         assertEqual(32 * 1024 * 1024, prop.journalSize);
         assertTrue(prop.doCompact);
         assertFalse(prop.isVolatile);
@@ -31,12 +31,12 @@
 var db = require('@arangodb').db;
 var internal = require('internal');
 var jsunity = require('jsunity');
-var engine = db._engine()["name"]
+var engine = db._engine()["name"];
 
 function runSetup () {
   'use strict';
   internal.debugClearFailAt();
-  if (engine == "mmfiles") {
+  if (engine === "mmfiles") {
     internal.debugSetFailAt('CreateDatabase::tempDirectory');
     try {
       db._createDatabase('UnitTestsRecovery1');
@@ -81,7 +81,7 @@ function recoverySuite () {
   // //////////////////////////////////////////////////////////////////////////////
 
    testCreateDatabaseFail: function () {
-      if (engine == "mmfiles") {
+      if (engine === "mmfiles") {
        assertEqual(-1, db._databases().indexOf('UnitTestsRecovery1'));
        assertEqual(-1, db._databases().indexOf('UnitTestsRecovery2'));
      }
@@ -679,7 +679,7 @@ inline void TRI_V8_AddMethod(v8::Isolate* isolate, TARGET tpl,
                              bool isHidden = false) {
   // hidden method
   if (isHidden) {
-    tpl->Set(name, v8::FunctionTemplate::New(isolate, callback)->GetFunction());
+    tpl->ForceSet(name, v8::FunctionTemplate::New(isolate, callback)->GetFunction(), v8::DontEnum);
   }
   // normal method
   else {