
Bug fix/rocksdb truncate (#4060)

Michael Hackstein 2018-01-16 09:01:04 +01:00 committed by GitHub
parent 62aee41411
commit d32ac3b9c8
18 changed files with 815 additions and 163 deletions


@@ -1,6 +1,10 @@
devel
-----
* Truncate in RocksDB will now do intermediate commits every 10.000 documents.
  If truncate fails or the server crashes during this operation, all deletes
  that have been committed so far are persisted (see the batching sketch below).
* make the default value of `--rocksdb.block-cache-shard-bits` use the RocksDB
  default value. This will mostly mean the default number of block cache shard
  bits is lower than before, allowing each shard to store more data and cause
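
The first changelog bullet above describes the new batching behaviour: truncate walks all documents, removes them one by one, and issues an intermediate commit every 10.000 removals, so a failure can only lose the current, uncommitted batch. Below is a minimal, self-contained C++ sketch of that pattern; the function and callback names are illustrative assumptions, not ArangoDB's API.

#include <cstdint>
#include <functional>
#include <iostream>

// Walk over all documents, remove each one, and issue an intermediate commit
// every 10.000 removals so that only the current, uncommitted batch can be lost.
void truncateInBatches(uint64_t documentCount,
                       std::function<void(uint64_t)> const& removeOne,
                       std::function<void()> const& intermediateCommit) {
  constexpr uint64_t kBatch = 10000;  // threshold from the changelog entry
  uint64_t found = 0;
  for (uint64_t doc = 0; doc < documentCount; ++doc) {
    removeOne(doc);
    if (++found % kBatch == 0) {
      intermediateCommit();  // deletes performed so far become durable
    }
  }
  // the remaining partial batch is committed with the surrounding transaction
}

int main() {
  uint64_t removed = 0;
  uint64_t commits = 0;
  truncateInBatches(20000, [&](uint64_t) { ++removed; }, [&]() { ++commits; });
  std::cout << removed << " removals, " << commits << " intermediate commits\n";
  // prints: 20000 removals, 2 intermediate commits
}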


@@ -694,10 +694,8 @@ void RocksDBCollection::invokeOnAllElements(
void RocksDBCollection::truncate(transaction::Methods* trx,
OperationOptions& options) {
TRI_ASSERT(_objectId != 0);
TRI_voc_cid_t cid = _logicalCollection->cid();
auto state = RocksDBTransactionState::toState(trx);
RocksDBMethods* mthd = state->rocksdbMethods();
// delete documents
RocksDBKeyBounds documentBounds =
RocksDBKeyBounds::CollectionDocuments(this->objectId());
@@ -712,55 +710,66 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
iter->Seek(documentBounds.start());
uint64_t found = 0;
while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) {
++found;
TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key()));
LocalDocumentId docId(RocksDBKey::revisionId(RocksDBEntryType::Document, iter->key()));
VPackSlice doc = VPackSlice(iter->value().data());
TRI_ASSERT(doc.isObject());
TRI_voc_rid_t revId =
RocksDBKey::revisionId(RocksDBEntryType::Document, iter->key());
VPackSlice key =
VPackSlice(iter->value().data()).get(StaticStrings::KeyString);
VPackSlice key = doc.get(StaticStrings::KeyString);
TRI_ASSERT(key.isString());
blackListKey(iter->key().data(), static_cast<uint32_t>(iter->key().size()));
// add possible log statement
state->prepareOperation(cid, revId, StringRef(key),
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
Result r =
mthd->Delete(RocksDBColumnFamily::documents(), RocksDBKey(iter->key()));
if (!r.ok()) {
THROW_ARANGO_EXCEPTION(r);
state->prepareOperation(_logicalCollection->cid(), docId.id(),
StringRef(key),TRI_VOC_DOCUMENT_OPERATION_REMOVE);
auto res = removeDocument(trx, docId, doc, options);
if (res.fail()) {
// Failed to remove document in truncate.
// Throw
THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
}
// report size of key
RocksDBOperationResult result = state->addOperation(
cid, revId, TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0, iter->key().size());
res = state->addOperation(_logicalCollection->cid(), docId.id(),
TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0,
res.keySize());
// transaction size limit reached -- fail
if (result.fail()) {
THROW_ARANGO_EXCEPTION(result);
// transaction size limit reached
if (res.fail()) {
// This should never happen...
THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
}
if (found % 10000 == 0) {
state->triggerIntermediateCommit();
}
iter->Next();
}
// delete index items
READ_LOCKER(guard, _indexesLock);
for (std::shared_ptr<Index> const& index : _indexes) {
RocksDBIndex* rindex = static_cast<RocksDBIndex*>(index.get());
rindex->truncate(trx);
if (found > 0) {
_needToPersistIndexEstimates = true;
}
_needToPersistIndexEstimates = true;
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
// check if documents have been deleted
if (mthd->countInBounds(documentBounds, true)) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"deletion check in collection truncate "
"failed - not all documents have been "
"deleted");
if (state->numCommits() == 0) {
// check if documents have been deleted
if (mthd->countInBounds(documentBounds, true)) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"deletion check in collection truncate "
"failed - not all documents have been "
"deleted");
}
}
#endif
TRI_IF_FAILURE("FailAfterAllCommits") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
TRI_IF_FAILURE("SegfaultAfterAllCommits") {
TRI_SegfaultDebugging("SegfaultAfterAllCommits");
}
if (found > 64 * 1024) {
// also compact the ranges in order to speed up all further accesses
// to the collection


@@ -999,14 +999,3 @@ void RocksDBEdgeIndex::recalculateEstimates() {
_estimator->insert(hash);
}
}
Result RocksDBEdgeIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
// blacklist keys during truncate
blackListKey(key.data(), key.size());
uint64_t hash = RocksDBEdgeIndex::HashForKey(key);
_estimator->remove(hash);
return Result();
}


@@ -175,10 +175,6 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
virtual std::pair<RocksDBCuckooIndexEstimator<uint64_t>*, uint64_t> estimator() const override;
protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
private:
/// @brief create the iterator
IndexIterator* createEqIterator(transaction::Methods*, ManagedDocumentResult*,


@@ -541,12 +541,6 @@ Result RocksDBGeoIndex::removeInternal(transaction::Methods* trx,
return IndexResult();
}
void RocksDBGeoIndex::truncate(transaction::Methods* trx) {
TRI_ASSERT(_geoIndex != nullptr);
RocksDBIndex::truncate(trx);
GeoIndex_reset(_geoIndex, RocksDBTransactionState::toMethods(trx));
}
/// @brief looks up all points within a given radius
GeoCoordinates* RocksDBGeoIndex::withinQuery(transaction::Methods* trx,
double lat, double lon,


@@ -138,8 +138,6 @@ class RocksDBGeoIndex final : public RocksDBIndex {
void unload() override {}
void truncate(transaction::Methods*) override;
/// @brief looks up all points within a given radius
arangodb::rocksdbengine::GeoCoordinates* withinQuery(transaction::Methods*,
double, double,


@@ -249,63 +249,6 @@ Result RocksDBIndex::updateInternal(transaction::Methods* trx, RocksDBMethods* m
return insertInternal(trx, mthd, newDocumentId, newDoc, mode);
}
void RocksDBIndex::truncate(transaction::Methods* trx) {
auto* mthds = RocksDBTransactionState::toMethods(trx);
auto state = RocksDBTransactionState::toState(trx);
RocksDBKeyBounds indexBounds = getBounds(type(), _objectId, _unique);
rocksdb::ReadOptions options = mthds->readOptions();
rocksdb::Slice end = indexBounds.end();
rocksdb::Comparator const* cmp = this->comparator();
options.iterate_upper_bound = &end;
if (type() == RocksDBIndex::TRI_IDX_TYPE_EDGE_INDEX) {
options.prefix_same_as_start = false;
options.total_order_seek = true;
}
options.verify_checksums = false;
options.fill_cache = false;
std::unique_ptr<rocksdb::Iterator> iter = mthds->NewIterator(options, _cf);
iter->Seek(indexBounds.start());
while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) {
TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key()));
// report size of key
RocksDBOperationResult result = state->addInternalOperation(
0, iter->key().size());
// transaction size limit reached -- fail
if (result.fail()) {
THROW_ARANGO_EXCEPTION(result);
}
Result r = mthds->Delete(_cf, RocksDBKey(iter->key()));
if (!r.ok()) {
THROW_ARANGO_EXCEPTION(r);
}
r = postprocessRemove(trx, iter->key(), iter->value());
if (!r.ok()) {
THROW_ARANGO_EXCEPTION(r);
}
iter->Next();
}
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
//check if index entries have been deleted
if (type() != TRI_IDX_TYPE_GEO1_INDEX && type() != TRI_IDX_TYPE_GEO2_INDEX) {
if (mthds->countInBounds(getBounds(), true)) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"deletion check in collection truncate "
"failed - not all documents in an index "
"have been deleted");
}
}
#endif
}
/// @brief return the memory usage of the index
size_t RocksDBIndex::memory() const {
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
@@ -332,12 +275,6 @@ void RocksDBIndex::cleanup() {
}
}
Result RocksDBIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
return Result();
}
// blacklist given key from transactional cache
void RocksDBIndex::blackListKey(char const* data, std::size_t len) {
if (useCache()) {


@@ -82,8 +82,6 @@ class RocksDBIndex : public Index {
void load() override;
void unload() override;
virtual void truncate(transaction::Methods*);
size_t memory() const override;
void cleanup();
@@ -153,12 +151,6 @@ class RocksDBIndex : public Index {
virtual std::pair<RocksDBCuckooIndexEstimator<uint64_t>*, uint64_t> estimator() const;
protected:
// Will be called during truncate to allow the index to update selectivity
// estimates, blacklist keys, etc.
virtual Result postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value);
inline bool useCache() const { return (_cacheEnabled && _cachePresent); }
void blackListKey(char const* data, std::size_t len);
void blackListKey(StringRef& ref) { blackListKey(ref.data(), ref.size()); };


@@ -339,13 +339,6 @@ arangodb::aql::AstNode* RocksDBPrimaryIndex::specializeCondition(
return matcher.specializeOne(this, node, reference);
}
Result RocksDBPrimaryIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
blackListKey(key.data(), key.size());
return Result();
}
/// @brief create the iterator, for a single attribute, IN operator
IndexIterator* RocksDBPrimaryIndex::createInIterator(
transaction::Methods* trx, ManagedDocumentResult* mmdr,


@@ -144,10 +144,6 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
arangodb::velocypack::Slice const&,
OperationMode mode) override;
protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
private:
/// @brief create the iterator, for a single attribute, IN operator
IndexIterator* createInIterator(transaction::Methods*, ManagedDocumentResult*,


@@ -529,6 +529,37 @@ uint64_t RocksDBTransactionState::sequenceNumber() const {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "No snapshot set");
}
void RocksDBTransactionState::triggerIntermediateCommit() {
TRI_IF_FAILURE("FailBeforeIntermediateCommit") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
TRI_IF_FAILURE("SegfaultBeforeIntermediateCommit") {
TRI_SegfaultDebugging("SegfaultBeforeIntermediateCommit");
}
TRI_ASSERT(!hasHint(transaction::Hints::Hint::SINGLE_OPERATION));
LOG_TOPIC(DEBUG, Logger::ROCKSDB) << "INTERMEDIATE COMMIT!";
internalCommit();
TRI_IF_FAILURE("FailAfterIntermediateCommit") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
TRI_IF_FAILURE("SegfaultAfterIntermediateCommit") {
TRI_SegfaultDebugging("SegfaultAfterIntermediateCommit");
}
_lastUsedCollection = 0;
_numInternal = 0;
_numInserts = 0;
_numUpdates = 0;
_numRemoves = 0;
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
_numLogdata = 0;
#endif
createTransaction();
}
void RocksDBTransactionState::checkIntermediateCommit(uint64_t newSize) {
auto numOperations = _numInserts + _numUpdates + _numRemoves + _numInternal;
// perform an intermediate commit
@@ -536,18 +567,7 @@ void RocksDBTransactionState::checkIntermediateCommit(uint64_t newSize) {
// "transaction size" counters have reached their limit
if (_options.intermediateCommitCount <= numOperations ||
_options.intermediateCommitSize <= newSize) {
TRI_ASSERT(!hasHint(transaction::Hints::Hint::SINGLE_OPERATION));
LOG_TOPIC(DEBUG, Logger::ROCKSDB) << "INTERMEDIATE COMMIT!";
internalCommit();
_lastUsedCollection = 0;
_numInternal = 0;
_numInserts = 0;
_numUpdates = 0;
_numRemoves = 0;
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
_numLogdata = 0;
#endif
createTransaction();
triggerIntermediateCommit();
}
}
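
The TRI_IF_FAILURE / TRI_SegfaultDebugging blocks above, together with internal.debugSetFailAt in the tests further down, are the failure-point mechanism that drives the crash scenarios. The following self-contained C++ sketch illustrates the idea only; the names and the registry implementation here are assumptions for illustration, not ArangoDB's actual macros.

#include <set>
#include <stdexcept>
#include <string>

// Which failure points are currently armed (set from test code).
static std::set<std::string> activeFailurePoints;

void debugSetFailAt(std::string const& name) { activeFailurePoints.insert(name); }
void debugClearFailAt() { activeFailurePoints.clear(); }

// Instrumented code calls this at named points, in the spirit of TRI_IF_FAILURE.
void maybeFail(std::string const& name) {
  if (activeFailurePoints.count(name) != 0) {
    throw std::runtime_error("intentional debug failure at: " + name);
  }
}

int main() {
  debugSetFailAt("FailAfterIntermediateCommit");
  try {
    maybeFail("FailBeforeIntermediateCommit");  // not armed: no effect
    // ... the intermediate commit itself would happen here ...
    maybeFail("FailAfterIntermediateCommit");   // armed: aborts the operation
  } catch (std::exception const&) {
    // a test would now verify which deletes had already been made durable
  }
  debugClearFailAt();
}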


@@ -145,6 +145,11 @@ class RocksDBTransactionState final : public TransactionState {
RocksDBKey* leaseRocksDBKey();
/// @brief return a temporary RocksDBKey object. Not thread safe
void returnRocksDBKey(RocksDBKey* key);
/// @brief Trigger an intermediate commit.
/// Handle with care: if the transaction fails after this commit, it can
/// only be rolled back to this point in time.
/// Not thread safe
void triggerIntermediateCommit();
private:
/// @brief create a new rocksdb transaction
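
The documentation added for triggerIntermediateCommit() above states that a failure after an intermediate commit can only be rolled back to that commit point. A minimal, self-contained illustration of that behaviour follows; FakeTrx and its methods are invented for this sketch and are not ArangoDB types.

#include <iostream>
#include <vector>

struct FakeTrx {
  std::vector<int> persisted;  // what survives an abort (or a crash)
  std::vector<int> pending;    // work done since the last commit point

  void write(int v) { pending.push_back(v); }
  void intermediateCommit() {  // make pending work durable, keep the trx open
    persisted.insert(persisted.end(), pending.begin(), pending.end());
    pending.clear();
  }
  void abort() { pending.clear(); }  // rolls back only to the last commit point
};

int main() {
  FakeTrx trx;
  trx.write(1);
  trx.write(2);
  trx.intermediateCommit();  // 1 and 2 are now durable
  trx.write(3);
  trx.abort();               // only 3 is rolled back
  std::cout << trx.persisted.size() << " writes survived\n";  // prints 2
}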


@@ -1580,16 +1580,6 @@ void RocksDBVPackIndex::recalculateEstimates() {
bounds.columnFamily());
}
Result RocksDBVPackIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
if (!unique()) {
uint64_t hash = RocksDBVPackIndex::HashForKey(key);
_estimator->remove(hash);
}
return Result();
}
std::pair<RocksDBCuckooIndexEstimator<uint64_t>*, uint64_t>
RocksDBVPackIndex::estimator() const {
return std::make_pair(_estimator.get(), _estimatorSerializedSeq);


@@ -212,9 +212,6 @@ class RocksDBVPackIndex : public RocksDBIndex {
arangodb::velocypack::Slice const&,
OperationMode mode) override;
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
virtual std::pair<RocksDBCuckooIndexEstimator<uint64_t>*, uint64_t> estimator() const override;
private:


@@ -32,6 +32,7 @@ var jsunity = require("jsunity");
var arangodb = require("@arangodb");
var ArangoCollection = arangodb.ArangoCollection;
var testHelper = require("@arangodb/test-helper").Helper;
const internal = require("internal");
var db = arangodb.db;
var ERRORS = arangodb.errors;
@@ -572,12 +573,278 @@ function CollectionCacheSuite () {
};
}
function CollectionTruncateFailuresSuite() {
const cn = "UnitTestsTruncate";
let c;
const cleanUp = () => {
internal.debugClearFailAt();
try {
db._drop(cn);
} catch(_) { }
};
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
return {
tearDown: cleanUp,
setUp: function () {
cleanUp();
c = db._create(cn);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
},
testTruncateFailsAfterAllCommits: function () {
internal.debugSetFailAt("FailAfterAllCommits");
try {
c.truncate();
fail();
} catch (e) {
// Validate that we died with debug
assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
}
// All documents should be removed through intermediate commits.
// We have two packs that fill up those commits.
// Now validate that we end up with an empty collection.
assertEqual(c.count(), 0);
// Test Primary
{
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": cn}).toArray();
assertEqual(res.length, 0);
}
// Test Hash
{
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that no documents remain for this value
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 251}).toArray();
assertEqual(res2.length, 0);
}
// Test Skiplist
{
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that no documents remain for this value
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 101}).toArray();
assertEqual(res2.length, 0);
}
// Test Selectivity Estimates
{
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 1);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 1);
break;
default:
fail();
}
}
}
},
testTruncateFailsBeforeCommit: function () {
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
internal.debugSetFailAt("FailBeforeIntermediateCommit");
internal.print(c.getIndexes(true));
try {
c.truncate();
fail();
} catch (e) {
// Validate that we died with debug
assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
}
internal.print(c.getIndexes(true));
// Nothing should have been removed: we failed before the first intermediate commit.
// Now validate that we end up with the collection unchanged.
assertEqual(c.count(), 20000);
// Test Primary
{
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": cn}).toArray();
assertEqual(res.length, 20000);
}
// Test Hash
{
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, docsWithEqHash);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 251}).toArray();
assertEqual(res2.length, 0);
}
// Test Skiplist
{
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, docsWithEqSkip);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 101}).toArray();
assertEqual(res2.length, 0);
}
// Test Selectivity Estimates
{
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.0125);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.005);
break;
default:
fail();
}
}
}
},
testTruncateFailsBetweenCommits: function () {
internal.debugSetFailAt("FailAfterIntermediateCommit");
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
try {
c.truncate();
fail();
} catch (e) {
// Validate that we died with debug
assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
}
// Only the first batch of 10.000 removals was committed before the failure.
// Now validate that we end up with half of the documents remaining.
assertEqual(c.count(), 10000);
// Test Primary
{
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": cn}).toArray();
assertEqual(res.length, 10000);
}
// Test Hash
{
let sum = 0;
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertTrue(res.length < docsWithEqHash);
sum += res.length;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 251}).toArray();
assertEqual(res2.length, 0);
}
// Test Skiplist
{
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
let sum = 0;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertTrue(res.length < docsWithEqSkip);
sum += res.length;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 101}).toArray();
assertEqual(res2.length, 0);
}
// Test Selectivity Estimates
// This may be fuzzy...
{
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.025);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.01);
break;
default:
fail();
}
}
}
},
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
jsunity.run(CollectionSuite);
jsunity.run(CollectionCacheSuite);
//jsunity.run(CollectionSuite);
//jsunity.run(CollectionCacheSuite);
if (internal.debugCanUseFailAt()) {
jsunity.run(CollectionTruncateFailuresSuite);
}
return jsunity.done();
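
The counts and selectivity estimates asserted in CollectionTruncateFailuresSuite follow from the setup (two saves of a 10.000-document pack, with value = i % 250 and value2 = i % 100) and the 10.000-removal intermediate-commit threshold. The small standalone check below works through that arithmetic; it assumes the usual definition of a selectivity estimate as distinct indexed values divided by indexed documents.

#include <cassert>

int main() {
  int const total = 2 * 10000;  // setUp saves the 10.000-document pack twice
  int const batch = 10000;      // intermediate commit threshold during truncate
  assert(total / batch == 2);   // a full truncate performs two intermediate commits

  // Selectivity estimate taken as: distinct indexed values / indexed documents.
  assert(250.0 / 20000 == 0.0125);  // hash index (value = i % 250), nothing removed
  assert(100.0 / 20000 == 0.005);   // skiplist index (value2 = i % 100), nothing removed
  assert(250.0 / 10000 == 0.025);   // hash index after one committed batch of removals
  assert(100.0 / 10000 == 0.01);    // skiplist index after one committed batch
  return 0;
}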


@@ -0,0 +1,158 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertFalse, fail */
// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for truncate on rocksdb with intermediate commits & failures
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Michael Hackstein
// / @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
'use strict';
const db = require('@arangodb').db;
const internal = require('internal');
const jsunity = require('jsunity');
const colName = "UnitTestsRecovery";
const runSetup = function () {
internal.debugClearFailAt();
db._drop(colName);
const c = db._create(colName);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
internal.debugSetFailAt("SegfaultAfterAllCommits");
// This will crash the server
c.truncate();
fail();
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////
const recoverySuite = function () {
jsunity.jsUnity.attachAssertions();
const c = db._collection(colName);
return {
setUp: function () {},
tearDown: function () {},
// Test that the collection ends up empty.
// We crashed after all commits, before return
testCollectionCount: () => {
assertEqual(c.count(), 0);
},
// Test that the PrimaryIndex remains intact but empty.
testPrimaryIndex: () => {
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": colName}).toArray();
assertEqual(res.length, 0);
},
// Test that the HashIndex remains intact but empty.
testHashIndex: () => {
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that no documents remain for this value
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 251}).toArray();
assertEqual(res2.length, 0);
},
// Test that the SkiplistIndex remains intact but empty.
testSkiplistIndex: () => {
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that no documents remain for this value
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 101}).toArray();
assertEqual(res2.length, 0);
},
testIndexEstimates: () => {
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 1);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 1);
break;
default:
fail();
}
}
},
};
};
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
'use strict';
if (internal.debugCanUseFailAt()) {
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.done().status ? 0 : 1;
}
} else {
return jsunity.done();
}
}


@@ -0,0 +1,148 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertFalse, fail */
// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for truncate on rocksdb with intermediate commits & failures
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Michael Hackstein
// / @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
'use strict';
const db = require('@arangodb').db;
const internal = require('internal');
const jsunity = require('jsunity');
const colName = "UnitTestsRecovery";
const runSetup = function () {
internal.debugClearFailAt();
db._drop(colName);
const c = db._create(colName);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
internal.debugSetFailAt("SegfaultBeforeIntermediateCommit");
// This will crash the server
c.truncate();
fail();
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////
const recoverySuite = function () {
jsunity.jsUnity.attachAssertions();
const c = db._collection(colName);
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
return {
setUp: function () {},
tearDown: function () {},
// Test that count of collection remains unmodified.
// We crashed before commit
testCollectionCount: () => {
assertEqual(c.count(), 20000);
},
// Test that the HashIndex remains intact.
testHashIndex: () => {
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, docsWithEqHash);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 251}).toArray();
assertEqual(res2.length, 0);
},
// Test that the SkiplistIndex remains intact.
testSkiplistIndex: () => {
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, docsWithEqSkip);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 101}).toArray();
assertEqual(res2.length, 0);
},
testSelectivityEstimates: () => {
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.0125);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.005);
break;
default:
fail();
}
}
},
};
};
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
if (internal.debugCanUseFailAt()) {
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.done().status ? 0 : 1;
}
} else {
return jsunity.done();
}
}


@@ -0,0 +1,159 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertFalse, assertTrue, fail */
// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for truncate on rocksdb with intermediate commits & failures
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Michael Hackstein
// / @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
'use strict';
const db = require('@arangodb').db;
const internal = require('internal');
const jsunity = require('jsunity');
const colName = "UnitTestsRecovery";
const runSetup = function () {
internal.debugClearFailAt();
db._drop(colName);
const c = db._create(colName);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
internal.debugSetFailAt("SegfaultAfterIntermediateCommit");
// This will crash the server
c.truncate();
fail();
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////
const recoverySuite = function () {
jsunity.jsUnity.attachAssertions();
const c = db._collection(colName);
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
return {
setUp: function () {},
tearDown: function () {},
// Test that exactly one batch of removals was persisted.
// We crashed after one remove commit, but before the other
testCollectionCount: () => {
assertEqual(c.count(), 10000);
},
// Test that the HashIndex remains intact.
testHashIndex: () => {
let sum = 0;
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
let c = res.length;
assertTrue(c < docsWithEqHash);
sum += c;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 251}).toArray();
assertEqual(res2.length, 0);
},
// Test that the SkiplistIndex remains intact.
testSkiplistIndex: () => {
let sum = 0;
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
let c = res.length;
assertTrue(c < docsWithEqSkip);
sum += c;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 101}).toArray();
assertEqual(res2.length, 0);
},
testSelectivityEstimates: () => {
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.025);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.01);
break;
default:
fail();
}
}
}
};
};
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
'use strict';
if (internal.debugCanUseFailAt()) {
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.done().status ? 0 : 1;
}
} else {
return jsunity.done();
}
}