Bug fix 3.3/rocksdb truncate (#4338)

Michael Hackstein 2018-01-16 19:52:27 +01:00 committed by GitHub
parent da50125e9f
commit ffbc7058cd
26 changed files with 1190 additions and 166 deletions

View File

@ -1,5 +1,23 @@
v3.3.3 (XXXX-XX-XX)
-------------------
* Fixed an issue with the index estimates in RocksDB in the case a transaction is aborted.
Formerly the index estimates were modified regardless of whether the transaction committed or not.
Now they will only be modified if the transaction committed successfully.
* UI: optimized login view for very small screen sizes
* UI: optimized error messages for invalid query bind parameter
* Truncate in RocksDB will now perform intermediate commits every 10.000 documents.
If truncate fails or the server crashes during this operation, all deletes
that have been committed so far are persisted.
* make the default value of `--rocksdb.block-cache-shard-bits` use the RocksDB
default value. This will mostly mean the default number of block cache shard
bits is lower than before, allowing each shard to store more data and causing
fewer evictions from the block cache
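
The core pattern behind the index-estimate fix: index operations no longer touch the selectivity estimator directly; each transaction buffers the affected hashes per index and applies them to the estimators only on a successful commit, while an abort simply drops the buffer. Below is a minimal, self-contained sketch of that pattern, using simplified stand-in types (Estimator, TransactionEstimateBuffer) rather than the actual ArangoDB classes.

    #include <cstdint>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    // Simplified stand-in for a per-index selectivity estimator.
    struct Estimator {
      void insert(uint64_t /*hash*/) { /* update the estimate */ }
      void remove(uint64_t /*hash*/) { /* update the estimate */ }
    };

    // Per-transaction buffer: operations are only recorded here, never applied directly.
    class TransactionEstimateBuffer {
     public:
      void trackInsert(uint64_t indexId, uint64_t hash) {
        _tracked[indexId].first.push_back(hash);
      }
      void trackRemove(uint64_t indexId, uint64_t hash) {
        _tracked[indexId].second.push_back(hash);
      }

      // On successful commit: apply all buffered changes to the estimators.
      void commit(std::unordered_map<uint64_t, Estimator>& estimators) {
        for (auto const& entry : _tracked) {
          Estimator& est = estimators[entry.first];
          for (uint64_t h : entry.second.first) { est.insert(h); }
          for (uint64_t h : entry.second.second) { est.remove(h); }
        }
        _tracked.clear();
      }

      // On abort: discard the buffer; the estimators stay untouched.
      void abort() { _tracked.clear(); }

     private:
      // index id -> (inserted hashes, removed hashes)
      std::unordered_map<uint64_t,
                         std::pair<std::vector<uint64_t>, std::vector<uint64_t>>>
          _tracked;
    };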

View File

@ -697,10 +697,8 @@ void RocksDBCollection::invokeOnAllElements(
void RocksDBCollection::truncate(transaction::Methods* trx,
OperationOptions& options) {
TRI_ASSERT(_objectId != 0);
TRI_voc_cid_t cid = _logicalCollection->cid();
auto state = RocksDBTransactionState::toState(trx);
RocksDBMethods* mthd = state->rocksdbMethods();
// delete documents
RocksDBKeyBounds documentBounds =
RocksDBKeyBounds::CollectionDocuments(this->objectId());
@ -715,48 +713,51 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
iter->Seek(documentBounds.start());
uint64_t found = 0;
bool wfs = false;
while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) {
++found;
TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key()));
LocalDocumentId docId(RocksDBKey::revisionId(RocksDBEntryType::Document, iter->key()));
VPackSlice doc = VPackSlice(iter->value().data());
TRI_ASSERT(doc.isObject());
TRI_voc_rid_t revId =
RocksDBKey::revisionId(RocksDBEntryType::Document, iter->key());
VPackSlice key =
VPackSlice(iter->value().data()).get(StaticStrings::KeyString);
VPackSlice key = doc.get(StaticStrings::KeyString);
TRI_ASSERT(key.isString());
blackListKey(iter->key().data(), static_cast<uint32_t>(iter->key().size()));
// add possible log statement
state->prepareOperation(cid, revId, StringRef(key),
TRI_VOC_DOCUMENT_OPERATION_REMOVE);
Result r =
mthd->Delete(RocksDBColumnFamily::documents(), RocksDBKey(iter->key()));
if (!r.ok()) {
THROW_ARANGO_EXCEPTION(r);
state->prepareOperation(_logicalCollection->cid(), docId.id(),
StringRef(key),TRI_VOC_DOCUMENT_OPERATION_REMOVE);
auto res = removeDocument(trx, docId, doc, options, false, wfs);
if (res.fail()) {
// Failed to remove document in truncate.
// Throw
THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
}
// report size of key
RocksDBOperationResult result = state->addOperation(
cid, revId, TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0, iter->key().size());
res = state->addOperation(_logicalCollection->cid(), docId.id(),
TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0,
res.keySize());
// transaction size limit reached -- fail
if (result.fail()) {
THROW_ARANGO_EXCEPTION(result);
// transaction size limit reached
if (res.fail()) {
// This should never happen...
THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
}
if (found % 10000 == 0) {
state->triggerIntermediateCommit();
}
iter->Next();
}
// delete index items
READ_LOCKER(guard, _indexesLock);
for (std::shared_ptr<Index> const& index : _indexes) {
RocksDBIndex* rindex = static_cast<RocksDBIndex*>(index.get());
rindex->truncate(trx);
if (found > 0) {
_needToPersistIndexEstimates = true;
}
_needToPersistIndexEstimates = true;
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
// check if documents have been deleted
if (state->numIntermediateCommits() == 0 &&
if (state->numCommits() == 0 &&
mthd->countInBounds(documentBounds, true)) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"deletion check in collection truncate "
@ -765,6 +766,13 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
}
#endif
TRI_IF_FAILURE("FailAfterAllCommits") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
TRI_IF_FAILURE("SegfaultAfterAllCommits") {
TRI_SegfaultDebugging("SegfaultAfterAllCommits");
}
if (found > 64 * 1024) {
// also compact the ranges in order to speed up all further accesses
// to the collection
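
The reworked truncate above boils down to a batched-delete loop: walk the document range, remove each document through the transaction's remove path, and trigger an intermediate commit every 10.000 deletions so the deletes performed so far survive a later failure or crash. A minimal sketch of that loop, with a hypothetical cursor interface standing in for the RocksDB methods:

    #include <cstdint>
    #include <functional>

    // Hypothetical minimal interface standing in for the transactional store.
    struct TruncateCursor {
      std::function<bool()> hasNext;                  // more documents in the collection range?
      std::function<void()> removeCurrentAndAdvance;  // delete the document under the cursor
      std::function<void()> intermediateCommit;       // persist all deletes performed so far
    };

    // Delete everything in the range, committing every `batchSize` deletions so a
    // later failure only rolls back the current (uncommitted) batch.
    void truncateInBatches(TruncateCursor& cursor, uint64_t batchSize = 10000) {
      uint64_t found = 0;
      while (cursor.hasNext()) {
        cursor.removeCurrentAndAdvance();  // may throw; earlier committed batches stay persisted
        ++found;
        if (found % batchSize == 0) {
          cursor.intermediateCommit();
        }
      }
    }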

View File

@ -469,7 +469,7 @@ Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
if (r.ok()) {
std::hash<StringRef> hasher;
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
_estimator->insert(hash);
RocksDBTransactionState::toState(trx)->trackIndexInsert(_collection->cid(), id(), hash);
return IndexResult();
} else {
return IndexResult(r.errorNumber(), this);
@ -500,7 +500,7 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
if (res.ok()) {
std::hash<StringRef> hasher;
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
_estimator->remove(hash);
RocksDBTransactionState::toState(trx)->trackIndexRemove(_collection->cid(), id(), hash);
return IndexResult();
} else {
return IndexResult(res.errorNumber(), this);
@ -991,13 +991,17 @@ void RocksDBEdgeIndex::recalculateEstimates() {
}
}
Result RocksDBEdgeIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
// blacklist keys during truncate
blackListKey(key.data(), key.size());
void RocksDBEdgeIndex::applyCommitedEstimates(
std::vector<uint64_t> const& inserts,
std::vector<uint64_t> const& removes) {
if (_estimator != nullptr) {
// If we have an estimator apply the changes to it.
for (auto const& hash : inserts) {
_estimator->insert(hash);
}
uint64_t hash = RocksDBEdgeIndex::HashForKey(key);
_estimator->remove(hash);
return Result();
for (auto const& hash : removes) {
_estimator->remove(hash);
}
}
}

View File

@ -173,9 +173,8 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
arangodb::velocypack::Slice const&,
OperationMode mode) override;
protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
virtual void applyCommitedEstimates(std::vector<uint64_t> const& inserts,
std::vector<uint64_t> const& removes) override;
private:
/// @brief create the iterator

View File

@ -541,12 +541,6 @@ Result RocksDBGeoIndex::removeInternal(transaction::Methods* trx,
return IndexResult();
}
void RocksDBGeoIndex::truncate(transaction::Methods* trx) {
TRI_ASSERT(_geoIndex != nullptr);
RocksDBIndex::truncate(trx);
GeoIndex_reset(_geoIndex, RocksDBTransactionState::toMethods(trx));
}
/// @brief looks up all points within a given radius
GeoCoordinates* RocksDBGeoIndex::withinQuery(transaction::Methods* trx,
double lat, double lon,

View File

@ -138,8 +138,6 @@ class RocksDBGeoIndex final : public RocksDBIndex {
void unload() override {}
void truncate(transaction::Methods*) override;
/// @brief looks up all points within a given radius
arangodb::rocksdbengine::GeoCoordinates* withinQuery(transaction::Methods*,
double, double,

View File

@ -249,64 +249,6 @@ Result RocksDBIndex::updateInternal(transaction::Methods* trx, RocksDBMethods* m
return insertInternal(trx, mthd, newDocumentId, newDoc, mode);
}
void RocksDBIndex::truncate(transaction::Methods* trx) {
auto* mthds = RocksDBTransactionState::toMethods(trx);
auto state = RocksDBTransactionState::toState(trx);
RocksDBKeyBounds indexBounds = getBounds(type(), _objectId, _unique);
rocksdb::ReadOptions options = mthds->readOptions();
rocksdb::Slice end = indexBounds.end();
rocksdb::Comparator const* cmp = this->comparator();
options.iterate_upper_bound = &end;
if (type() == RocksDBIndex::TRI_IDX_TYPE_EDGE_INDEX) {
options.prefix_same_as_start = false;
options.total_order_seek = true;
}
options.verify_checksums = false;
options.fill_cache = false;
std::unique_ptr<rocksdb::Iterator> iter = mthds->NewIterator(options, _cf);
iter->Seek(indexBounds.start());
while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) {
TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key()));
// report size of key
RocksDBOperationResult result = state->addInternalOperation(
0, iter->key().size());
// transaction size limit reached -- fail
if (result.fail()) {
THROW_ARANGO_EXCEPTION(result);
}
Result r = mthds->Delete(_cf, RocksDBKey(iter->key()));
if (!r.ok()) {
THROW_ARANGO_EXCEPTION(r);
}
r = postprocessRemove(trx, iter->key(), iter->value());
if (!r.ok()) {
THROW_ARANGO_EXCEPTION(r);
}
iter->Next();
}
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
//check if index entries have been deleted
if (type() != TRI_IDX_TYPE_GEO1_INDEX && type() != TRI_IDX_TYPE_GEO2_INDEX) {
if (state->numIntermediateCommits() == 0 &&
mthds->countInBounds(getBounds(), true)) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"deletion check in collection truncate "
"failed - not all documents in an index "
"have been deleted");
}
}
#endif
}
/// @brief return the memory usage of the index
size_t RocksDBIndex::memory() const {
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
@ -331,12 +273,6 @@ void RocksDBIndex::cleanup() {
db->CompactRange(opts, _cf, &b, &e);
}
Result RocksDBIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
return Result();
}
// blacklist given key from transactional cache
void RocksDBIndex::blackListKey(char const* data, std::size_t len) {
if (useCache()) {
@ -378,3 +314,11 @@ RocksDBKeyBounds RocksDBIndex::getBounds(Index::IndexType type,
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
}
void RocksDBIndex::applyCommitedEstimates(
std::vector<uint64_t> const& inserts,
std::vector<uint64_t> const& removes) {
// This function is required to be overloaded by indexes with estimates. All others should not call this function.
// In production this call will be ignored; it is not critical.
TRI_ASSERT(false);
}

View File

@ -81,8 +81,6 @@ class RocksDBIndex : public Index {
void load() override;
void unload() override;
virtual void truncate(transaction::Methods*);
size_t memory() const override;
void cleanup();
@ -149,13 +147,10 @@ class RocksDBIndex : public Index {
static RocksDBKeyBounds getBounds(Index::IndexType type, uint64_t objectId,
bool unique);
protected:
// Will be called during truncate to allow the index to update selectivity
// estimates, blacklist keys, etc.
virtual Result postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value);
virtual void applyCommitedEstimates(std::vector<uint64_t> const& inserts,
std::vector<uint64_t> const& removes);
protected:
inline bool useCache() const { return (_cacheEnabled && _cachePresent); }
void blackListKey(char const* data, std::size_t len);
void blackListKey(StringRef& ref) { blackListKey(ref.data(), ref.size()); };

View File

@ -339,13 +339,6 @@ arangodb::aql::AstNode* RocksDBPrimaryIndex::specializeCondition(
return matcher.specializeOne(this, node, reference);
}
Result RocksDBPrimaryIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
blackListKey(key.data(), key.size());
return Result();
}
/// @brief create the iterator, for a single attribute, IN operator
IndexIterator* RocksDBPrimaryIndex::createInIterator(
transaction::Methods* trx, ManagedDocumentResult* mmdr,

View File

@ -144,10 +144,6 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
arangodb::velocypack::Slice const&,
OperationMode mode) override;
protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
private:
/// @brief create the iterator, for a single attribute, IN operator
IndexIterator* createInIterator(transaction::Methods*, ManagedDocumentResult*,

View File

@ -26,6 +26,7 @@
#include "Cluster/CollectionLockState.h"
#include "Logger/Logger.h"
#include "RocksDBEngine/RocksDBCollection.h"
#include "RocksDBEngine/RocksDBIndex.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Hints.h"
#include "Transaction/Methods.h"
@ -282,11 +283,36 @@ void RocksDBTransactionCollection::addOperation(
}
void RocksDBTransactionCollection::commitCounts() {
// Update the index estimates.
TRI_ASSERT(_collection != nullptr);
for (auto const& pair : _trackedIndexOperations) {
auto idx = _collection->lookupIndex(pair.first);
if (idx == nullptr) {
TRI_ASSERT(false); // Index reported estimates, but does not exist
continue;
}
auto ridx = static_cast<RocksDBIndex*>(idx.get());
ridx->applyCommitedEstimates(pair.second.first, pair.second.second);
}
_initialNumberDocuments = _numInserts - _numRemoves;
_operationSize = 0;
_numInserts = 0;
_numUpdates = 0;
_numRemoves = 0;
_trackedIndexOperations.clear();
}
void RocksDBTransactionCollection::trackIndexInsert(uint64_t idxObjectId,
uint64_t hash) {
// First list is Inserts
_trackedIndexOperations[idxObjectId].first.emplace_back(hash);
}
void RocksDBTransactionCollection::trackIndexRemove(uint64_t idxObjectId,
uint64_t hash) {
// Second list is Removes
_trackedIndexOperations[idxObjectId].second.emplace_back(hash);
}
/// @brief lock a collection

View File

@ -90,6 +90,16 @@ class RocksDBTransactionCollection final : public TransactionCollection {
uint64_t operationSize, TRI_voc_rid_t revisionId);
void commitCounts();
/// @brief Every index can track hashes inserted into this index
/// Used to update the estimate after the trx committed
void trackIndexInsert(uint64_t idxObjectId, uint64_t hash);
/// @brief Every index can track hashes removed from this index
/// Used to update the estimate after the trx committed
void trackIndexRemove(uint64_t idxObjectId, uint64_t hash);
private:
/// @brief request a lock for a collection
/// returns TRI_ERROR_LOCKED in case the lock was successfully acquired
@ -110,6 +120,13 @@ class RocksDBTransactionCollection final : public TransactionCollection {
uint64_t _numUpdates;
uint64_t _numRemoves;
bool _usageLocked;
/// @brief A list where all indexes with estimates can store their operations
/// Will be applied to the index estimators on commit and not applied on abort
std::unordered_map<uint64_t,
std::pair<std::vector<uint64_t>, std::vector<uint64_t>>>
_trackedIndexOperations;
};
}

View File

@ -271,13 +271,18 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
collection->revision());
engine->counterManager()->updateCounter(coll->objectId(), update);
}
// we need this in case of an intermediate commit. The number of
// initial documents is adjusted and numInserts / removes is set to 0
collection->commitCounts();
}
}
} else {
for (auto& trxCollection : _collections) {
RocksDBTransactionCollection* collection =
static_cast<RocksDBTransactionCollection*>(trxCollection);
// We get here if we have filled indexes. So let us commit counts
collection->commitCounts();
}
// don't write anything if the transaction is empty
result = rocksutils::convertStatus(_rocksTransaction->Rollback());
}
@ -529,6 +534,37 @@ uint64_t RocksDBTransactionState::sequenceNumber() const {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "No snapshot set");
}
void RocksDBTransactionState::triggerIntermediateCommit() {
TRI_IF_FAILURE("FailBeforeIntermediateCommit") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
TRI_IF_FAILURE("SegfaultBeforeIntermediateCommit") {
TRI_SegfaultDebugging("SegfaultBeforeIntermediateCommit");
}
TRI_ASSERT(!hasHint(transaction::Hints::Hint::SINGLE_OPERATION));
LOG_TOPIC(DEBUG, Logger::ROCKSDB) << "INTERMEDIATE COMMIT!";
internalCommit();
TRI_IF_FAILURE("FailAfterIntermediateCommit") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
TRI_IF_FAILURE("SegfaultAfterIntermediateCommit") {
TRI_SegfaultDebugging("SegfaultAfterIntermediateCommit");
}
_lastUsedCollection = 0;
_numInternal = 0;
_numInserts = 0;
_numUpdates = 0;
_numRemoves = 0;
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
_numLogdata = 0;
#endif
createTransaction();
}
void RocksDBTransactionState::checkIntermediateCommit(uint64_t newSize) {
auto numOperations = _numInserts + _numUpdates + _numRemoves + _numInternal;
// perform an intermediate commit
@ -536,19 +572,7 @@ void RocksDBTransactionState::checkIntermediateCommit(uint64_t newSize) {
// "transaction size" counters have reached their limit
if (_options.intermediateCommitCount <= numOperations ||
_options.intermediateCommitSize <= newSize) {
TRI_ASSERT(!hasHint(transaction::Hints::Hint::SINGLE_OPERATION));
LOG_TOPIC(DEBUG, Logger::ROCKSDB) << "INTERMEDIATE COMMIT!";
internalCommit();
_lastUsedCollection = 0;
_numInternal = 0;
_numInserts = 0;
_numUpdates = 0;
_numRemoves = 0;
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
_numLogdata = 0;
_numIntermediateCommits++;
#endif
createTransaction();
triggerIntermediateCommit();
}
}
@ -577,6 +601,25 @@ void RocksDBTransactionState::returnRocksDBKey(RocksDBKey* key) {
}
}
void RocksDBTransactionState::trackIndexInsert(TRI_voc_cid_t cid, TRI_idx_iid_t idxId, uint64_t hash) {
auto col = findCollection(cid);
if (col != nullptr) {
static_cast<RocksDBTransactionCollection*>(col)->trackIndexInsert(idxId, hash);
} else {
TRI_ASSERT(false);
}
}
void RocksDBTransactionState::trackIndexRemove(TRI_voc_cid_t cid, TRI_idx_iid_t idxId, uint64_t hash) {
auto col = findCollection(cid);
if (col != nullptr) {
static_cast<RocksDBTransactionCollection*>(col)->trackIndexRemove(idxId, hash);
} else {
TRI_ASSERT(false);
}
}
/// @brief constructor, leases a builder
RocksDBKeyLeaser::RocksDBKeyLeaser(transaction::Methods* trx)
: _rtrx(RocksDBTransactionState::toState(trx)),
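
checkIntermediateCommit in the hunk above decides when triggerIntermediateCommit fires: either the number of operations or the accumulated transaction size has reached its configured limit. A small sketch of that check; the default values here are placeholders, not the engine's actual defaults, which come from the RocksDB engine options:

    #include <cstdint>

    // Stand-in for the configurable intermediate-commit thresholds.
    // The defaults below are placeholder values for illustration only.
    struct IntermediateCommitOptions {
      uint64_t intermediateCommitCount = 10000;             // operations per sub-transaction
      uint64_t intermediateCommitSize = 512 * 1024 * 1024;  // accumulated bytes
    };

    // True when either threshold is reached and an intermediate commit should be
    // triggered before more operations are added.
    bool needsIntermediateCommit(IntermediateCommitOptions const& opts,
                                 uint64_t numOperations, uint64_t newSize) {
      return opts.intermediateCommitCount <= numOperations ||
             opts.intermediateCommitSize <= newSize;
    }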

View File

@ -83,11 +83,6 @@ class RocksDBTransactionState final : public TransactionState {
uint64_t numInserts() const { return _numInserts; }
uint64_t numUpdates() const { return _numUpdates; }
uint64_t numRemoves() const { return _numRemoves; }
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
uint64_t numIntermediateCommits() const {
return _numIntermediateCommits;
};
#endif
/// @brief reset previous log state after a rollback to safepoint
void resetLogState() { _lastUsedCollection = 0; }
@ -150,6 +145,19 @@ class RocksDBTransactionState final : public TransactionState {
RocksDBKey* leaseRocksDBKey();
/// @brief return a temporary RocksDBKey object. Not thread safe
void returnRocksDBKey(RocksDBKey* key);
/// @brief Trigger an intermediate commit.
/// Handle with care: if the transaction fails after this commit, it will only
/// be rolled back to this point in time.
/// Not thread safe
void triggerIntermediateCommit();
/// @brief Every index can track hashes inserted into this index
/// Used to update the estimate after the trx committed
void trackIndexInsert(TRI_voc_cid_t cid, TRI_idx_iid_t idxObjectId, uint64_t hash);
/// @brief Every index can track hashes removed from this index
/// Used to update the estimate after the trx committed
void trackIndexRemove(TRI_voc_cid_t cid, TRI_idx_iid_t idxObjectId, uint64_t hash);
private:
/// @brief create a new rocksdb transaction
@ -188,7 +196,6 @@ class RocksDBTransactionState final : public TransactionState {
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
/// store the number of log entries in WAL
uint64_t _numLogdata = 0;
uint64_t _numIntermediateCommits = 0;
#endif
SmallVector<RocksDBKey*, 32>::allocator_type::arena_type _arena;
SmallVector<RocksDBKey*, 32> _keys;

View File

@ -609,10 +609,11 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
}
if (res == TRI_ERROR_NO_ERROR) {
auto state = RocksDBTransactionState::toState(trx);
for (auto& it : hashes) {
// The estimator is only useful if we are in a non-unique indexes
TRI_ASSERT(!_unique);
_estimator->insert(it);
state->trackIndexInsert(_collection->cid(), id(), it);
}
}
@ -736,10 +737,11 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
}
if (res == TRI_ERROR_NO_ERROR) {
auto state = RocksDBTransactionState::toState(trx);
for (auto& it : hashes) {
// The estimator is only useful if we are in a non-unique indexes
TRI_ASSERT(!_unique);
_estimator->remove(it);
state->trackIndexRemove(_collection->cid(), id(), it);
}
}
@ -1577,12 +1579,17 @@ void RocksDBVPackIndex::recalculateEstimates() {
bounds.columnFamily());
}
Result RocksDBVPackIndex::postprocessRemove(transaction::Methods* trx,
rocksdb::Slice const& key,
rocksdb::Slice const& value) {
if (!unique()) {
uint64_t hash = RocksDBVPackIndex::HashForKey(key);
_estimator->remove(hash);
void RocksDBVPackIndex::applyCommitedEstimates(
std::vector<uint64_t> const& inserts,
std::vector<uint64_t> const& removes) {
if (_estimator != nullptr) {
// If we have an estimator apply the changes to it.
for (auto const& hash : inserts) {
_estimator->insert(hash);
}
for (auto const& hash : removes) {
_estimator->remove(hash);
}
}
return Result();
}

View File

@ -212,8 +212,8 @@ class RocksDBVPackIndex : public RocksDBIndex {
arangodb::velocypack::Slice const&,
OperationMode mode) override;
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
virtual void applyCommitedEstimates(std::vector<uint64_t> const& inserts,
std::vector<uint64_t> const& removes) override;
private:
bool isDuplicateOperator(arangodb::aql::AstNode const*,

View File

@ -231,6 +231,13 @@ TransactionCollection* TransactionState::findCollection(
}
/// @brief find a collection in the transaction's list of collections
/// If the collection is found it will be returned and the position is not used.
/// If the collection is not found, a nullptr is returned and the position is
/// set to the slot where the collection would have to be inserted; callers
/// that want to insert the collection must use this position.
TransactionCollection* TransactionState::findCollection(
TRI_voc_cid_t cid, size_t& position) const {
size_t const n = _collections.size();
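
The comment block above describes a find-or-report-insert-position contract for the two-argument findCollection. A generic sketch of how such a contract is typically implemented and used, assuming the collection list is kept sorted by cid; the types here are simplified stand-ins, not the actual TransactionState API:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct Collection { uint64_t cid; };  // simplified stand-in

    // If the cid is found, return the collection and leave `position` untouched.
    // Otherwise return nullptr and set `position` to the slot where a new
    // collection has to be inserted to keep the (cid-sorted) list ordered.
    Collection* findCollection(std::vector<std::unique_ptr<Collection>> const& cols,
                               uint64_t cid, std::size_t& position) {
      std::size_t i = 0;
      for (; i < cols.size(); ++i) {
        if (cols[i]->cid == cid) {
          return cols[i].get();
        }
        if (cols[i]->cid > cid) {
          break;  // sorted list: the cid cannot appear further right
        }
      }
      position = i;
      return nullptr;
    }

    // Typical caller: look up first, insert at the reported position on a miss.
    void addCollection(std::vector<std::unique_ptr<Collection>>& cols, uint64_t cid) {
      std::size_t position = 0;
      if (findCollection(cols, cid, position) == nullptr) {
        cols.insert(cols.begin() + static_cast<std::ptrdiff_t>(position),
                    std::unique_ptr<Collection>(new Collection{cid}));
      }
    }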

View File

@ -32,6 +32,7 @@ var jsunity = require("jsunity");
var arangodb = require("@arangodb");
var ArangoCollection = arangodb.ArangoCollection;
var testHelper = require("@arangodb/test-helper").Helper;
const internal = require("internal");
var db = arangodb.db;
var ERRORS = arangodb.errors;
@ -572,7 +573,6 @@ function CollectionCacheSuite () {
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////

View File

@ -1,5 +1,5 @@
/* jshint globalstrict:false, strict:false */
/* global assertEqual, assertTrue, assertFalse, assertNotNull */
/* global assertEqual, assertTrue, assertFalse, assertNotNull, fail */
// //////////////////////////////////////////////////////////////////////////////
// / @brief test the document interface
@ -399,7 +399,42 @@ function EdgeIndexSuite () {
}
edge.save(vn + '/from' + (i % 20), vn + '/to' + i, { });
}
}
},
testIndexSelectivityAfterAbortion: function () {
let docs = [];
for (let i = 0; i < 1000; ++i) {
docs.push({_from: `${vn}/from${i % 32}`, _to: `${vn}/to${i % 47}`});
}
edge.save(docs);
let idx = edge.getIndexes()[1];
let estimateBefore = idx.selectivityEstimate;
try {
internal.db._executeTransaction({
collections: {write: en},
action: function () {
const vn = 'UnitTestsCollectionVertex';
const en = 'UnitTestsCollectionEdge';
let docs = [];
for (let i = 0; i < 1000; ++i) {
docs.push({_from: `${vn}/from${i % 32}`, _to: `${vn}/to${i % 47}`});
}
// This should significantly modify the estimate
// if successful
require('@arangodb').db[en].save(docs);
throw {errorMessage: "banana"};
}
});
fail();
} catch (e) {
assertEqual(e.errorMessage, "banana");
// Insert failed.
// Validate that the estimate is not modified
idx = edge.getIndexes()[1];
assertEqual(idx.selectivityEstimate, estimateBefore);
}
},
};
}

View File

@ -1,5 +1,5 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertTrue, assertEqual */
/*global assertEqual, assertTrue, fail */
////////////////////////////////////////////////////////////////////////////////
/// @brief test the hash index, selectivity estimates
@ -150,7 +150,47 @@ function HashIndexSuite() {
idx = collection.ensureHashIndex("value");
assertTrue(idx.selectivityEstimate <= (2 / 3000 + 0.0001));
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief Validate that selectivity estimate is not modified if the transaction
/// is aborted.
////////////////////////////////////////////////////////////////////////////////
testSelectivityAfterAbortion : function () {
let idx = collection.ensureHashIndex("value");
let docs = [];
for (let i = 0; i < 1000; ++i) {
docs.push({value: i % 100});
}
collection.save(docs);
idx = collection.ensureHashIndex("value");
assertTrue(idx.selectivityEstimate === 100 / 1000);
try {
internal.db._executeTransaction({
collections: {write: cn},
action: function () {
const cn = "UnitTestsCollectionHash";
let docs = [];
for (let i = 0; i < 1000; ++i) {
docs.push({value: 1});
}
// This should significantly modify the estimate
// if successful
require('@arangodb').db[cn].save(docs);
throw {errorMessage: "banana"};
}
});
fail();
} catch (e) {
assertEqual(e.errorMessage, "banana");
// Insert failed.
// Validate that the estimate is not modified
idx = collection.ensureHashIndex("value");
assertTrue(idx.selectivityEstimate === 100 / 1000);
}
},
};
}

View File

@ -0,0 +1,121 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertTrue, fail */
////////////////////////////////////////////////////////////////////////////////
/// @brief test the skiplist index, selectivity estimates
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
/// @author Copyright 2018, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var jsunity = require("jsunity");
var internal = require("internal");
function SkiplistIndexSuite() {
'use strict';
var cn = "UnitTestsCollectionSkip";
var collection = null;
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
internal.db._drop(cn);
collection = internal.db._create(cn);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
// try...catch is necessary as some tests delete the collection itself!
try {
collection.unload();
collection.drop();
}
catch (err) {
}
collection = null;
internal.wait(0.0);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief Validate that selectivity estimate is not modified if the transaction
/// is aborted.
////////////////////////////////////////////////////////////////////////////////
testSelectivityAfterAbortion : function () {
let idx = collection.ensureSkiplist("value");
let docs = [];
for (let i = 0; i < 1000; ++i) {
docs.push({value: i % 100});
}
collection.save(docs);
idx = collection.ensureSkiplist("value");
let oldEstimate = idx.selectivityEstimate;
assertTrue(oldEstimate > 0);
assertTrue(oldEstimate < 1);
try {
internal.db._executeTransaction({
collections: {write: cn},
action: function () {
const cn = "UnitTestsCollectionSkip";
let docs = [];
for (let i = 0; i < 1000; ++i) {
docs.push({value: 1});
}
// This should significantly modify the estimate
// if successful
require('@arangodb').db[cn].save(docs);
throw {errorMessage: "banana"};
}
});
fail();
} catch (e) {
assertEqual(e.errorMessage, "banana");
// Insert failed.
// Validate that the estimate is not modified
idx = collection.ensureSkiplist("value");
assertEqual(idx.selectivityEstimate, oldEstimate);
}
},
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
jsunity.run(SkiplistIndexSuite);
return jsunity.done();

View File

@ -142,6 +142,7 @@ function optimizerIndexesSortTestSuite () {
return node.type;
});
require("internal").db._explain(query);
assertNotEqual(-1, nodeTypes.indexOf("IndexNode"), query);
if (!require("@arangodb/cluster").isCluster()) {
assertEqual(-1, nodeTypes.indexOf("SortNode"), query);

View File

@ -0,0 +1,158 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertFalse, fail */
// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for truncate on rocksdb with intermediate commits & failures
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Michael Hackstein
// / @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
'use strict';
const db = require('@arangodb').db;
const internal = require('internal');
const jsunity = require('jsunity');
const colName = "UnitTestsRecovery";
const runSetup = function () {
internal.debugClearFailAt();
db._drop(colName);
const c = db._create(colName);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
internal.debugSetFailAt("SegfaultAfterAllCommits");
// This will crash the server
c.truncate();
fail();
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////
const recoverySuite = function () {
jsunity.jsUnity.attachAssertions();
const c = db._collection(colName);
return {
setUp: function () {},
tearDown: function () {},
// Test that the collection is empty.
// We crashed after all deletes were committed, just before truncate returned
testCollectionCount: () => {
assertEqual(c.count(), 0);
},
// Test that the primary index remains intact but empty.
testPrimaryIndex: () => {
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": colName}).toArray();
assertEqual(res.length, 0);
},
// Test that the HashIndex remains intact but empty.
testHashIndex: () => {
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 251}).toArray();
assertEqual(res2.length, 0);
},
// Test that the SkiplistIndex remains intact.
testSkiplistIndex: () => {
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 101}).toArray();
assertEqual(res2.length, 0);
},
testIndexEstimates: () => {
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 1);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 1);
break;
default:
fail();
}
}
},
};
};
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
'use strict';
if (internal.debugCanUseFailAt()) {
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.done().status ? 0 : 1;
}
} else {
return jsunity.done();
}
}

View File

@ -0,0 +1,148 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertFalse, fail */
// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for truncate on rocksdb with intermediate commits & failures
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Michael Hackstein
// / @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
'use strict';
const db = require('@arangodb').db;
const internal = require('internal');
const jsunity = require('jsunity');
const colName = "UnitTestsRecovery";
const runSetup = function () {
internal.debugClearFailAt();
db._drop(colName);
const c = db._create(colName);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
internal.debugSetFailAt("SegfaultBeforeIntermediateCommit");
// This will crash the server
c.truncate();
fail();
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////
const recoverySuite = function () {
jsunity.jsUnity.attachAssertions();
const c = db._collection(colName);
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
return {
setUp: function () {},
tearDown: function () {},
// Test that count of collection remains unmodified.
// We crashed before commit
testCollectionCount: () => {
assertEqual(c.count(), 20000);
},
// Test that the HashIndex remains intact.
testHashIndex: () => {
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, docsWithEqHash);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 251}).toArray();
assertEqual(res2.length, 0);
},
// Test that the SkiplistIndex remains intact.
testSkiplistIndex: () => {
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
assertEqual(res.length, docsWithEqSkip);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 101}).toArray();
assertEqual(res2.length, 0);
},
testSelectivityEstimates: () => {
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.0125);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.005);
break;
default:
fail();
}
}
},
};
};
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
if (internal.debugCanUseFailAt()) {
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.done().status ? 0 : 1;
}
} else {
return jsunity.done();
}
}

View File

@ -0,0 +1,159 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertFalse, assertTrue, fail */
// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for truncate on rocksdb with intermediate commits & failures
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Michael Hackstein
// / @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
'use strict';
const db = require('@arangodb').db;
const internal = require('internal');
const jsunity = require('jsunity');
const colName = "UnitTestsRecovery";
const runSetup = function () {
internal.debugClearFailAt();
db._drop(colName);
const c = db._create(colName);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
internal.debugSetFailAt("SegfaultAfterIntermediateCommit");
// This will crash the server
c.truncate();
fail();
};
// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////
const recoverySuite = function () {
jsunity.jsUnity.attachAssertions();
const c = db._collection(colName);
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
return {
setUp: function () {},
tearDown: function () {},
// Test that the count reflects the first intermediate commit.
// We crashed after one intermediate commit, but before the other
testCollectionCount: () => {
assertEqual(c.count(), 10000);
},
// Test that the HashIndex remains intact.
testHashIndex: () => {
let sum = 0;
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
let c = res.length;
assertTrue(c < docsWithEqHash);
sum += c;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 251}).toArray();
assertEqual(res2.length, 0);
},
// Test that the SkiplistIndex remains intact.
testSkiplistIndex: () => {
let sum = 0;
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": colName, i: i}).toArray();
let c = res.length;
assertTrue(c < docsWithEqSkip);
sum += c;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": colName, i: 101}).toArray();
assertEqual(res2.length, 0);
},
testSelectivityEstimates: () => {
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.025);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.01);
break;
default:
fail();
}
}
}
};
};
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
'use strict';
if (internal.debugCanUseFailAt()) {
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.done().status ? 0 : 1;
}
} else {
return jsunity.done();
}
}

View File

@ -0,0 +1,306 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertTrue, assertEqual, assertTypeOf, assertNotEqual, fail, assertFalse */
////////////////////////////////////////////////////////////////////////////////
/// @brief test the collection interface
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2018 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Michael Hackstein
/// @author Copyright 2018, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var jsunity = require("jsunity");
var arangodb = require("@arangodb");
var db = arangodb.db;
const internal = require("internal");
var ERRORS = arangodb.errors;
function CollectionTruncateFailuresSuite() {
'use strict';
const cn = "UnitTestsTruncate";
let c;
const cleanUp = () => {
internal.debugClearFailAt();
try {
db._drop(cn);
} catch(_) { }
};
const docs = [];
for (let i = 0; i < 10000; ++i) {
docs.push({value: i % 250, value2: i % 100});
}
return {
tearDown: cleanUp,
setUp: function () {
cleanUp();
c = db._create(cn);
c.ensureHashIndex("value");
c.ensureSkiplist("value2");
// Add two packs of 10.000 Documents.
// Intermediate commits will commit after 10.000 removals
c.save(docs);
c.save(docs);
},
testTruncateFailsAfterAllCommits: function () {
internal.debugSetFailAt("FailAfterAllCommits");
try {
c.truncate();
fail();
} catch (e) {
// Validate that we died with debug
assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
}
// All documents should be removed through intermediate commits.
// We have two packs that fill up those commits.
// Now validate that we end up with an empty collection.
assertEqual(c.count(), 0);
// Test Primary
{
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": cn}).toArray();
assertEqual(res.length, 0);
}
// Test Hash
{
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 251}).toArray();
assertEqual(res2.length, 0);
}
// Test Skiplist
{
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, 0);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 101}).toArray();
assertEqual(res2.length, 0);
}
// Test Selectivity Estimates
{
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 1);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 1);
break;
default:
fail();
}
}
}
},
testTruncateFailsBeforeCommit: function () {
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
internal.debugSetFailAt("FailBeforeIntermediateCommit");
try {
c.truncate();
fail();
} catch (e) {
// Validate that we died with debug
assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
}
// The failure hits before the first intermediate commit, so no deletes are persisted.
// Validate that the collection still contains all documents.
assertEqual(c.count(), 20000);
// Test Primary
{
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": cn}).toArray();
assertEqual(res.length, 20000);
}
// Test Hash
{
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, docsWithEqHash);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 251}).toArray();
assertEqual(res2.length, 0);
}
// Test Skiplist
{
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertEqual(res.length, docsWithEqSkip);
}
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 101}).toArray();
assertEqual(res2.length, 0);
}
// Test Selectivity Estimates
{
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.0125);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.005);
break;
default:
fail();
}
}
}
},
testTruncateFailsBetweenCommits: function () {
internal.debugSetFailAt("FailAfterIntermediateCommit");
const docsWithEqHash = 20000 / 250;
const docsWithEqSkip = 20000 / 100;
try {
c.truncate();
fail();
} catch (e) {
// Validate that we died with debug
assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
}
// The failure hits after the first intermediate commit, so one pack of deletes is persisted.
// Validate that the collection still contains the remaining 10000 documents.
assertEqual(c.count(), 10000);
// Test Primary
{
let q = `FOR x IN @@c RETURN x._key`;
let res = db._query(q, {"@c": cn}).toArray();
assertEqual(res.length, 10000);
}
// Test Hash
{
let sum = 0;
let q = `FOR x IN @@c FILTER x.value == @i RETURN x`;
for (let i = 0; i < 250; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertTrue(res.length < docsWithEqHash);
sum += res.length;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 251}).toArray();
assertEqual(res2.length, 0);
}
// Test Skiplist
{
let q = `FOR x IN @@c FILTER x.value2 == @i RETURN x`;
let sum = 0;
for (let i = 0; i < 100; ++i) {
// This validates that all documents can be found again
let res = db._query(q, {"@c": cn, i: i}).toArray();
assertTrue(res.length < docsWithEqSkip);
sum += res.length;
}
assertEqual(sum, 10000);
// just validate that no other values are inserted.
let res2 = db._query(q, {"@c": cn, i: 101}).toArray();
assertEqual(res2.length, 0);
}
// Test Selectivity Estimates
// This may be fuzzy...
{
let indexes = c.getIndexes(true);
for (let i of indexes) {
switch (i.type) {
case 'primary':
assertEqual(i.selectivityEstimate, 1);
break;
case 'hash':
assertEqual(i.selectivityEstimate, 0.025);
break;
case 'skiplist':
assertEqual(i.selectivityEstimate, 0.01);
break;
default:
fail();
}
}
}
},
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
if (internal.debugCanUseFailAt()) {
jsunity.run(CollectionTruncateFailuresSuite);
}
return jsunity.done();