
Remove Obsolete code (#8657)

Simon 2019-04-03 13:40:44 +02:00 committed by Jan
parent 744845c9bb
commit 7cd84a785a
31 changed files with 208 additions and 219 deletions

View File

@ -853,7 +853,7 @@ arangodb::Result MoveShard::abort() {
// Current preconditions for all shards
doForAllShards(
_snapshot, _database, shardsLikeMe,
[this, &trx](
[&trx](
Slice plan, Slice current, std::string& planPath, std::string& curPath) {
// Current still as is
trx.add(curPath, current);
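
The lambda passed to doForAllShards captured this in addition to &trx, but the body presumably never used it; dropping the unused capture keeps the closure minimal and silences clang's -Wunused-lambda-capture. A standalone sketch of the same cleanup (illustrative names, not the MoveShard code):

    #include <iostream>

    struct Job {
      int counter = 0;

      void run() {
        int local = 0;
        // capturing 'this' here would be flagged by clang's
        // -Wunused-lambda-capture, because the body only uses 'local'
        auto cb = [&local](int v) { local += v; };
        cb(1);
        std::cout << local << std::endl;
      }
    };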

View File

@ -1002,7 +1002,7 @@ std::string ClusterComm::createCommunicatorDestination(std::string const& endpoi
}
httpEndpoint.append(path);
return std::move(httpEndpoint);
return httpEndpoint;
}
std::pair<ClusterCommResult*, HttpRequest*> ClusterComm::prepareRequest(
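
Returning std::move(httpEndpoint) from a function that returns the local by value inhibits copy elision (NRVO) and is flagged by recent compilers as a pessimizing move; returning the local by name lets the compiler elide the copy or fall back to an implicit move. A minimal sketch of the same pattern (hypothetical names, not the ClusterComm code):

    #include <string>

    std::string buildEndpoint(std::string const& host, std::string const& path) {
      std::string endpoint = "http://" + host;
      endpoint.append(path);
      return endpoint;  // eligible for NRVO; 'return std::move(endpoint);' would inhibit it
    }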

View File

@ -61,7 +61,6 @@ class RestHandler : public std::enable_shared_from_this<RestHandler> {
uint64_t messageId() const;
GeneralRequest const* request() const { return _request.get(); }
std::unique_ptr<GeneralRequest> stealRequest() { return std::move(_request); }
GeneralResponse* response() const { return _response.get(); }
std::unique_ptr<GeneralResponse> stealResponse() {

View File

@ -699,10 +699,6 @@ arangodb::Result IResearchLink::drop() {
return arangodb::Result();
}
bool IResearchLink::hasBatchInsert() const {
return true;
}
bool IResearchLink::hasSelectivityEstimate() const {
return false; // selectivity can only be determined per query since multiple fields are indexed
}
@ -1772,4 +1768,4 @@ arangodb::Result IResearchLink::unload() {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

View File

@ -115,7 +115,6 @@ class IResearchLink {
////////////////////////////////////////////////////////////////////////////////
arangodb::Result drop(); // arangodb::Index override
bool hasBatchInsert() const; // arangodb::Index override
bool hasSelectivityEstimate() const; // arangodb::Index override
//////////////////////////////////////////////////////////////////////////////
@ -302,4 +301,4 @@ class IResearchLink {
} // namespace iresearch
} // namespace arangodb
#endif
#endif

View File

@ -62,10 +62,6 @@ class IResearchLinkCoordinator final : public arangodb::ClusterIndex, public IRe
//////////////////////////////////////////////////////////////////////////////
static arangodb::IndexTypeFactory const& factory();
virtual bool hasBatchInsert() const override {
return IResearchLink::hasBatchInsert();
}
virtual bool hasSelectivityEstimate() const override {
return IResearchLink::hasSelectivityEstimate();
}

View File

@ -60,10 +60,6 @@ class IResearchMMFilesLink final : public arangodb::MMFilesIndex, public IResear
//////////////////////////////////////////////////////////////////////////////
static arangodb::IndexTypeFactory const& factory();
virtual bool hasBatchInsert() const override {
return IResearchLink::hasBatchInsert();
}
virtual bool hasSelectivityEstimate() const override {
return IResearchLink::hasSelectivityEstimate();
}

View File

@ -53,10 +53,6 @@ class IResearchRocksDBLink final : public arangodb::RocksDBIndex, public IResear
//////////////////////////////////////////////////////////////////////////////
static arangodb::IndexTypeFactory const& factory();
virtual bool hasBatchInsert() const override {
return IResearchLink::hasBatchInsert();
}
virtual bool hasSelectivityEstimate() const override {
return IResearchLink::hasSelectivityEstimate();
}

View File

@ -628,9 +628,6 @@ Result Index::sizeHint(transaction::Methods& trx, size_t size) {
return Result(); // do nothing
}
/// @brief default implementation for hasBatchInsert
bool Index::hasBatchInsert() const { return false; }
/// @brief default implementation for supportsFilterCondition
bool Index::supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const&,
arangodb::aql::AstNode const*,

View File

@ -212,6 +212,8 @@ class Index {
static IndexType type(char const* type, size_t len);
static IndexType type(std::string const& type);
public:
virtual char const* typeName() const = 0;
@ -337,8 +339,6 @@ class Index {
// give index a hint about the expected size
virtual Result sizeHint(transaction::Methods& trx, size_t size);
virtual bool hasBatchInsert() const;
virtual bool supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const& allIndexes,
arangodb::aql::AstNode const*,
arangodb::aql::Variable const*, size_t,
@ -372,7 +372,7 @@ class Index {
std::shared_ptr<basics::LocalTaskQueue> queue);
static size_t sortWeight(arangodb::aql::AstNode const* node);
protected:
/// @brief return the name of the (sole) index attribute
/// it is only allowed to call this method if the index contains a

View File

@ -176,8 +176,6 @@ class MMFilesEdgeIndex final : public MMFilesIndex {
Result sizeHint(transaction::Methods& trx, size_t size) override;
bool hasBatchInsert() const override { return true; }
TRI_MMFilesEdgeIndexHash_t* from() const { return _edgesFrom.get(); }
TRI_MMFilesEdgeIndexHash_t* to() const { return _edgesTo.get(); }

View File

@ -268,8 +268,6 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
Result sizeHint(transaction::Methods& trx, size_t size) override;
bool hasBatchInsert() const override { return true; }
bool supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const& allIndexes,
arangodb::aql::AstNode const*,
arangodb::aql::Variable const*, size_t, size_t&,

View File

@ -147,7 +147,7 @@ Result RocksDBBuilderIndex::remove(transaction::Methods& trx, RocksDBMethods* mt
}
// fast mode assuming exclusive access locked from outside
template <typename WriteBatchType, typename MethodsType>
template <typename WriteBatchType, typename MethodsType, bool foreground>
static arangodb::Result fillIndex(RocksDBIndex& ridx, WriteBatchType& batch,
rocksdb::Snapshot const* snap) {
// fillindex can be non transactional, we just need to clean up
@ -204,8 +204,18 @@ static arangodb::Result fillIndex(RocksDBIndex& ridx, WriteBatchType& batch,
TRI_ASSERT(ridx.hasSelectivityEstimate() && ops.size() == 1);
auto it = ops.begin();
TRI_ASSERT(ridx.id() == it->first);
ridx.estimator()->bufferUpdates(seq, std::move(it->second.inserts),
std::move(it->second.removals));
if (foreground) {
for (uint64_t hash : it->second.inserts) {
ridx.estimator()->insert(hash);
}
for (uint64_t hash : it->second.removals) {
ridx.estimator()->remove(hash);
}
} else {
ridx.estimator()->bufferUpdates(seq, std::move(it->second.inserts),
std::move(it->second.removals));
}
}
};
@ -266,12 +276,12 @@ arangodb::Result RocksDBBuilderIndex::fillIndexForeground() {
// unique index. we need to keep track of all our changes because we need to
// avoid duplicate index keys. must therefore use a WriteBatchWithIndex
rocksdb::WriteBatchWithIndex batch(cmp, 32 * 1024 * 1024);
res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods>(*internal, batch, snap);
res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods, true>(*internal, batch, snap);
} else {
// non-unique index. all index keys will be unique anyway because they
// contain the document id we can therefore get away with a cheap WriteBatch
rocksdb::WriteBatch batch(32 * 1024 * 1024);
res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods>(*internal, batch, snap);
res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods, true>(*internal, batch, snap);
}
return res;
@ -544,19 +554,20 @@ arangodb::Result RocksDBBuilderIndex::fillIndexBackground(Locker& locker) {
});
locker.unlock();
// Step 1. Capture with snapshot
if (internal->unique()) {
const rocksdb::Comparator* cmp = internal->columnFamily()->GetComparator();
// unique index. we need to keep track of all our changes because we need to
// avoid duplicate index keys. must therefore use a WriteBatchWithIndex
rocksdb::WriteBatchWithIndex batch(cmp, 32 * 1024 * 1024);
res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods>(*internal, batch, snap);
res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods, false>(*internal, batch, snap);
} else {
// non-unique index. all index keys will be unique anyway because they
// contain the document id we can therefore get away with a cheap WriteBatch
rocksdb::WriteBatch batch(32 * 1024 * 1024);
res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods>(*internal, batch, snap);
res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods, false>(*internal, batch, snap);
}
if (res.fail()) {
return res;
}
@ -565,6 +576,7 @@ arangodb::Result RocksDBBuilderIndex::fillIndexBackground(Locker& locker) {
rootDB->ReleaseSnapshot(snap);
snap = nullptr;
// Step 2. Scan the WAL for documents without lock
int maxCatchups = 3;
rocksdb::SequenceNumber lastScanned = 0;
uint64_t numScanned = 0;
@ -589,7 +601,7 @@ arangodb::Result RocksDBBuilderIndex::fillIndexBackground(Locker& locker) {
if (res.fail()) {
return res;
}
scanFrom = lastScanned;
} while (maxCatchups-- > 0 && numScanned > 5000);
@ -597,6 +609,8 @@ arangodb::Result RocksDBBuilderIndex::fillIndexBackground(Locker& locker) {
return res.reset(TRI_ERROR_LOCK_TIMEOUT);
}
// Step 3. Scan the WAL for documents with a lock
scanFrom = lastScanned;
if (internal->unique()) {
const rocksdb::Comparator* cmp = internal->columnFamily()->GetComparator();
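
fillIndex gained a non-type template parameter: the foreground fill applies selectivity-estimator updates immediately, while the background fill keeps buffering them against the WAL sequence number. Because the flag is a compile-time constant, each instantiation effectively keeps only one branch. A self-contained sketch of that pattern (hypothetical estimator type, not the ArangoDB one):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct EstimatorSketch {
      void insert(uint64_t) {}
      void remove(uint64_t) {}
      void bufferUpdates(uint64_t /*seq*/, std::vector<uint64_t>&&, std::vector<uint64_t>&&) {}
    };

    template <bool foreground>
    void applyEstimates(EstimatorSketch& est, uint64_t seq,
                        std::vector<uint64_t> inserts, std::vector<uint64_t> removals) {
      if (foreground) {  // constant per instantiation, the unused branch is optimized away
        for (uint64_t h : inserts) { est.insert(h); }
        for (uint64_t h : removals) { est.remove(h); }
      } else {
        est.bufferUpdates(seq, std::move(inserts), std::move(removals));
      }
    }

    // callers pick the variant explicitly, mirroring the diff:
    //   applyEstimates<true>(est, seq, ins, rem);   // fillIndexForeground
    //   applyEstimates<false>(est, seq, ins, rem);  // fillIndexBackground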

View File

@ -217,7 +217,7 @@ void RocksDBCollection::unload() {
TRI_voc_rid_t RocksDBCollection::revision() const { return _revisionId; }
TRI_voc_rid_t RocksDBCollection::revision(transaction::Methods* trx) const {
auto state = RocksDBTransactionState::toState(trx);
auto* state = RocksDBTransactionState::toState(trx);
auto trxCollection = static_cast<RocksDBTransactionCollection*>(
state->findCollection(_logicalCollection.id()));
@ -230,7 +230,7 @@ uint64_t RocksDBCollection::numberDocuments() const { return _numberDocuments; }
uint64_t RocksDBCollection::numberDocuments(transaction::Methods* trx) const {
TRI_ASSERT(!ServerState::instance()->isCoordinator());
auto state = RocksDBTransactionState::toState(trx);
auto* state = RocksDBTransactionState::toState(trx);
auto trxCollection = static_cast<RocksDBTransactionCollection*>(
state->findCollection(_logicalCollection.id()));
@ -787,13 +787,13 @@ bool RocksDBCollection::lookupRevision(transaction::Methods* trx, VPackSlice con
Result RocksDBCollection::read(transaction::Methods* trx,
arangodb::velocypack::StringRef const& key,
ManagedDocumentResult& result, bool) {
ManagedDocumentResult& result, bool /*lock*/) {
LocalDocumentId const documentId = primaryIndex()->lookupKey(trx, key);
if (documentId.isSet()) {
return lookupDocumentVPack(documentId, trx, result, true);
if (!documentId.isSet()) {
return Result(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
}
// not found
return Result(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
// found
return lookupDocumentVPack(documentId, trx, result, /*withCache*/true);
}
// read using a token!
@ -801,7 +801,7 @@ bool RocksDBCollection::readDocument(transaction::Methods* trx,
LocalDocumentId const& documentId,
ManagedDocumentResult& result) const {
if (documentId.isSet()) {
auto res = lookupDocumentVPack(documentId, trx, result, true);
auto res = lookupDocumentVPack(documentId, trx, result, /*withCache*/true);
return res.ok();
}
return false;
@ -872,7 +872,7 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_INSERT);
auto state = RocksDBTransactionState::toState(trx);
auto* state = RocksDBTransactionState::toState(trx);
state->prepareOperation(_logicalCollection.id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_INSERT);
res = insertDocument(trx, documentId, newSlice, options);
@ -909,7 +909,7 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const newSlice,
ManagedDocumentResult& mdr, OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick, bool,
TRI_voc_tick_t& resultMarkerTick, bool /*lock*/,
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
std::function<Result(void)> callbackDuringLock) {
resultMarkerTick = 0;
@ -919,16 +919,15 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
return Result(TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD);
}
LocalDocumentId const documentId = LocalDocumentId::create();
LocalDocumentId const newDocumentId = LocalDocumentId::create();
auto isEdgeCollection = (TRI_COL_TYPE_EDGE == _logicalCollection.type());
Result res = this->read(trx, key, previous, /*lock*/ false);
if (res.fail()) {
return res;
}
TRI_ASSERT(!previous.empty());
LocalDocumentId const oldDocumentId = previous.localDocumentId();
VPackSlice oldDoc(previous.vpack());
TRI_voc_rid_t const oldRevisionId = transaction::helpers::extractRevFromDocument(oldDoc);
@ -982,12 +981,12 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
VPackSlice const newDoc(builder->slice());
auto state = RocksDBTransactionState::toState(trx);
auto* state = RocksDBTransactionState::toState(trx);
RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_UPDATE);
// add possible log statement under guard
state->prepareOperation(_logicalCollection.id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_UPDATE);
res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc, options);
res = updateDocument(trx, oldDocumentId, oldDoc, newDocumentId, newDoc, options);
if (res.ok()) {
trackWaitForSync(trx, options);
@ -995,7 +994,7 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
if (options.silent) {
mdr.clear();
} else {
mdr.setManaged(newDoc.begin(), documentId);
mdr.setManaged(newDoc.begin(), newDocumentId);
TRI_ASSERT(!mdr.empty());
}
@ -1027,15 +1026,15 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
std::function<Result(void)> callbackDuringLock) {
resultMarkerTick = 0;
bool const isEdgeCollection = (TRI_COL_TYPE_EDGE == _logicalCollection.type());
// get the previous revision
VPackSlice key = newSlice.get(StaticStrings::KeyString);
Result res;
if (key.isNone()) {
return res.reset(TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD);
}
LocalDocumentId const newDocumentId = LocalDocumentId::create();
bool const isEdgeCollection = (TRI_COL_TYPE_EDGE == _logicalCollection.type());
// get the previous revision
res = this->read(trx, key, previous, /*lock*/ false);
@ -1063,8 +1062,6 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
}
}
LocalDocumentId const documentId = LocalDocumentId::create();
// merge old and new values
TRI_voc_rid_t revisionId;
transaction::BuilderLeaser builder(trx);
@ -1087,13 +1084,13 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
VPackSlice const newDoc(builder->slice());
auto state = RocksDBTransactionState::toState(trx);
auto* state = RocksDBTransactionState::toState(trx);
RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_REPLACE);
// add possible log statement under guard
state->prepareOperation(_logicalCollection.id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_REPLACE);
res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc, options);
res = updateDocument(trx, oldDocumentId, oldDoc, newDocumentId, newDoc, options);
if (res.ok()) {
trackWaitForSync(trx, options);
@ -1101,7 +1098,7 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
if (options.silent) {
mdr.clear();
} else {
mdr.setManaged(newDoc.begin(), documentId);
mdr.setManaged(newDoc.begin(), newDocumentId);
TRI_ASSERT(!mdr.empty());
}
@ -1337,6 +1334,7 @@ Result RocksDBCollection::updateDocument(transaction::Methods* trx,
RocksDBKeyLeaser key(trx);
key->constructDocument(_objectId, oldDocumentId);
TRI_ASSERT(key->containsLocalDocumentId(oldDocumentId));
blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));
rocksdb::Status s = mthds->SingleDelete(RocksDBColumnFamily::documents(), key.ref());
@ -1345,8 +1343,8 @@ Result RocksDBCollection::updateDocument(transaction::Methods* trx,
}
key->constructDocument(_objectId, newDocumentId);
// simon: we do not need to blacklist the new documentId
TRI_ASSERT(key->containsLocalDocumentId(newDocumentId));
// simon: we do not need to blacklist the new documentId
s = mthds->PutUntracked(RocksDBColumnFamily::documents(), key.ref(),
rocksdb::Slice(newDoc.startAs<char>(),
static_cast<size_t>(newDoc.byteSize())));

View File

@ -93,7 +93,7 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t objectId
return rocks->mapObjectToCollection(objectId);
}
std::tuple<TRI_voc_tick_t, TRI_voc_cid_t, TRI_idx_iid_t> mapObjectToIndex(uint64_t objectId) {
RocksDBEngine::IndexTriple mapObjectToIndex(uint64_t objectId) {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
TRI_ASSERT(engine != nullptr);
RocksDBEngine* rocks = static_cast<RocksDBEngine*>(engine);

View File

@ -71,7 +71,7 @@ arangodb::Result globalRocksDBRemove(rocksdb::ColumnFamilyHandle* cf,
uint64_t latestSequenceNumber();
std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t);
std::tuple<TRI_voc_tick_t, TRI_voc_cid_t, TRI_idx_iid_t> mapObjectToIndex(uint64_t);
RocksDBEngine::IndexTriple mapObjectToIndex(uint64_t);
/// @brief count all keys in the given column family
std::size_t countKeys(rocksdb::DB*, rocksdb::ColumnFamilyHandle* cf);

View File

@ -92,8 +92,6 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
void toVelocyPack(VPackBuilder&, std::underlying_type<Index::Serialize>::type) const override;
bool hasBatchInsert() const override { return false; }
bool supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const& allIndexes,
arangodb::aql::AstNode const*,
arangodb::aql::Variable const*, size_t, size_t&,

View File

@ -238,21 +238,22 @@ void RocksDBIndex::afterTruncate(TRI_voc_tick_t) {
createCache();
TRI_ASSERT(_cachePresent);
}
}
}
Result RocksDBIndex::update(transaction::Methods& trx, RocksDBMethods* mthd,
LocalDocumentId const& oldDocumentId,
velocypack::Slice const& oldDoc, LocalDocumentId const& newDocumentId,
velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc, Index::OperationMode mode) {
// It is illegal to call this method on the primary index
// RocksDBPrimaryIndex must override this method accordingly
TRI_ASSERT(type() != TRI_IDX_TYPE_PRIMARY_INDEX);
/// only if the insert needs to see the changes of the update, enable indexing:
IndexingEnabler enabler(mthd, mthd->isIndexingDisabled() && hasExpansion() && unique());
TRI_ASSERT((hasExpansion() && unique()) ? !mthd->isIndexingDisabled() : true);
Result res = remove(trx, mthd, oldDocumentId, oldDoc, mode);
if (!res.ok()) {
return res;

View File

@ -406,7 +406,7 @@ TRI_voc_cid_t RocksDBKey::viewId(char const* data, size_t size) {
TRI_voc_cid_t RocksDBKey::objectId(char const* data, size_t size) {
TRI_ASSERT(data != nullptr);
TRI_ASSERT(size > sizeof(uint64_t));
TRI_ASSERT(size >= sizeof(uint64_t));
return uint64FromPersistent(data);
}
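
The assertion was relaxed from > to >= because objectId() only decodes the first sizeof(uint64_t) bytes of the key, so a key that is exactly eight bytes long is valid input. A sketch of the assumed decoding (the real uint64FromPersistent may differ, e.g. in byte-order handling):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    uint64_t objectIdSketch(char const* data, size_t size) {
      assert(data != nullptr);
      assert(size >= sizeof(uint64_t));  // a key of exactly 8 bytes is enough
      uint64_t value;
      std::memcpy(&value, data, sizeof(value));
      return value;
    }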

View File

@ -147,7 +147,8 @@ rocksdb::Status RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
rocksdb::Slice const& key, std::string* val) {
TRI_ASSERT(cf != nullptr);
rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
TRI_ASSERT(ro.snapshot != nullptr);
TRI_ASSERT(ro.snapshot != nullptr ||
_state->isReadOnlyTransaction() && _state->isSingleOperation());
return _db->Get(ro, cf, key, val);
}
@ -156,7 +157,8 @@ rocksdb::Status RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
rocksdb::PinnableSlice* val) {
TRI_ASSERT(cf != nullptr);
rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
TRI_ASSERT(ro.snapshot != nullptr);
TRI_ASSERT(ro.snapshot != nullptr ||
_state->isReadOnlyTransaction() && _state->isSingleOperation());
return _db->Get(ro, cf, key, val);
}
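
The assertion now also accepts a missing snapshot when the transaction is read-only and a single operation: such an operation can read the latest committed state directly instead of pinning a snapshot. For illustration, this is how reads behave in plain RocksDB with and without a snapshot set (generic RocksDB usage, not ArangoDB code):

    #include <rocksdb/db.h>
    #include <string>

    rocksdb::Status readLatest(rocksdb::DB* db, rocksdb::Slice key, std::string* val) {
      rocksdb::ReadOptions ro;          // ro.snapshot == nullptr: read the latest state
      return db->Get(ro, key, val);
    }

    rocksdb::Status readPinned(rocksdb::DB* db, rocksdb::Slice key, std::string* val) {
      rocksdb::ReadOptions ro;
      ro.snapshot = db->GetSnapshot();  // pin reads to this point in time
      rocksdb::Status s = db->Get(ro, key, val);
      db->ReleaseSnapshot(ro.snapshot);
      return s;
    }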

View File

@ -45,7 +45,6 @@
#include "RocksDBEngine/RocksDBSettingsManager.h"
#include "RocksDBEngine/RocksDBVPackIndex.h"
#include "RocksDBEngine/RocksDBValue.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "Transaction/Helpers.h"
#include "VocBase/KeyGenerator.h"
#include "VocBase/ticks.h"
@ -192,12 +191,12 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
continue;
}
TRI_DEFER(vocbase->release());
auto collection = vocbase->lookupCollection(dbColPair.second);
if (collection == nullptr) {
auto coll = vocbase->lookupCollection(dbColPair.second);
if (coll == nullptr) {
continue;
}
auto* rcoll = static_cast<RocksDBCollection*>(collection->getPhysical());
auto* rcoll = static_cast<RocksDBCollection*>(coll->getPhysical());
if (ops.mustTruncate) { // first we must reset the counter
rcoll->meta().countRefUnsafe()._added = 0;
rcoll->meta().countRefUnsafe()._removed = 0;
@ -214,7 +213,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
auto const& it = _generators.find(rcoll->objectId());
if (it != _generators.end()) {
std::string k(basics::StringUtils::itoa(it->second));
collection->keyGenerator()->track(k.data(), k.size());
coll->keyGenerator()->track(k.data(), k.size());
_generators.erase(it);
}
}
@ -289,7 +288,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
/// Truncate indexes of collection with objectId
bool truncateIndexes(uint64_t objectId) {
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
RocksDBEngine::CollectionPair pair = engine->mapObjectToCollection(objectId);
if (pair.first == 0 || pair.second == 0) {
return false;
@ -320,7 +319,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
// find estimator for index
RocksDBCuckooIndexEstimator<uint64_t>* findEstimator(uint64_t objectId) {
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
RocksDBEngine::IndexTriple triple = engine->mapObjectToIndex(objectId);
if (std::get<0>(triple) == 0 && std::get<1>(triple) == 0) {
return nullptr;
@ -420,9 +419,8 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
updateMaxTick(column_family_id, key, value);
if (column_family_id == RocksDBColumnFamily::documents()->GetID()) {
uint64_t objectId = RocksDBKey::objectId(key);
Operations* ops = nullptr;
if (shouldHandleCollection(objectId, &ops)) {
if (shouldHandleCollection(RocksDBKey::objectId(key), &ops)) {
TRI_ASSERT(ops != nullptr);
ops->lastSequenceNumber = _currentSequence;
ops->added++;
@ -448,7 +446,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
}
}
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
for (auto helper : engine->recoveryHelpers()) {
helper->PutCF(column_family_id, key, value);
}
@ -498,7 +496,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
LOG_TOPIC("5f341", TRACE, Logger::ENGINES) << "recovering DELETE " << RocksDBKey(key);
handleDeleteCF(column_family_id, key);
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
for (auto helper : engine->recoveryHelpers()) {
helper->DeleteCF(column_family_id, key);
}
@ -510,7 +508,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
LOG_TOPIC("aa997", TRACE, Logger::ENGINES) << "recovering SINGLE DELETE " << RocksDBKey(key);
handleDeleteCF(column_family_id, key);
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
for (auto helper : engine->recoveryHelpers()) {
helper->SingleDeleteCF(column_family_id, key);
}
@ -525,10 +523,32 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
<< RocksDBKey(end_key);
incTick();
// drop and truncate can use this, truncate is handled via a Log marker
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
for (auto helper : engine->recoveryHelpers()) {
helper->DeleteRangeCF(column_family_id, begin_key, end_key);
}
// check for a range-delete of the primary index
if (column_family_id == RocksDBColumnFamily::documents()->GetID()) {
uint64_t objectId = RocksDBKey::objectId(begin_key);
TRI_ASSERT(objectId == RocksDBKey::objectId(end_key));
Operations* ops = nullptr;
if (shouldHandleCollection(objectId, &ops)) {
TRI_ASSERT(ops != nullptr);
ops->lastSequenceNumber = _currentSequence;
ops->removed = 0;
ops->added = 0;
ops->mustTruncate = true;
}
// index estimates have their own commitSeq
if (!truncateIndexes(objectId)) {
// unable to truncate indexes of the collection.
// may be due to collection having been deleted etc.
LOG_TOPIC("04032", WARN, Logger::ENGINES)
<< "unable to truncate indexes for objectId " << objectId;
}
}
return rocksdb::Status(); // make WAL iterator happy
}
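
Truncate detection moves from the CollectionTruncate log marker (removed in the next hunk) into the DeleteRangeCF callback: in the WAL a truncate shows up as a range delete on the documents column family, so the counters and index estimates are reset where that event is observed. A stripped-down sketch of such a WriteBatch handler (simplified, not the WBReader class; only the range-delete callback is overridden):

    #include <rocksdb/write_batch.h>
    #include <cstdint>
    #include <map>

    // handler passed to rocksdb::WriteBatch::Iterate()
    class RangeDeleteObserver : public rocksdb::WriteBatch::Handler {
     public:
      rocksdb::Status DeleteRangeCF(uint32_t column_family_id,
                                    rocksdb::Slice const& /*begin_key*/,
                                    rocksdb::Slice const& /*end_key*/) override {
        ++rangeDeletes[column_family_id];  // e.g. a truncate of the documents CF
        return rocksdb::Status::OK();
      }

      std::map<uint32_t, uint64_t> rangeDeletes;
    };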
@ -538,39 +558,15 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
RocksDBLogType type = RocksDBLogValue::type(blob);
switch (type) {
case RocksDBLogType::DocumentRemoveV2: // remove within a trx
TRI_ASSERT(_lastRemovedDocRid == 0);
_lastRemovedDocRid = RocksDBLogValue::revisionId(blob);
break;
case RocksDBLogType::SingleRemoveV2: // single remove
TRI_ASSERT(_lastRemovedDocRid == 0);
_lastRemovedDocRid = RocksDBLogValue::revisionId(blob);
break;
case RocksDBLogType::CollectionTruncate: {
uint64_t objectId = RocksDBLogValue::objectId(blob);
Operations* ops = nullptr;
if (shouldHandleCollection(objectId, &ops)) {
TRI_ASSERT(ops != nullptr);
ops->lastSequenceNumber = _currentSequence;
ops->removed = 0;
ops->added = 0;
ops->mustTruncate = true;
}
// index estimates have their own commitSeq
if (!truncateIndexes(objectId)) {
// unable to truncate indexes of the collection.
// may be due to collection having been deleted etc.
LOG_TOPIC("04032", WARN, Logger::ENGINES)
<< "unable to truncate indexes for objectId " << objectId;
}
_lastRemovedDocRid = 0; // reset in any other case
break;
}
default:
_lastRemovedDocRid = 0; // reset in any other case
break;
}
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
for (auto helper : engine->recoveryHelpers()) {
helper->LogData(blob);
}
@ -585,7 +581,7 @@ Result RocksDBRecoveryManager::parseRocksWAL() {
Result res = basics::catchToResult([&]() -> Result {
Result rv;
RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
RocksDBEngine* engine = rocksutils::globalRocksEngine();
for (auto& helper : engine->recoveryHelpers()) {
helper->prepare();
}
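
The repeated static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE) expressions were replaced with the existing rocksutils::globalRocksEngine() helper. Presumably that helper is just a thin wrapper around the same lookup, roughly like the sketch below (an assumption, not copied from the sources):

    // assumed shape of the helper; the actual declaration in rocksutils may differ
    namespace arangodb {
    namespace rocksutils {

    inline RocksDBEngine* globalRocksEngine() {
      StorageEngine* engine = EngineSelectorFeature::ENGINE;
      TRI_ASSERT(engine != nullptr);
      return static_cast<RocksDBEngine*>(engine);
    }

    }  // namespace rocksutils
    }  // namespace arangodb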

View File

@ -116,15 +116,13 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
_rocksReadOptions.prefix_same_as_start = true; // should always be true
TRI_ASSERT(_readSnapshot == nullptr);
if (isReadOnlyTransaction()) {
if (_readSnapshot == nullptr) { // replication may donate a snapshot
_readSnapshot = db->GetSnapshot(); // must call ReleaseSnapshot later
}
_readSnapshot = db->GetSnapshot(); // must call ReleaseSnapshot later
TRI_ASSERT(_readSnapshot != nullptr);
_rocksReadOptions.snapshot = _readSnapshot;
_rocksMethods.reset(new RocksDBReadOnlyMethods(this));
} else {
TRI_ASSERT(_readSnapshot == nullptr);
createTransaction();
_rocksReadOptions.snapshot = _rocksTransaction->GetSnapshot();
if (hasHint(transaction::Hints::Hint::INTERMEDIATE_COMMITS)) {
@ -226,6 +224,8 @@ void RocksDBTransactionState::cleanupTransaction() noexcept {
_cacheTx = nullptr;
}
if (_readSnapshot != nullptr) {
TRI_ASSERT(isReadOnlyTransaction() ||
hasHint(transaction::Hints::Hint::INTERMEDIATE_COMMITS));
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
db->ReleaseSnapshot(_readSnapshot); // calls delete
_readSnapshot = nullptr;
@ -273,6 +273,7 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
TRI_ASSERT(_numLogdata == (2 + _numRemoves));
}
++_numCommits;
TRI_ASSERT(x > 0);
#endif
// prepare for commit on each collection, e.g. place blockers for estimators
@ -355,17 +356,15 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
_rocksTransaction->GetNumPuts() == 0 &&
_rocksTransaction->GetNumDeletes() == 0);
// this is most likely the fill index case
rocksdb::SequenceNumber seq = _rocksTransaction->GetSnapshot()->GetSequenceNumber();
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
for (auto& trxColl : _collections) {
TRI_IF_FAILURE("RocksDBCommitCounts") { continue; }
auto* rcoll = static_cast<RocksDBTransactionCollection*>(trxColl);
rcoll->prepareCommit(id(), seq);
// We get here if we have filled indexes. So let us commit counts and
// any buffered index estimator updates
rcoll->commitCounts(id(), seq + 1);
TRI_ASSERT(!rcoll->hasOperations());
TRI_ASSERT(rcoll->stealTrackedOperations().empty());
}
// don't write anything if the transaction is empty
result = rocksutils::convertStatus(_rocksTransaction->Rollback());
#endif
}
return result;

View File

@ -111,7 +111,7 @@ class RocksDBTransactionState final : public TransactionState {
TRI_ASSERT(_rocksMethods);
return _rocksMethods.get();
}
/// @brief Rocksdb sequence number of snapshot. Works while trx
/// has either a snapshot or a transaction
rocksdb::SequenceNumber sequenceNumber() const;

View File

@ -739,74 +739,104 @@ Result RocksDBVPackIndex::insert(transaction::Methods& trx, RocksDBMethods* mthd
return res;
}
namespace {
bool attributesEqual(VPackSlice first, VPackSlice second,
std::vector<arangodb::basics::AttributeName>::const_iterator begin,
std::vector<arangodb::basics::AttributeName>::const_iterator end) {
for (; begin != end; ++begin) {
// fetch subattribute
first = first.get(begin->name);
second = second.get(begin->name);
if (first.isExternal()) {
first = first.resolveExternal();
}
if (second.isExternal()) {
second = second.resolveExternal();
}
if (begin->shouldExpand &&
first.isArray() && second.isArray()) {
auto next = begin + 1;
VPackArrayIterator it1(first), it2(second);
while (it1.valid() && it2.valid()) {
if (!attributesEqual(*it1, *it2, next, end)) {
return false;
}
it1++;
it2++;
}
return true;
}
int dist = std::distance(begin, end);
bool notF1 = first.isNone() || (dist == 1 && !first.isObject());
bool notF2 = second.isNone() || (dist == 1 && !second.isObject());
if (notF1 != notF2) {
return false;
}
if (notF1 || notF2) { // one of the paths was not found
break;
}
}
return (basics::VelocyPackHelper::compare(first, second, true) == 0);
}
} // namespace
Result RocksDBVPackIndex::update(transaction::Methods& trx, RocksDBMethods* mthds,
LocalDocumentId const& oldDocumentId,
velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc,
Index::OperationMode mode) {
if (!_unique || _useExpansion) {
if (!_unique) {
// only unique index supports in-place updates
// lets also not handle the complex case of expanded arrays
return RocksDBIndex::update(trx, mthds, oldDocumentId, oldDoc,
newDocumentId, newDoc, mode);
} else {
Result res;
rocksdb::Status s;
bool equal = true;
for (size_t i = 0; i < _paths.size(); ++i) {
TRI_ASSERT(!_paths[i].empty());
VPackSlice oldSlice = oldDoc.get(_paths[i]);
VPackSlice newSlice = newDoc.get(_paths[i]);
if ((oldSlice.isNone() || oldSlice.isNull()) &&
(newSlice.isNone() || newSlice.isNull())) {
// attribute not found
if (_sparse) {
// if sparse we do not have to index, this is indicated by result
// being shorter than n
return res;
}
} else if (basics::VelocyPackHelper::compare(oldSlice, newSlice, true)) {
equal = false;
break;
}
}
if (!equal) {
// we can only use in-place updates if no indexed attributes changed
return RocksDBIndex::update(trx, mthds, oldDocumentId, oldDoc,
newDocumentId, newDoc, mode);
}
// more expensive method to
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
SmallVector<RocksDBKey> elements{elementsArena};
SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
SmallVector<uint64_t> hashes{hashesArena};
{
// rethrow all types of exceptions from here...
transaction::BuilderLeaser leased(&trx);
int r = fillElement(*(leased.get()), newDocumentId, newDoc, elements, hashes);
if (r != TRI_ERROR_NO_ERROR) {
return addErrorMsg(res, r);
}
}
RocksDBValue value = RocksDBValue::UniqueVPackIndexValue(newDocumentId);
size_t const count = elements.size();
for (size_t i = 0; i < count; ++i) {
RocksDBKey& key = elements[i];
s = mthds->Put(_cf, key, value.string());
if (!s.ok()) {
res = rocksutils::convertStatus(s, rocksutils::index);
break;
}
}
return res;
}
bool equal = true;
for (std::vector<basics::AttributeName> const& path : _fields) {
if (!::attributesEqual(oldDoc, newDoc, path.begin(), path.end())) {
equal = false;
break;
}
}
if (!equal) {
// we can only use in-place updates if no indexed attributes changed
return RocksDBIndex::update(trx, mthds, oldDocumentId, oldDoc,
newDocumentId, newDoc, mode);
}
Result res;
// more expensive method to
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
SmallVector<RocksDBKey> elements{elementsArena};
SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
SmallVector<uint64_t> hashes{hashesArena};
{
// rethrow all types of exceptions from here...
transaction::BuilderLeaser leased(&trx);
int r = fillElement(*(leased.get()), newDocumentId, newDoc, elements, hashes);
if (r != TRI_ERROR_NO_ERROR) {
return addErrorMsg(res, r);
}
}
RocksDBValue value = RocksDBValue::UniqueVPackIndexValue(newDocumentId);
size_t const count = elements.size();
for (size_t i = 0; i < count; ++i) {
RocksDBKey& key = elements[i];
rocksdb::Status s = mthds->Put(_cf, key, value.string());
if (!s.ok()) {
res = rocksutils::convertStatus(s, rocksutils::index);
break;
}
}
return res;
}
/// @brief removes a document from the index
@ -844,10 +874,10 @@ Result RocksDBVPackIndex::remove(transaction::Methods& trx, RocksDBMethods* mthd
}
}
} else {
// non-unique index contain the unique objectID
// they should be written exactly once
// non-unique index contain the unique objectID written exactly once
for (size_t i = 0; i < count; ++i) {
s = mthds->SingleDelete(_cf, elements[i]);
if (!s.ok()) {
res.reset(rocksutils::convertStatus(s, rocksutils::index));
}
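
The rewritten update path also covers expanded array attributes: the new attributesEqual helper walks every indexed attribute path, recursing element by element through [*] expansions, and only when an indexed value actually changed does the code fall back to the generic remove-and-insert in RocksDBIndex::update; otherwise the unique-index entries are overwritten in place with a value pointing at the new LocalDocumentId. The idea in miniature (a conceptual sketch with a std::map standing in for the unique index):

    #include <cstdint>
    #include <map>
    #include <string>

    int main() {
      // key = indexed attribute value, mapped value = local document id
      std::map<std::string, uint64_t> uniqueIndex;
      uniqueIndex["alice"] = 1;   // original revision of the document

      // update that leaves the indexed attribute untouched:
      // the same key is simply overwritten with the new document id (one Put)
      uniqueIndex["alice"] = 2;

      // update that changes the indexed attribute:
      // the old entry must be removed and a new one inserted
      uniqueIndex.erase("alice");
      uniqueIndex["bob"] = 3;
      return 0;
    }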

View File

@ -151,10 +151,10 @@ class PhysicalCollection {
arangodb::velocypack::Slice const&) const = 0;
virtual Result read(transaction::Methods*, arangodb::velocypack::StringRef const& key,
ManagedDocumentResult& result, bool) = 0;
ManagedDocumentResult& result, bool lock) = 0;
virtual Result read(transaction::Methods*, arangodb::velocypack::Slice const& key,
ManagedDocumentResult& result, bool) = 0;
ManagedDocumentResult& result, bool lock) = 0;
virtual bool readDocument(transaction::Methods* trx, LocalDocumentId const& token,
ManagedDocumentResult& result) const = 0;

View File

@ -130,7 +130,7 @@ void Collections::enumerate(TRI_vocbase_t* vocbase,
/*static*/ arangodb::Result methods::Collections::lookup( // find collection
TRI_vocbase_t const& vocbase, // vocbase to search
std::string const& name, // collection name
FuncCallback const& func // invoke on found collection
FuncCallback func // invoke on found collection
) {
if (name.empty()) {
return Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
@ -208,7 +208,7 @@ void Collections::enumerate(TRI_vocbase_t* vocbase,
arangodb::velocypack::Slice const& properties, // collection properties
bool createWaitsForSyncReplication, // replication wait flag
bool enforceReplicationFactor, // replication factor flag
FuncCallback const& func // invoke on collection creation
FuncCallback func // invoke on collection creation
) {
if (name.empty()) {
return TRI_ERROR_ARANGO_ILLEGAL_NAME;

View File

@ -73,7 +73,7 @@ struct Collections {
static arangodb::Result lookup( // find collection
TRI_vocbase_t const& vocbase, // vocbase to search
std::string const& name, // collection name
FuncCallback const& callback // invoke on found collection
FuncCallback callback // invoke on found collection
);
/// Create collection, ownership of collection in callback is
@ -85,7 +85,7 @@ struct Collections {
arangodb::velocypack::Slice const& properties, // collection properties
bool createWaitsForSyncReplication, // replication wait flag
bool enforceReplicationFactor, // replication factor flag
FuncCallback const& callback // invoke on collection creation
FuncCallback callback // invoke on collection creation
);
static Result load(TRI_vocbase_t& vocbase, LogicalCollection* coll);
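
FuncCallback, presumably a std::function alias, is now taken by value instead of by const reference: callers can hand over temporaries or move a callback in, and the callee is free to move it into longer-lived storage without copying the target. A small sketch of the signature change (hypothetical callback type):

    #include <functional>
    #include <utility>

    using Callback = std::function<void(int)>;

    void lookupSketch(int value, Callback callback) {  // by value, as in the new signature
      callback(value);                                  // invoke directly ...
      Callback stored = std::move(callback);            // ... or take ownership cheaply
      (void)stored;
    }

    // usage:
    //   lookupSketch(42, [](int v) { /* handle the found collection */ });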

View File

@ -235,7 +235,6 @@ SECTION("test_defaults") {
CHECK((logicalCollection.get() == &(link->collection())));
CHECK((link->fieldNames().empty()));
CHECK((link->fields().empty()));
CHECK((true == link->hasBatchInsert()));
CHECK((false == link->hasExpansion()));
CHECK((false == link->hasSelectivityEstimate()));
CHECK((false == link->implicitlyUnique()));
@ -286,7 +285,6 @@ SECTION("test_defaults") {
CHECK((logicalCollection.get() == &(link->collection())));
CHECK((link->fieldNames().empty()));
CHECK((link->fields().empty()));
CHECK((true == link->hasBatchInsert()));
CHECK((false == link->hasExpansion()));
CHECK((false == link->hasSelectivityEstimate()));
CHECK((false == link->implicitlyUnique()));
@ -1038,4 +1036,4 @@ SECTION("test_write") {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

View File

@ -337,7 +337,6 @@ SECTION("test_create_drop") {
CHECK((updatedCollection0.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -445,7 +444,6 @@ SECTION("test_create_drop") {
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -505,4 +503,4 @@ SECTION("test_create_drop") {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------

View File

@ -1287,7 +1287,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -1334,7 +1333,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -1381,7 +1379,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -1516,7 +1513,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -1564,7 +1560,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -1853,7 +1848,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -1900,7 +1894,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -2051,7 +2044,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -2099,7 +2091,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -2147,7 +2138,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -2495,7 +2485,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -2542,7 +2531,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -2681,7 +2669,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -2810,7 +2797,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -3117,7 +3103,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -3165,7 +3150,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -3212,7 +3196,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));
@ -3505,7 +3488,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
CHECK((updatedCollection.get() == &(index->collection())));
CHECK((index->fieldNames().empty()));
CHECK((index->fields().empty()));
CHECK((true == index->hasBatchInsert()));
CHECK((false == index->hasExpansion()));
CHECK((false == index->hasSelectivityEstimate()));
CHECK((false == index->implicitlyUnique()));

View File

@ -169,8 +169,6 @@ class EdgeIndexMock final : public arangodb::Index {
size_t memory() const override { return sizeof(EdgeIndexMock); }
bool hasBatchInsert() const override { return false; }
void load() override {}
void unload() override {}
void afterTruncate(TRI_voc_tick_t) override {