mirror of https://gitee.com/bigwinds/arangodb
Remove Obsolete code (#8657)
parent 744845c9bb
commit 7cd84a785a
@@ -853,7 +853,7 @@ arangodb::Result MoveShard::abort() {
   // Current preconditions for all shards
   doForAllShards(
       _snapshot, _database, shardsLikeMe,
-      [this, &trx](
+      [&trx](
           Slice plan, Slice current, std::string& planPath, std::string& curPath) {
         // Current still as is
         trx.add(curPath, current);

@@ -1002,7 +1002,7 @@ std::string ClusterComm::createCommunicatorDestination(std::string const& endpoi
   }
   httpEndpoint.append(path);

-  return std::move(httpEndpoint);
+  return httpEndpoint;
 }

 std::pair<ClusterCommResult*, HttpRequest*> ClusterComm::prepareRequest(
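Note: the createCommunicatorDestination fix above removes a pessimizing move. Returning std::move(local) from a function that returns by value blocks named return value optimization and forces a move construction; Clang (and recent GCC) flag the pattern with -Wpessimizing-move. A minimal stand-alone illustration (the function and names are invented for this sketch):

    #include <string>

    std::string buildEndpoint(std::string const& path) {
      std::string httpEndpoint = "http://";
      httpEndpoint.append(path);
      // return std::move(httpEndpoint);  // pessimizing: disables NRVO, forces a move
      return httpEndpoint;                // NRVO elides the copy, or falls back to a move
    }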
@@ -61,7 +61,6 @@ class RestHandler : public std::enable_shared_from_this<RestHandler> {
   uint64_t messageId() const;

   GeneralRequest const* request() const { return _request.get(); }
-  std::unique_ptr<GeneralRequest> stealRequest() { return std::move(_request); }

   GeneralResponse* response() const { return _response.get(); }
   std::unique_ptr<GeneralResponse> stealResponse() {

@@ -699,10 +699,6 @@ arangodb::Result IResearchLink::drop() {
   return arangodb::Result();
 }

-bool IResearchLink::hasBatchInsert() const {
-  return true;
-}
-
 bool IResearchLink::hasSelectivityEstimate() const {
   return false; // selectivity can only be determined per query since multiple fields are indexed
 }

@@ -115,7 +115,6 @@ class IResearchLink {
   ////////////////////////////////////////////////////////////////////////////////
   arangodb::Result drop(); // arangodb::Index override

-  bool hasBatchInsert() const; // arangodb::Index override
   bool hasSelectivityEstimate() const; // arangodb::Index override

   //////////////////////////////////////////////////////////////////////////////

@@ -62,10 +62,6 @@ class IResearchLinkCoordinator final : public arangodb::ClusterIndex, public IRe
   //////////////////////////////////////////////////////////////////////////////
   static arangodb::IndexTypeFactory const& factory();

-  virtual bool hasBatchInsert() const override {
-    return IResearchLink::hasBatchInsert();
-  }
-
   virtual bool hasSelectivityEstimate() const override {
     return IResearchLink::hasSelectivityEstimate();
   }

@@ -60,10 +60,6 @@ class IResearchMMFilesLink final : public arangodb::MMFilesIndex, public IResear
   //////////////////////////////////////////////////////////////////////////////
   static arangodb::IndexTypeFactory const& factory();

-  virtual bool hasBatchInsert() const override {
-    return IResearchLink::hasBatchInsert();
-  }
-
   virtual bool hasSelectivityEstimate() const override {
     return IResearchLink::hasSelectivityEstimate();
   }

@@ -53,10 +53,6 @@ class IResearchRocksDBLink final : public arangodb::RocksDBIndex, public IResear
   //////////////////////////////////////////////////////////////////////////////
   static arangodb::IndexTypeFactory const& factory();

-  virtual bool hasBatchInsert() const override {
-    return IResearchLink::hasBatchInsert();
-  }
-
   virtual bool hasSelectivityEstimate() const override {
     return IResearchLink::hasSelectivityEstimate();
   }
@@ -628,9 +628,6 @@ Result Index::sizeHint(transaction::Methods& trx, size_t size) {
   return Result(); // do nothing
 }

-/// @brief default implementation for hasBatchInsert
-bool Index::hasBatchInsert() const { return false; }
-
 /// @brief default implementation for supportsFilterCondition
 bool Index::supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const&,
                                     arangodb::aql::AstNode const*,

@@ -213,6 +213,8 @@ class Index {

   static IndexType type(std::string const& type);

+ public:
+
   virtual char const* typeName() const = 0;

   static bool allowExpansion(IndexType type) {

@@ -337,8 +339,6 @@ class Index {
   // give index a hint about the expected size
   virtual Result sizeHint(transaction::Methods& trx, size_t size);

-  virtual bool hasBatchInsert() const;
-
   virtual bool supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const& allIndexes,
                                        arangodb::aql::AstNode const*,
                                        arangodb::aql::Variable const*, size_t,

@@ -176,8 +176,6 @@ class MMFilesEdgeIndex final : public MMFilesIndex {

   Result sizeHint(transaction::Methods& trx, size_t size) override;

-  bool hasBatchInsert() const override { return true; }
-
   TRI_MMFilesEdgeIndexHash_t* from() const { return _edgesFrom.get(); }

   TRI_MMFilesEdgeIndexHash_t* to() const { return _edgesTo.get(); }

@@ -268,8 +268,6 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {

   Result sizeHint(transaction::Methods& trx, size_t size) override;

-  bool hasBatchInsert() const override { return true; }
-
   bool supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const& allIndexes,
                                arangodb::aql::AstNode const*,
                                arangodb::aql::Variable const*, size_t, size_t&,
@@ -147,7 +147,7 @@ Result RocksDBBuilderIndex::remove(transaction::Methods& trx, RocksDBMethods* mt
 }

 // fast mode assuming exclusive access locked from outside
-template <typename WriteBatchType, typename MethodsType>
+template <typename WriteBatchType, typename MethodsType, bool foreground>
 static arangodb::Result fillIndex(RocksDBIndex& ridx, WriteBatchType& batch,
                                   rocksdb::Snapshot const* snap) {
   // fillindex can be non transactional, we just need to clean up

@@ -204,9 +204,19 @@ static arangodb::Result fillIndex(RocksDBIndex& ridx, WriteBatchType& batch,
       TRI_ASSERT(ridx.hasSelectivityEstimate() && ops.size() == 1);
       auto it = ops.begin();
       TRI_ASSERT(ridx.id() == it->first);

+      if (foreground) {
+        for (uint64_t hash : it->second.inserts) {
+          ridx.estimator()->insert(hash);
+        }
+        for (uint64_t hash : it->second.removals) {
+          ridx.estimator()->remove(hash);
+        }
+      } else {
         ridx.estimator()->bufferUpdates(seq, std::move(it->second.inserts),
                                         std::move(it->second.removals));
       }
+    }
   };

   for (it->Seek(bounds.start()); it->Valid(); it->Next()) {

@@ -266,12 +276,12 @@ arangodb::Result RocksDBBuilderIndex::fillIndexForeground() {
     // unique index. we need to keep track of all our changes because we need to
     // avoid duplicate index keys. must therefore use a WriteBatchWithIndex
     rocksdb::WriteBatchWithIndex batch(cmp, 32 * 1024 * 1024);
-    res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods>(*internal, batch, snap);
+    res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods, true>(*internal, batch, snap);
   } else {
     // non-unique index. all index keys will be unique anyway because they
     // contain the document id we can therefore get away with a cheap WriteBatch
     rocksdb::WriteBatch batch(32 * 1024 * 1024);
-    res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods>(*internal, batch, snap);
+    res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods, true>(*internal, batch, snap);
   }

   return res;
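Note: foreground is a non-type template parameter, so each instantiation of fillIndex compiles only one of the two estimator branches. The foreground build (above) may apply estimate updates directly because it holds exclusive access; the background build (below) buffers them against a WAL sequence number. A reduced, self-contained sketch of the pattern — Estimator and the signatures here are stand-ins, not ArangoDB's classes:

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Estimator {  // stand-in for the index's selectivity estimator
      void insert(uint64_t) {}
      void remove(uint64_t) {}
      void bufferUpdates(uint64_t /*seq*/, std::vector<uint64_t>&&, std::vector<uint64_t>&&) {}
    };

    template <bool foreground>
    void applyEstimates(Estimator& est, uint64_t seq,
                        std::vector<uint64_t> inserts, std::vector<uint64_t> removals) {
      if (foreground) {  // constant per instantiation; the dead branch drops out
        for (uint64_t h : inserts) est.insert(h);
        for (uint64_t h : removals) est.remove(h);
      } else {
        est.bufferUpdates(seq, std::move(inserts), std::move(removals));
      }
    }
    // usage: applyEstimates<true>(est, seq, ins, rem);   // foreground
    //        applyEstimates<false>(est, seq, ins, rem);  // background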
@@ -544,17 +554,18 @@ arangodb::Result RocksDBBuilderIndex::fillIndexBackground(Locker& locker) {
   });

   locker.unlock();
+  // Step 1. Capture with snapshot
   if (internal->unique()) {
     const rocksdb::Comparator* cmp = internal->columnFamily()->GetComparator();
     // unique index. we need to keep track of all our changes because we need to
     // avoid duplicate index keys. must therefore use a WriteBatchWithIndex
     rocksdb::WriteBatchWithIndex batch(cmp, 32 * 1024 * 1024);
-    res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods>(*internal, batch, snap);
+    res = ::fillIndex<rocksdb::WriteBatchWithIndex, RocksDBBatchedWithIndexMethods, false>(*internal, batch, snap);
   } else {
     // non-unique index. all index keys will be unique anyway because they
     // contain the document id we can therefore get away with a cheap WriteBatch
     rocksdb::WriteBatch batch(32 * 1024 * 1024);
-    res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods>(*internal, batch, snap);
+    res = ::fillIndex<rocksdb::WriteBatch, RocksDBBatchedMethods, false>(*internal, batch, snap);
   }

   if (res.fail()) {

@@ -565,6 +576,7 @@ arangodb::Result RocksDBBuilderIndex::fillIndexBackground(Locker& locker) {
   rootDB->ReleaseSnapshot(snap);
   snap = nullptr;

+  // Step 2. Scan the WAL for documents without lock
   int maxCatchups = 3;
   rocksdb::SequenceNumber lastScanned = 0;
   uint64_t numScanned = 0;

@@ -597,6 +609,8 @@ arangodb::Result RocksDBBuilderIndex::fillIndexBackground(Locker& locker) {
     return res.reset(TRI_ERROR_LOCK_TIMEOUT);
   }

+  // Step 3. Scan the WAL for documents with a lock
+
   scanFrom = lastScanned;
   if (internal->unique()) {
     const rocksdb::Comparator* cmp = internal->columnFamily()->GetComparator();
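Note: the step comments added above spell out the background-build protocol: bulk-fill from a snapshot while the collection stays unlocked, replay the WAL tail a bounded number of times (maxCatchups) without the lock, and only take the lock for a final short replay. A hedged outline of that control flow — every name and signature below is illustrative, not ArangoDB's API:

    #include <cstdint>

    struct Result { int code = 0; };
    constexpr int TRI_ERROR_LOCK_TIMEOUT = 18;  // value is illustrative only

    struct Locker { bool lock() { return true; } };

    // Stand-ins for the real snapshot/WAL machinery.
    uint64_t fillFromSnapshot() { return 0; }
    uint64_t replayWal(uint64_t from) { return from; }

    Result buildInBackground(Locker& locker) {
      uint64_t seq = fillFromSnapshot();            // Step 1: unlocked bulk load
      for (int catchups = 0; catchups < 3; ++catchups) {
        uint64_t scanned = replayWal(seq);          // Step 2: catch up, unlocked
        if (scanned - seq < 64) break;              // gap small enough to take the lock
        seq = scanned;
      }
      if (!locker.lock()) {
        return Result{TRI_ERROR_LOCK_TIMEOUT};
      }
      replayWal(seq);                               // Step 3: final short replay, locked
      return Result{};
    }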
@@ -217,7 +217,7 @@ void RocksDBCollection::unload() {
 TRI_voc_rid_t RocksDBCollection::revision() const { return _revisionId; }

 TRI_voc_rid_t RocksDBCollection::revision(transaction::Methods* trx) const {
-  auto state = RocksDBTransactionState::toState(trx);
+  auto* state = RocksDBTransactionState::toState(trx);
   auto trxCollection = static_cast<RocksDBTransactionCollection*>(
       state->findCollection(_logicalCollection.id()));

@@ -230,7 +230,7 @@ uint64_t RocksDBCollection::numberDocuments() const { return _numberDocuments; }

 uint64_t RocksDBCollection::numberDocuments(transaction::Methods* trx) const {
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
-  auto state = RocksDBTransactionState::toState(trx);
+  auto* state = RocksDBTransactionState::toState(trx);
   auto trxCollection = static_cast<RocksDBTransactionCollection*>(
       state->findCollection(_logicalCollection.id()));

@@ -787,13 +787,13 @@ bool RocksDBCollection::lookupRevision(transaction::Methods* trx, VPackSlice con

 Result RocksDBCollection::read(transaction::Methods* trx,
                                arangodb::velocypack::StringRef const& key,
-                               ManagedDocumentResult& result, bool) {
+                               ManagedDocumentResult& result, bool /*lock*/) {
   LocalDocumentId const documentId = primaryIndex()->lookupKey(trx, key);
-  if (documentId.isSet()) {
-    return lookupDocumentVPack(documentId, trx, result, true);
-  }
-  // not found
+  if (!documentId.isSet()) {
     return Result(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
+  }
+  // found
+  return lookupDocumentVPack(documentId, trx, result, /*withCache*/true);
 }

 // read using a token!
@@ -801,7 +801,7 @@ bool RocksDBCollection::readDocument(transaction::Methods* trx,
                                      LocalDocumentId const& documentId,
                                      ManagedDocumentResult& result) const {
   if (documentId.isSet()) {
-    auto res = lookupDocumentVPack(documentId, trx, result, true);
+    auto res = lookupDocumentVPack(documentId, trx, result, /*withCache*/true);
     return res.ok();
   }
   return false;

@@ -872,7 +872,7 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,

   RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_INSERT);

-  auto state = RocksDBTransactionState::toState(trx);
+  auto* state = RocksDBTransactionState::toState(trx);
   state->prepareOperation(_logicalCollection.id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_INSERT);

   res = insertDocument(trx, documentId, newSlice, options);

@@ -909,7 +909,7 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
 Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
                                  arangodb::velocypack::Slice const newSlice,
                                  ManagedDocumentResult& mdr, OperationOptions& options,
-                                 TRI_voc_tick_t& resultMarkerTick, bool,
+                                 TRI_voc_tick_t& resultMarkerTick, bool /*lock*/,
                                  TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
                                  std::function<Result(void)> callbackDuringLock) {
   resultMarkerTick = 0;

@@ -919,16 +919,15 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
     return Result(TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD);
   }

-  LocalDocumentId const documentId = LocalDocumentId::create();
+  LocalDocumentId const newDocumentId = LocalDocumentId::create();
   auto isEdgeCollection = (TRI_COL_TYPE_EDGE == _logicalCollection.type());
-  Result res = this->read(trx, key, previous, /*lock*/ false);

+  Result res = this->read(trx, key, previous, /*lock*/ false);
   if (res.fail()) {
     return res;
   }

   TRI_ASSERT(!previous.empty());

   LocalDocumentId const oldDocumentId = previous.localDocumentId();
   VPackSlice oldDoc(previous.vpack());
   TRI_voc_rid_t const oldRevisionId = transaction::helpers::extractRevFromDocument(oldDoc);
@@ -982,12 +981,12 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,

   VPackSlice const newDoc(builder->slice());

-  auto state = RocksDBTransactionState::toState(trx);
+  auto* state = RocksDBTransactionState::toState(trx);
   RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_UPDATE);

   // add possible log statement under guard
   state->prepareOperation(_logicalCollection.id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_UPDATE);
-  res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc, options);
+  res = updateDocument(trx, oldDocumentId, oldDoc, newDocumentId, newDoc, options);

   if (res.ok()) {
     trackWaitForSync(trx, options);

@@ -995,7 +994,7 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
     if (options.silent) {
       mdr.clear();
     } else {
-      mdr.setManaged(newDoc.begin(), documentId);
+      mdr.setManaged(newDoc.begin(), newDocumentId);
       TRI_ASSERT(!mdr.empty());
     }

@@ -1027,16 +1026,16 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
                                   std::function<Result(void)> callbackDuringLock) {
   resultMarkerTick = 0;

-  bool const isEdgeCollection = (TRI_COL_TYPE_EDGE == _logicalCollection.type());

   // get the previous revision
   VPackSlice key = newSlice.get(StaticStrings::KeyString);

   Result res;
   if (key.isNone()) {
     return res.reset(TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD);
   }

+  LocalDocumentId const newDocumentId = LocalDocumentId::create();
+  bool const isEdgeCollection = (TRI_COL_TYPE_EDGE == _logicalCollection.type());
+
   // get the previous revision
   res = this->read(trx, key, previous, /*lock*/ false);
   if (res.fail()) {

@@ -1063,8 +1062,6 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
     }
   }

-  LocalDocumentId const documentId = LocalDocumentId::create();
-
   // merge old and new values
   TRI_voc_rid_t revisionId;
   transaction::BuilderLeaser builder(trx);
@@ -1087,13 +1084,13 @@ Result RocksDBCollection::replace(transaction::Methods* trx,

   VPackSlice const newDoc(builder->slice());

-  auto state = RocksDBTransactionState::toState(trx);
+  auto* state = RocksDBTransactionState::toState(trx);
   RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_REPLACE);

   // add possible log statement under guard
   state->prepareOperation(_logicalCollection.id(), revisionId, TRI_VOC_DOCUMENT_OPERATION_REPLACE);

-  res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc, options);
+  res = updateDocument(trx, oldDocumentId, oldDoc, newDocumentId, newDoc, options);

   if (res.ok()) {
     trackWaitForSync(trx, options);

@@ -1101,7 +1098,7 @@ Result RocksDBCollection::replace(transaction::Methods* trx,
     if (options.silent) {
       mdr.clear();
     } else {
-      mdr.setManaged(newDoc.begin(), documentId);
+      mdr.setManaged(newDoc.begin(), newDocumentId);
       TRI_ASSERT(!mdr.empty());
     }

@@ -1337,6 +1334,7 @@ Result RocksDBCollection::updateDocument(transaction::Methods* trx,

   RocksDBKeyLeaser key(trx);
   key->constructDocument(_objectId, oldDocumentId);
+  TRI_ASSERT(key->containsLocalDocumentId(oldDocumentId));
   blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));

   rocksdb::Status s = mthds->SingleDelete(RocksDBColumnFamily::documents(), key.ref());

@@ -1345,8 +1343,8 @@ Result RocksDBCollection::updateDocument(transaction::Methods* trx,
   }

   key->constructDocument(_objectId, newDocumentId);
-  // simon: we do not need to blacklist the new documentId
   TRI_ASSERT(key->containsLocalDocumentId(newDocumentId));
+  // simon: we do not need to blacklist the new documentId
   s = mthds->PutUntracked(RocksDBColumnFamily::documents(), key.ref(),
                           rocksdb::Slice(newDoc.startAs<char>(),
                                          static_cast<size_t>(newDoc.byteSize())));
@@ -93,7 +93,7 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t objectId
   return rocks->mapObjectToCollection(objectId);
 }

-std::tuple<TRI_voc_tick_t, TRI_voc_cid_t, TRI_idx_iid_t> mapObjectToIndex(uint64_t objectId) {
+RocksDBEngine::IndexTriple mapObjectToIndex(uint64_t objectId) {
   StorageEngine* engine = EngineSelectorFeature::ENGINE;
   TRI_ASSERT(engine != nullptr);
   RocksDBEngine* rocks = static_cast<RocksDBEngine*>(engine);

@@ -71,7 +71,7 @@ arangodb::Result globalRocksDBRemove(rocksdb::ColumnFamilyHandle* cf,
 uint64_t latestSequenceNumber();

 std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t);
-std::tuple<TRI_voc_tick_t, TRI_voc_cid_t, TRI_idx_iid_t> mapObjectToIndex(uint64_t);
+RocksDBEngine::IndexTriple mapObjectToIndex(uint64_t);

 /// @brief count all keys in the given column family
 std::size_t countKeys(rocksdb::DB*, rocksdb::ColumnFamilyHandle* cf);
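Note: both hunks above replace the spelled-out tuple type with the engine's named alias. Judging from the replaced text, RocksDBEngine declares something equivalent to the following — the id typedefs here are stand-ins for this sketch, assumed to be unsigned 64-bit ids:

    #include <cstdint>
    #include <tuple>

    using TRI_voc_tick_t = uint64_t;  // assumption for the sketch
    using TRI_voc_cid_t  = uint64_t;
    using TRI_idx_iid_t  = uint64_t;

    struct RocksDBEngine {
      // alias inferred from the tuple type it replaces in the diff
      using IndexTriple = std::tuple<TRI_voc_tick_t, TRI_voc_cid_t, TRI_idx_iid_t>;
    };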
@@ -92,8 +92,6 @@ class RocksDBEdgeIndex final : public RocksDBIndex {

   void toVelocyPack(VPackBuilder&, std::underlying_type<Index::Serialize>::type) const override;

-  bool hasBatchInsert() const override { return false; }
-
   bool supportsFilterCondition(std::vector<std::shared_ptr<arangodb::Index>> const& allIndexes,
                                arangodb::aql::AstNode const*,
                                arangodb::aql::Variable const*, size_t, size_t&,

@@ -242,7 +242,8 @@ void RocksDBIndex::afterTruncate(TRI_voc_tick_t) {

 Result RocksDBIndex::update(transaction::Methods& trx, RocksDBMethods* mthd,
                             LocalDocumentId const& oldDocumentId,
-                            velocypack::Slice const& oldDoc, LocalDocumentId const& newDocumentId,
+                            velocypack::Slice const& oldDoc,
+                            LocalDocumentId const& newDocumentId,
                             velocypack::Slice const& newDoc, Index::OperationMode mode) {
   // It is illegal to call this method on the primary index
   // RocksDBPrimaryIndex must override this method accordingly

@@ -406,7 +406,7 @@ TRI_voc_cid_t RocksDBKey::viewId(char const* data, size_t size) {

 TRI_voc_cid_t RocksDBKey::objectId(char const* data, size_t size) {
   TRI_ASSERT(data != nullptr);
-  TRI_ASSERT(size > sizeof(uint64_t));
+  TRI_ASSERT(size >= sizeof(uint64_t));
   return uint64FromPersistent(data);
 }
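Note: the assertion above is relaxed from > to >= because a key may consist of nothing but the 8-byte objectId prefix, which uint64FromPersistent then decodes. A hedged sketch of what such a decoder does — the real helper may differ in byte order and style:

    #include <cstdint>
    #include <cstring>

    // Reads the leading 8 bytes of a persisted key as the objectId.
    // Requires size >= sizeof(uint64_t) -- exactly the relaxed assertion above.
    static uint64_t uint64FromPersistent(char const* data) {
      uint64_t value;
      std::memcpy(&value, data, sizeof(value));  // byte order is an assumption here
      return value;
    }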
@@ -147,7 +147,8 @@ rocksdb::Status RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
                                             rocksdb::Slice const& key, std::string* val) {
   TRI_ASSERT(cf != nullptr);
   rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
-  TRI_ASSERT(ro.snapshot != nullptr);
+  TRI_ASSERT(ro.snapshot != nullptr ||
+             _state->isReadOnlyTransaction() && _state->isSingleOperation());
   return _db->Get(ro, cf, key, val);
 }

@@ -156,7 +157,8 @@ rocksdb::Status RocksDBReadOnlyMethods::Get(rocksdb::ColumnFamilyHandle* cf,
                                             rocksdb::PinnableSlice* val) {
   TRI_ASSERT(cf != nullptr);
   rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
-  TRI_ASSERT(ro.snapshot != nullptr);
+  TRI_ASSERT(ro.snapshot != nullptr ||
+             _state->isReadOnlyTransaction() && _state->isSingleOperation());
   return _db->Get(ro, cf, key, val);
 }
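Note: in the two relaxed assertions above, && binds tighter than ||, so the condition parses as

    ro.snapshot != nullptr ||
        (_state->isReadOnlyTransaction() && _state->isSingleOperation())

i.e. a missing snapshot is tolerated only for single-operation read-only transactions. Compilers typically suggest the explicit parentheses here (e.g. Clang's -Wlogical-op-parentheses).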
@@ -45,7 +45,6 @@
 #include "RocksDBEngine/RocksDBSettingsManager.h"
 #include "RocksDBEngine/RocksDBVPackIndex.h"
 #include "RocksDBEngine/RocksDBValue.h"
-#include "StorageEngine/EngineSelectorFeature.h"
 #include "Transaction/Helpers.h"
 #include "VocBase/KeyGenerator.h"
 #include "VocBase/ticks.h"

@@ -192,12 +191,12 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
         continue;
       }
       TRI_DEFER(vocbase->release());
-      auto collection = vocbase->lookupCollection(dbColPair.second);
-      if (collection == nullptr) {
+      auto coll = vocbase->lookupCollection(dbColPair.second);
+      if (coll == nullptr) {
         continue;
       }

-      auto* rcoll = static_cast<RocksDBCollection*>(collection->getPhysical());
+      auto* rcoll = static_cast<RocksDBCollection*>(coll->getPhysical());
       if (ops.mustTruncate) { // first we must reset the counter
         rcoll->meta().countRefUnsafe()._added = 0;
         rcoll->meta().countRefUnsafe()._removed = 0;

@@ -214,7 +213,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
       auto const& it = _generators.find(rcoll->objectId());
       if (it != _generators.end()) {
         std::string k(basics::StringUtils::itoa(it->second));
-        collection->keyGenerator()->track(k.data(), k.size());
+        coll->keyGenerator()->track(k.data(), k.size());
         _generators.erase(it);
       }
     }

@@ -289,7 +288,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {

   /// Truncate indexes of collection with objectId
   bool truncateIndexes(uint64_t objectId) {
-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
     RocksDBEngine::CollectionPair pair = engine->mapObjectToCollection(objectId);
     if (pair.first == 0 || pair.second == 0) {
       return false;

@@ -320,7 +319,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {

   // find estimator for index
   RocksDBCuckooIndexEstimator<uint64_t>* findEstimator(uint64_t objectId) {
-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
     RocksDBEngine::IndexTriple triple = engine->mapObjectToIndex(objectId);
     if (std::get<0>(triple) == 0 && std::get<1>(triple) == 0) {
       return nullptr;
@@ -420,9 +419,8 @@ class WBReader final : public rocksdb::WriteBatch::Handler {

     updateMaxTick(column_family_id, key, value);
     if (column_family_id == RocksDBColumnFamily::documents()->GetID()) {
-      uint64_t objectId = RocksDBKey::objectId(key);
       Operations* ops = nullptr;
-      if (shouldHandleCollection(objectId, &ops)) {
+      if (shouldHandleCollection(RocksDBKey::objectId(key), &ops)) {
         TRI_ASSERT(ops != nullptr);
         ops->lastSequenceNumber = _currentSequence;
         ops->added++;

@@ -448,7 +446,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
       }
     }

-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
     for (auto helper : engine->recoveryHelpers()) {
       helper->PutCF(column_family_id, key, value);
     }

@@ -498,7 +496,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
   rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
     LOG_TOPIC("5f341", TRACE, Logger::ENGINES) << "recovering DELETE " << RocksDBKey(key);
     handleDeleteCF(column_family_id, key);
-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
     for (auto helper : engine->recoveryHelpers()) {
       helper->DeleteCF(column_family_id, key);
     }

@@ -510,7 +508,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
     LOG_TOPIC("aa997", TRACE, Logger::ENGINES) << "recovering SINGLE DELETE " << RocksDBKey(key);
     handleDeleteCF(column_family_id, key);

-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
    for (auto helper : engine->recoveryHelpers()) {
       helper->SingleDeleteCF(column_family_id, key);
     }
@@ -525,28 +523,16 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
                                           << RocksDBKey(end_key);
     incTick();
     // drop and truncate can use this, truncate is handled via a Log marker
-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
     for (auto helper : engine->recoveryHelpers()) {
       helper->DeleteRangeCF(column_family_id, begin_key, end_key);
     }

-    return rocksdb::Status(); // make WAL iterator happy
-  }
-
-  void LogData(const rocksdb::Slice& blob) override {
-    // a delete log message appears directly before a Delete
-    RocksDBLogType type = RocksDBLogValue::type(blob);
-    switch (type) {
-      case RocksDBLogType::DocumentRemoveV2: // remove within a trx
-        TRI_ASSERT(_lastRemovedDocRid == 0);
-        _lastRemovedDocRid = RocksDBLogValue::revisionId(blob);
-        break;
-      case RocksDBLogType::SingleRemoveV2: // single remove
-        TRI_ASSERT(_lastRemovedDocRid == 0);
-        _lastRemovedDocRid = RocksDBLogValue::revisionId(blob);
-        break;
-      case RocksDBLogType::CollectionTruncate: {
-        uint64_t objectId = RocksDBLogValue::objectId(blob);
+    // check for a range-delete of the primary index
+    if (column_family_id == RocksDBColumnFamily::documents()->GetID()) {
+      uint64_t objectId = RocksDBKey::objectId(begin_key);
+      TRI_ASSERT(objectId == RocksDBKey::objectId(end_key));
+
       Operations* ops = nullptr;
       if (shouldHandleCollection(objectId, &ops)) {
         TRI_ASSERT(ops != nullptr);

@@ -562,15 +548,25 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
         LOG_TOPIC("04032", WARN, Logger::ENGINES)
             << "unable to truncate indexes for objectId " << objectId;
       }

-      _lastRemovedDocRid = 0; // reset in any other case
-      break;
     }

+    return rocksdb::Status(); // make WAL iterator happy
+  }
+
+  void LogData(const rocksdb::Slice& blob) override {
+    // a delete log message appears directly before a Delete
+    RocksDBLogType type = RocksDBLogValue::type(blob);
+    switch (type) {
+      case RocksDBLogType::DocumentRemoveV2: // remove within a trx
+      case RocksDBLogType::SingleRemoveV2: // single remove
+        TRI_ASSERT(_lastRemovedDocRid == 0);
+        _lastRemovedDocRid = RocksDBLogValue::revisionId(blob);
+        break;
       default:
         _lastRemovedDocRid = 0; // reset in any other case
         break;
     }
-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
     for (auto helper : engine->recoveryHelpers()) {
       helper->LogData(blob);
     }
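Note: DocumentRemoveV2 and SingleRemoveV2 had identical bodies, so the rewritten switch lets the first label fall through to a shared body. Reduced example of the idiom — the enum here is a stand-in, not the real RocksDBLogType:

    #include <cstdint>

    enum class LogType : uint8_t { DocumentRemoveV2, SingleRemoveV2, Other };

    uint64_t removedRevision(LogType type, uint64_t rid) {
      switch (type) {
        case LogType::DocumentRemoveV2:  // remove within a trx
        case LogType::SingleRemoveV2:    // single remove: same handling, shared body
          return rid;
        default:
          return 0;  // reset in any other case
      }
    }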
@@ -585,7 +581,7 @@ Result RocksDBRecoveryManager::parseRocksWAL() {

   Result res = basics::catchToResult([&]() -> Result {
     Result rv;
-    RocksDBEngine* engine = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
     for (auto& helper : engine->recoveryHelpers()) {
       helper->prepare();
     }

@@ -116,15 +116,13 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
   rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
   _rocksReadOptions.prefix_same_as_start = true; // should always be true

+  TRI_ASSERT(_readSnapshot == nullptr);
   if (isReadOnlyTransaction()) {
-    if (_readSnapshot == nullptr) { // replication may donate a snapshot
     _readSnapshot = db->GetSnapshot(); // must call ReleaseSnapshot later
-    }
     TRI_ASSERT(_readSnapshot != nullptr);
     _rocksReadOptions.snapshot = _readSnapshot;
     _rocksMethods.reset(new RocksDBReadOnlyMethods(this));
   } else {
-    TRI_ASSERT(_readSnapshot == nullptr);
     createTransaction();
     _rocksReadOptions.snapshot = _rocksTransaction->GetSnapshot();
     if (hasHint(transaction::Hints::Hint::INTERMEDIATE_COMMITS)) {

@@ -226,6 +224,8 @@ void RocksDBTransactionState::cleanupTransaction() noexcept {
     _cacheTx = nullptr;
   }
   if (_readSnapshot != nullptr) {
+    TRI_ASSERT(isReadOnlyTransaction() ||
+               hasHint(transaction::Hints::Hint::INTERMEDIATE_COMMITS));
     rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
     db->ReleaseSnapshot(_readSnapshot); // calls delete
     _readSnapshot = nullptr;

@@ -273,6 +273,7 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
     TRI_ASSERT(_numLogdata == (2 + _numRemoves));
   }
   ++_numCommits;
+  TRI_ASSERT(x > 0);
 #endif

   // prepare for commit on each collection, e.g. place blockers for estimators

@@ -355,17 +356,15 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
               _rocksTransaction->GetNumPuts() == 0 &&
               _rocksTransaction->GetNumDeletes() == 0);
    // this is most likely the fill index case
-    rocksdb::SequenceNumber seq = _rocksTransaction->GetSnapshot()->GetSequenceNumber();
+#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
    for (auto& trxColl : _collections) {
      TRI_IF_FAILURE("RocksDBCommitCounts") { continue; }
      auto* rcoll = static_cast<RocksDBTransactionCollection*>(trxColl);
-      rcoll->prepareCommit(id(), seq);
-      // We get here if we have filled indexes. So let us commit counts and
-      // any buffered index estimator updates
-      rcoll->commitCounts(id(), seq + 1);
+      TRI_ASSERT(!rcoll->hasOperations());
+      TRI_ASSERT(rcoll->stealTrackedOperations().empty());
    }
    // don't write anything if the transaction is empty
-    result = rocksutils::convertStatus(_rocksTransaction->Rollback());
+#endif
  }

  return result;
@@ -739,36 +739,66 @@ Result RocksDBVPackIndex::insert(transaction::Methods& trx, RocksDBMethods* mthd
   return res;
 }

+namespace {
+bool attributesEqual(VPackSlice first, VPackSlice second,
+                     std::vector<arangodb::basics::AttributeName>::const_iterator begin,
+                     std::vector<arangodb::basics::AttributeName>::const_iterator end) {
+  for (; begin != end; ++begin) {
+    // fetch subattribute
+    first = first.get(begin->name);
+    second = second.get(begin->name);
+    if (first.isExternal()) {
+      first = first.resolveExternal();
+    }
+    if (second.isExternal()) {
+      second = second.resolveExternal();
+    }
+
+    if (begin->shouldExpand &&
+        first.isArray() && second.isArray()) {
+      auto next = begin + 1;
+      VPackArrayIterator it1(first), it2(second);
+      while (it1.valid() && it2.valid()) {
+        if (!attributesEqual(*it1, *it2, next, end)) {
+          return false;
+        }
+        it1++;
+        it2++;
+      }
+      return true;
+    }
+
+    int dist = std::distance(begin, end);
+    bool notF1 = first.isNone() || (dist == 1 && !first.isObject());
+    bool notF2 = second.isNone() || (dist == 1 && !second.isObject());
+    if (notF1 != notF2) {
+      return false;
+    }
+    if (notF1 || notF2) { // one of the paths was not found
+      break;
+    }
+  }
+
+  return (basics::VelocyPackHelper::compare(first, second, true) == 0);
+}
+} // namespace
+
 Result RocksDBVPackIndex::update(transaction::Methods& trx, RocksDBMethods* mthds,
                                  LocalDocumentId const& oldDocumentId,
                                  velocypack::Slice const& oldDoc,
                                  LocalDocumentId const& newDocumentId,
                                  velocypack::Slice const& newDoc,
                                  Index::OperationMode mode) {
-  if (!_unique || _useExpansion) {
+  if (!_unique) {
     // only unique index supports in-place updates
     // lets also not handle the complex case of expanded arrays
     return RocksDBIndex::update(trx, mthds, oldDocumentId, oldDoc,
                                 newDocumentId, newDoc, mode);
-  } else {
-    Result res;
-    rocksdb::Status s;
-    bool equal = true;
-
-    for (size_t i = 0; i < _paths.size(); ++i) {
-      TRI_ASSERT(!_paths[i].empty());
-      VPackSlice oldSlice = oldDoc.get(_paths[i]);
-      VPackSlice newSlice = newDoc.get(_paths[i]);
-
-      if ((oldSlice.isNone() || oldSlice.isNull()) &&
-          (newSlice.isNone() || newSlice.isNull())) {
-        // attribute not found
-        if (_sparse) {
-          // if sparse we do not have to index, this is indicated by result
-          // being shorter than n
-          return res;
-        }
   }
-      } else if (basics::VelocyPackHelper::compare(oldSlice, newSlice, true)) {
+
+  bool equal = true;
+  for (std::vector<basics::AttributeName> const& path : _fields) {
+    if (!::attributesEqual(oldDoc, newDoc, path.begin(), path.end())) {
       equal = false;
       break;
     }
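Note: attributesEqual (added above) walks one indexed attribute path through both documents, resolving externals and recursing into expanded arrays ("a[*].b"), so update can now detect changed index values per field instead of always bailing out when _useExpansion is set. A toy model of the same walk over a simplified document type — this is an illustration, not the velocypack-based original:

    #include <map>
    #include <string>
    #include <vector>

    // Leaf-or-object value; an empty `fields` map means a scalar leaf.
    struct Value {
      std::map<std::string, Value> fields;
      int scalar = 0;
    };

    // Walk one indexed attribute path (e.g. {"a","b"} for "a.b") through both
    // documents and report whether the indexed value is unchanged.
    bool attributesEqual(Value const& first, Value const& second,
                         std::vector<std::string>::const_iterator begin,
                         std::vector<std::string>::const_iterator end) {
      Value const* f = &first;
      Value const* s = &second;
      for (; begin != end; ++begin) {
        auto fi = f->fields.find(*begin);
        auto si = s->fields.find(*begin);
        bool missing1 = (fi == f->fields.end());
        bool missing2 = (si == s->fields.end());
        if (missing1 != missing2) {
          return false;  // present on one side only: index entry changed
        }
        if (missing1) {
          return true;   // missing on both sides: index entry unchanged
        }
        f = &fi->second;
        s = &si->second;
      }
      return f->scalar == s->scalar;  // compare the leaf values
    }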
@@ -779,6 +809,7 @@ Result RocksDBVPackIndex::update(transaction::Methods& trx, RocksDBMethods* mthd
                                 newDocumentId, newDoc, mode);
   }

+  Result res;
   // more expensive method to
   SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
   SmallVector<RocksDBKey> elements{elementsArena};

@@ -798,7 +829,7 @@ Result RocksDBVPackIndex::update(transaction::Methods& trx, RocksDBMethods* mthd
   size_t const count = elements.size();
   for (size_t i = 0; i < count; ++i) {
     RocksDBKey& key = elements[i];
-    s = mthds->Put(_cf, key, value.string());
+    rocksdb::Status s = mthds->Put(_cf, key, value.string());
     if (!s.ok()) {
       res = rocksutils::convertStatus(s, rocksutils::index);
       break;

@@ -806,7 +837,6 @@ Result RocksDBVPackIndex::update(transaction::Methods& trx, RocksDBMethods* mthd
   }

   return res;
-  }
 }

 /// @brief removes a document from the index

@@ -844,10 +874,10 @@ Result RocksDBVPackIndex::remove(transaction::Methods& trx, RocksDBMethods* mthd
       }
     }
   } else {
-    // non-unique index contain the unique objectID
-    // they should be written exactly once
+    // non-unique index contain the unique objectID written exactly once
     for (size_t i = 0; i < count; ++i) {
       s = mthds->SingleDelete(_cf, elements[i]);

       if (!s.ok()) {
         res.reset(rocksutils::convertStatus(s, rocksutils::index));
       }
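Note: the tightened comment above records the invariant that makes SingleDelete legal here. RocksDB's SingleDelete is only correct for a key that was Put exactly once and never overwritten; non-unique VPack index entries satisfy this because each key embeds the document id. A minimal illustration of the contract against RocksDB's public API (key and value are placeholders):

    #include <rocksdb/db.h>

    void singleDeleteExample(rocksdb::DB* db) {
      rocksdb::WriteOptions wo;
      // Each index entry key embeds the document id, so it is written once...
      db->Put(wo, "vpack-index-entry|doc-42", rocksdb::Slice());
      // ...which satisfies SingleDelete's "put exactly once" requirement.
      rocksdb::Status s = db->SingleDelete(wo, "vpack-index-entry|doc-42");
      (void)s;
    }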
@@ -151,10 +151,10 @@ class PhysicalCollection {
                               arangodb::velocypack::Slice const&) const = 0;

   virtual Result read(transaction::Methods*, arangodb::velocypack::StringRef const& key,
-                      ManagedDocumentResult& result, bool) = 0;
+                      ManagedDocumentResult& result, bool lock) = 0;

   virtual Result read(transaction::Methods*, arangodb::velocypack::Slice const& key,
-                      ManagedDocumentResult& result, bool) = 0;
+                      ManagedDocumentResult& result, bool lock) = 0;

   virtual bool readDocument(transaction::Methods* trx, LocalDocumentId const& token,
                             ManagedDocumentResult& result) const = 0;
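Note: naming the previously anonymous bool parameters, together with the /*lock*/ comments at call sites in the RocksDBCollection hunks above, documents what the flag means without changing the signature's meaning to the compiler. A small self-contained example of the idiom:

    #include <string>

    struct Result {};

    // The name in the declaration is documentation only, but it tells readers
    // (and IDEs) what the flag controls.
    Result read(std::string const& key, bool lock) {
      (void)key;
      (void)lock;
      return Result{};
    }

    Result caller() {
      return read("some-key", /*lock*/ false);  // self-describing call site
    }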
@ -130,7 +130,7 @@ void Collections::enumerate(TRI_vocbase_t* vocbase,
|
||||||
/*static*/ arangodb::Result methods::Collections::lookup( // find collection
|
/*static*/ arangodb::Result methods::Collections::lookup( // find collection
|
||||||
TRI_vocbase_t const& vocbase, // vocbase to search
|
TRI_vocbase_t const& vocbase, // vocbase to search
|
||||||
std::string const& name, // collection name
|
std::string const& name, // collection name
|
||||||
FuncCallback const& func // invoke on found collection
|
FuncCallback func // invoke on found collection
|
||||||
) {
|
) {
|
||||||
if (name.empty()) {
|
if (name.empty()) {
|
||||||
return Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
|
return Result(TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND);
|
||||||
|
@ -208,7 +208,7 @@ void Collections::enumerate(TRI_vocbase_t* vocbase,
|
||||||
arangodb::velocypack::Slice const& properties, // collection properties
|
arangodb::velocypack::Slice const& properties, // collection properties
|
||||||
bool createWaitsForSyncReplication, // replication wait flag
|
bool createWaitsForSyncReplication, // replication wait flag
|
||||||
bool enforceReplicationFactor, // replication factor flag
|
bool enforceReplicationFactor, // replication factor flag
|
||||||
FuncCallback const& func // invoke on collection creation
|
FuncCallback func // invoke on collection creation
|
||||||
) {
|
) {
|
||||||
if (name.empty()) {
|
if (name.empty()) {
|
||||||
return TRI_ERROR_ARANGO_ILLEGAL_NAME;
|
return TRI_ERROR_ARANGO_ILLEGAL_NAME;
|
||||||
|
|
|
@ -73,7 +73,7 @@ struct Collections {
|
||||||
static arangodb::Result lookup( // find collection
|
static arangodb::Result lookup( // find collection
|
||||||
TRI_vocbase_t const& vocbase, // vocbase to search
|
TRI_vocbase_t const& vocbase, // vocbase to search
|
||||||
std::string const& name, // collection name
|
std::string const& name, // collection name
|
||||||
FuncCallback const& callback // invoke on found collection
|
FuncCallback callback // invoke on found collection
|
||||||
);
|
);
|
||||||
|
|
||||||
/// Create collection, ownership of collection in callback is
|
/// Create collection, ownership of collection in callback is
|
||||||
|
@@ -85,7 +85,7 @@ struct Collections {
       arangodb::velocypack::Slice const& properties, // collection properties
       bool createWaitsForSyncReplication, // replication wait flag
       bool enforceReplicationFactor,      // replication factor flag
-      FuncCallback const& callback        // invoke on collection creation
+      FuncCallback callback               // invoke on collection creation
   );
 
   static Result load(TRI_vocbase_t& vocbase, LogicalCollection* coll);
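All four FuncCallback hunks make the same change: the callback moves from FuncCallback const& to pass-by-value, the usual sink idiom for std::function-like parameters. A caller handing in a temporary lambda then pays one move instead of the copy a const& would force once the callee needs to retain or forward the callable. A self-contained sketch — the FuncCallback alias is modeled on its usage in the diff, and the queue is invented for illustration:

    #include <functional>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    struct LogicalCollection { std::string name; };
    using FuncCallback =
        std::function<void(std::shared_ptr<LogicalCollection> const&)>;

    // A by-value std::function parameter is a "sink": the callee can move it
    // into storage, so a caller passing a temporary never triggers a copy.
    class CallbackQueue {
     public:
      void enqueue(FuncCallback func) {
        _pending.push_back(std::move(func));  // move, never copy
      }
      void runAll(std::shared_ptr<LogicalCollection> const& coll) {
        for (auto& func : _pending) func(coll);
        _pending.clear();
      }
     private:
      std::vector<FuncCallback> _pending;
    };

    int main() {
      CallbackQueue queue;
      queue.enqueue([](std::shared_ptr<LogicalCollection> const& coll) {
        // inspect coll->name ...
      });
      queue.runAll(std::make_shared<LogicalCollection>(LogicalCollection{"users"}));
    }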
@@ -235,7 +235,6 @@ SECTION("test_defaults") {
   CHECK((logicalCollection.get() == &(link->collection())));
   CHECK((link->fieldNames().empty()));
   CHECK((link->fields().empty()));
-  CHECK((true == link->hasBatchInsert()));
   CHECK((false == link->hasExpansion()));
   CHECK((false == link->hasSelectivityEstimate()));
   CHECK((false == link->implicitlyUnique()));
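This and every remaining test hunk drop the same single line: with hasBatchInsert() deleted from the link/index interface elsewhere in this commit, any CHECK against it would no longer compile, so the assertion is removed while the neighbouring capability checks stay. A toy illustration with a stand-in CHECK macro (not the real test harness):

    #include <cassert>
    #define CHECK(expr) assert(expr)

    struct IndexSketch {
      // bool hasBatchInsert() const;   // removed along with the feature
      bool hasExpansion() const { return false; }
      bool hasSelectivityEstimate() const { return false; }
    };

    int main() {
      IndexSketch index;
      // CHECK((true == index.hasBatchInsert()));  // would no longer compile
      CHECK((false == index.hasExpansion()));
      CHECK((false == index.hasSelectivityEstimate()));
    }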
@@ -286,7 +285,6 @@ SECTION("test_defaults") {
   CHECK((logicalCollection.get() == &(link->collection())));
   CHECK((link->fieldNames().empty()));
   CHECK((link->fields().empty()));
-  CHECK((true == link->hasBatchInsert()));
   CHECK((false == link->hasExpansion()));
   CHECK((false == link->hasSelectivityEstimate()));
   CHECK((false == link->implicitlyUnique()));
@@ -337,7 +337,6 @@ SECTION("test_create_drop") {
   CHECK((updatedCollection0.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -445,7 +444,6 @@ SECTION("test_create_drop") {
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -1287,7 +1287,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -1334,7 +1333,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -1381,7 +1379,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -1516,7 +1513,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -1564,7 +1560,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -1853,7 +1848,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -1900,7 +1894,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -2051,7 +2044,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -2099,7 +2091,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -2147,7 +2138,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -2495,7 +2485,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -2542,7 +2531,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -2681,7 +2669,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -2810,7 +2797,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -3117,7 +3103,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -3165,7 +3150,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -3212,7 +3196,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -3505,7 +3488,6 @@ TEST_CASE("IResearchViewCoordinatorTest",
   CHECK((updatedCollection.get() == &(index->collection())));
   CHECK((index->fieldNames().empty()));
   CHECK((index->fields().empty()));
-  CHECK((true == index->hasBatchInsert()));
   CHECK((false == index->hasExpansion()));
   CHECK((false == index->hasSelectivityEstimate()));
   CHECK((false == index->implicitlyUnique()));
@@ -169,8 +169,6 @@ class EdgeIndexMock final : public arangodb::Index {
 
   size_t memory() const override { return sizeof(EdgeIndexMock); }
 
-  bool hasBatchInsert() const override { return false; }
-
   void load() override {}
   void unload() override {}
   void afterTruncate(TRI_voc_tick_t) override {
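The EdgeIndexMock hunk is the mirror image on the mock side: because the override keyword binds the mock's hasBatchInsert() to a base-class virtual that no longer exists, the stale override has to go too. A minimal sketch of that mechanism, with invented types:

    #include <cstddef>

    struct Index {
      virtual ~Index() = default;
      virtual std::size_t memory() const = 0;
      // virtual bool hasBatchInsert() const = 0;  // removed from the interface
    };

    struct EdgeIndexMockSketch final : Index {
      std::size_t memory() const override { return sizeof(EdgeIndexMockSketch); }
      // bool hasBatchInsert() const override { return false; }
      //   ^ deleting this is mandatory: with no base virtual left, 'override'
      //     would make the compiler reject the stale declaration.
    };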