
issue 511.2.1: use references instead of raw pointers for Index operations to avoid null pointer access (#7725)

Authored by Vasiliy on 2018-12-11 14:40:49 +03:00, committed by Andrey Abramov
parent 5153ea0807
commit f1245af554
55 changed files with 1377 additions and 813 deletions
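The same pattern repeats across every file below: Index and collection DML operations that used to take transaction::Methods* now take transaction::Methods&, so the "if (!trx)" guards and their error paths disappear and callees use trx.state() instead of trx->state(). The following is a minimal standalone sketch of that idea, not ArangoDB code; the Result and Transaction types here are simplified stand-ins for arangodb::Result and transaction::Methods.

// Standalone illustration of the pointer-to-reference change; simplified
// stand-ins, not the real transaction::Methods / arangodb::Result types.
#include <iostream>
#include <string>

struct Result {
  int errorNumber = 0;
  std::string message;
  bool ok() const { return errorNumber == 0; }
};

struct Transaction {
  bool started = true;
  bool state() const { return started; }  // stands in for trx.state()
};

// Before: a raw pointer that every callee had to null-check.
Result insertBefore(Transaction* trx) {
  if (trx == nullptr) {
    return {1, "failed to get transaction"};  // extra error path
  }
  if (!trx->state()) {
    return {2, "failed to get transaction state"};
  }
  return {};
}

// After: a reference; passing "no transaction" is impossible by construction,
// so only the state check remains (as in the IResearchLink hunks below).
Result insertAfter(Transaction& trx) {
  if (!trx.state()) {
    return {2, "failed to get transaction state"};
  }
  return {};
}

int main() {
  Transaction trx;
  std::cout << insertBefore(&trx).ok() << '\n';  // caller passes a pointer
  std::cout << insertAfter(trx).ok() << '\n';    // caller passes the object
  return 0;
}

At pointer/reference boundaries the callers in the diff simply switch between &trx, *trx and trx, as visible in the recovery-helper and MMFilesCollection hunks.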

View File

@@ -477,8 +477,10 @@ void ClusterCollection::invokeOnAllElements(
 // -- SECTION DML Operations --
 ///////////////////////////////////
-Result ClusterCollection::truncate(transaction::Methods* trx,
-                                   OperationOptions& options) {
+Result ClusterCollection::truncate(
+    transaction::Methods& trx,
+    OperationOptions& options
+) {
   return Result(TRI_ERROR_NOT_IMPLEMENTED);
 }
@@ -537,11 +539,17 @@ Result ClusterCollection::replace(
 }
 Result ClusterCollection::remove(
-    arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
-    arangodb::ManagedDocumentResult& previous, OperationOptions& options,
-    TRI_voc_tick_t& resultMarkerTick, bool, TRI_voc_rid_t& prevRev,
-    TRI_voc_rid_t& revisionId, KeyLockInfo* /*keyLock*/,
-    std::function<Result(void)> /*callbackDuringLock*/) {
+    transaction::Methods& trx,
+    velocypack::Slice slice,
+    ManagedDocumentResult& previous,
+    OperationOptions& options,
+    TRI_voc_tick_t& resultMarkerTick,
+    bool lock,
+    TRI_voc_rid_t& prevRev,
+    TRI_voc_rid_t& revisionId,
+    KeyLockInfo* /*keyLock*/,
+    std::function<Result(void)> /*callbackDuringLock*/
+) {
   THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
 }

View File

@@ -128,7 +128,10 @@ class ClusterCollection final : public PhysicalCollection {
   // -- SECTION DML Operations --
   ///////////////////////////////////
-  Result truncate(transaction::Methods* trx, OperationOptions&) override;
+  Result truncate(
+      transaction::Methods& trx,
+      OperationOptions& options
+  ) override;
   void deferDropCollection(
       std::function<bool(LogicalCollection&)> const& callback
@@ -175,13 +178,18 @@ class ClusterCollection final : public PhysicalCollection {
                 TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
                 std::function<Result(void)> callbackDuringLock) override;
-  Result remove(arangodb::transaction::Methods* trx,
-                arangodb::velocypack::Slice slice,
-                arangodb::ManagedDocumentResult& previous,
-                OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
-                bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
-                KeyLockInfo* /*keyLockInfo*/,
-                std::function<Result(void)> callbackDuringLock) override;
+  Result remove(
+      transaction::Methods& trx,
+      velocypack::Slice slice,
+      ManagedDocumentResult& previous,
+      OperationOptions& options,
+      TRI_voc_tick_t& resultMarkerTick,
+      bool lock,
+      TRI_voc_rid_t& prevRev,
+      TRI_voc_rid_t& revisionId,
+      KeyLockInfo* keyLockInfo,
+      std::function<Result(void)> callbackDuringLock
+  ) override;
  protected:
   /// @brief Inject figures that are specific to StorageEngine

View File

@@ -76,7 +76,25 @@ class ClusterIndex : public Index {
   void unload() override {}
   size_t memory() const override { return 0; }
-  int drop() override { return TRI_ERROR_NOT_IMPLEMENTED; }
+  Result insert(
+      transaction::Methods& trx,
+      LocalDocumentId const& documentId,
+      velocypack::Slice const& doc,
+      Index::OperationMode mode
+  ) override {
+    return Result(TRI_ERROR_NOT_IMPLEMENTED);
+  }
+  Result remove(
+      transaction::Methods& trx,
+      LocalDocumentId const& documentId,
+      arangodb::velocypack::Slice const& doc,
+      Index::OperationMode mode
+  ) override {
+    return Result(TRI_ERROR_NOT_IMPLEMENTED);
+  }
+  Result drop() override { return Result(TRI_ERROR_NOT_IMPLEMENTED); }
   bool hasCoveringIterator() const override;
@@ -96,21 +114,23 @@ class ClusterIndex : public Index {
   arangodb::aql::AstNode* specializeCondition(
       arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;
+  virtual arangodb::IndexIterator* iteratorForCondition(
+      arangodb::transaction::Methods* trx,
+      arangodb::ManagedDocumentResult* result,
+      arangodb::aql::AstNode const* condNode,
+      arangodb::aql::Variable const* var,
+      arangodb::IndexIteratorOptions const& opts
+  ) override {
+    TRI_ASSERT(false); // should not be called
+    return nullptr;
+  }
   /// @brief provides a size hint for the index
-  int sizeHint(transaction::Methods* /*trx*/, size_t /*size*/) override final {
-    // nothing to do here
-    return TRI_ERROR_NO_ERROR;
-  }
-  Result insert(transaction::Methods* trx, LocalDocumentId const& documentId,
-                velocypack::Slice const& doc, OperationMode mode) override {
-    return TRI_ERROR_NOT_IMPLEMENTED;
-  }
-  Result remove(transaction::Methods* trx, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const& doc,
-                OperationMode mode) override {
-    return TRI_ERROR_NOT_IMPLEMENTED;
-  }
+  Result sizeHint(
+      transaction::Methods& /*trx*/,
+      size_t /*size*/
+  ) override final {
+    return Result(); // nothing to do here
+  }
   void updateProperties(velocypack::Slice const&);

View File

@@ -290,7 +290,7 @@ void IResearchLink::afterTruncate() {
 }
 void IResearchLink::batchInsert(
-    transaction::Methods* trx,
+    arangodb::transaction::Methods& trx,
     std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& batch,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
 ) {
@@ -302,15 +302,7 @@ void IResearchLink::batchInsert(
     throw std::runtime_error(std::string("failed to report status during batch insert for arangosearch link '") + arangodb::basics::StringUtils::itoa(_id) + "'");
   }
-  if (!trx) {
-    LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
-      << "failed to get transaction while inserting a document into arangosearch link '" << id() << "'";
-    queue->setStatus(TRI_ERROR_BAD_PARAMETER); // 'trx' required
-    return;
-  }
-  if (!trx->state()) {
+  if (!trx.state()) {
     LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
       << "failed to get transaction state while inserting a document into arangosearch link '" << id() << "'";
     queue->setStatus(TRI_ERROR_BAD_PARAMETER); // transaction state required
@@ -318,7 +310,7 @@ void IResearchLink::batchInsert(
     return;
   }
-  auto& state = *(trx->state());
+  auto& state = *(trx.state());
   auto* key = this;
   // TODO FIXME find a better way to look up a ViewState
@@ -348,7 +340,7 @@ void IResearchLink::batchInsert(
     ctx = ptr.get();
     state.cookie(key, std::move(ptr));
-    if (!ctx || !trx->addStatusChangeCallback(&_trxCallback)) {
+    if (!ctx || !trx.addStatusChangeCallback(&_trxCallback)) {
       LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
         << "failed to store state into a TransactionState for batch insert into arangosearch link '" << id() << "', tid '" << state.id() << "'";
       queue->setStatus(TRI_ERROR_INTERNAL);
@@ -872,26 +864,19 @@ arangodb::Result IResearchLink::initDataStore(IResearchView const& view) {
 }
 arangodb::Result IResearchLink::insert(
-    transaction::Methods* trx,
+    arangodb::transaction::Methods& trx,
     arangodb::LocalDocumentId const& documentId,
-    VPackSlice const& doc,
-    Index::OperationMode mode
+    arangodb::velocypack::Slice const& doc,
+    arangodb::Index::OperationMode mode
 ) {
-  if (!trx) {
-    return arangodb::Result(
-      TRI_ERROR_BAD_PARAMETER,
-      std::string("failed to get transaction while inserting a document into arangosearch link '") + std::to_string(id()) + "'"
-    );
-  }
-  if (!trx->state()) {
+  if (!trx.state()) {
     return arangodb::Result(
       TRI_ERROR_BAD_PARAMETER,
       std::string("failed to get transaction state while inserting a document into arangosearch link '") + std::to_string(id()) + "'"
     );
   }
-  auto& state = *(trx->state());
+  auto& state = *(trx.state());
   auto* key = this;
   // TODO FIXME find a better way to look up a ViewState
@@ -920,7 +905,7 @@ arangodb::Result IResearchLink::insert(
   ctx = ptr.get();
   state.cookie(key, std::move(ptr));
-  if (!ctx || !trx->addStatusChangeCallback(&_trxCallback)) {
+  if (!ctx || !trx.addStatusChangeCallback(&_trxCallback)) {
     return arangodb::Result(
       TRI_ERROR_INTERNAL,
       std::string("failed to store state into a TransactionState for insert into arangosearch link '") + std::to_string(id()) + "', tid '" + std::to_string(state.id()) + "', revision '" + std::to_string(documentId.id()) + "'"
@@ -1049,26 +1034,19 @@ size_t IResearchLink::memory() const {
 }
 arangodb::Result IResearchLink::remove(
-    transaction::Methods* trx,
-    LocalDocumentId const& documentId,
-    VPackSlice const& /*doc*/,
-    Index::OperationMode /*mode*/
+    arangodb::transaction::Methods& trx,
+    arangodb::LocalDocumentId const& documentId,
+    arangodb::velocypack::Slice const& /*doc*/,
+    arangodb::Index::OperationMode /*mode*/
 ) {
-  if (!trx) {
-    return arangodb::Result(
-      TRI_ERROR_BAD_PARAMETER,
-      std::string("failed to get transaction while removing a document from arangosearch link '") + std::to_string(id()) + "'"
-    );
-  }
-  if (!trx->state()) {
+  if (!trx.state()) {
     return arangodb::Result(
       TRI_ERROR_BAD_PARAMETER,
      std::string("failed to get transaction state while removing a document into arangosearch link '") + std::to_string(id()) + "'"
    );
  }
-  auto& state = *(trx->state());
+  auto& state = *(trx.state());
   auto* key = this;
   // TODO FIXME find a better way to look up a ViewState
@@ -1097,7 +1075,7 @@ arangodb::Result IResearchLink::remove(
   ctx = ptr.get();
   state.cookie(key, std::move(ptr));
-  if (!ctx || !trx->addStatusChangeCallback(&_trxCallback)) {
+  if (!ctx || !trx.addStatusChangeCallback(&_trxCallback)) {
     return arangodb::Result(
       TRI_ERROR_INTERNAL,
       std::string("failed to store state into a TransactionState for remove from arangosearch link '") + std::to_string(id()) + "', tid '" + std::to_string(state.id()) + "', revision '" + std::to_string(documentId.id()) + "'"

View File

@@ -94,8 +94,8 @@ class IResearchLink {
   /// '_meta' params
   ////////////////////////////////////////////////////////////////////////////////
   virtual void batchInsert(
-    transaction::Methods* trx,
-    std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const& batch,
+    arangodb::transaction::Methods& trx,
+    std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& batch,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
   ); // arangodb::Index override
@@ -133,10 +133,10 @@ class IResearchLink {
   /// @brief insert an ArangoDB document into an iResearch View using '_meta' params
   ////////////////////////////////////////////////////////////////////////////////
   arangodb::Result insert(
-    transaction::Methods* trx,
-    LocalDocumentId const& documentId,
-    VPackSlice const& doc,
-    Index::OperationMode mode
+    arangodb::transaction::Methods& trx,
+    arangodb::LocalDocumentId const& documentId,
+    arangodb::velocypack::Slice const& doc,
+    arangodb::Index::OperationMode mode
   ); // arangodb::Index override
   bool isPersistent() const; // arangodb::Index override
@@ -176,10 +176,10 @@ class IResearchLink {
   /// @brief remove an ArangoDB document from an iResearch View
   ////////////////////////////////////////////////////////////////////////////////
   arangodb::Result remove(
-    transaction::Methods* trx,
+    arangodb::transaction::Methods& trx,
     arangodb::LocalDocumentId const& documentId,
-    VPackSlice const& doc,
-    Index::OperationMode mode
+    arangodb::velocypack::Slice const& doc,
+    arangodb::Index::OperationMode mode
   ); // arangodb::Index override
   ///////////////////////////////////////////////////////////////////////////////

View File

@@ -72,7 +72,7 @@ class IResearchLinkCoordinator final
   }
   virtual void batchInsert(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
   ) override {
@@ -81,7 +81,7 @@ class IResearchLinkCoordinator final
   virtual bool canBeDropped() const override { return true; }
-  virtual int drop() override { return TRI_ERROR_NO_ERROR; }
+  virtual arangodb::Result drop() override { return arangodb::Result(); }
   //////////////////////////////////////////////////////////////////////////////
   /// @brief the factory for this type of index
@@ -94,7 +94,7 @@ class IResearchLinkCoordinator final
   virtual bool hasSelectivityEstimate() const override { return false; }
   virtual arangodb::Result insert(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     LocalDocumentId const& documentId,
     VPackSlice const& doc,
     OperationMode mode
@@ -108,6 +108,17 @@ class IResearchLinkCoordinator final
   // IResearch does not provide a fixed default sort order
   virtual bool isSorted() const override { return false; }
+  virtual arangodb::IndexIterator* iteratorForCondition(
+    arangodb::transaction::Methods* trx,
+    arangodb::ManagedDocumentResult* result,
+    arangodb::aql::AstNode const* condNode,
+    arangodb::aql::Variable const* var,
+    arangodb::IndexIteratorOptions const& opts
+  ) override {
+    TRI_ASSERT(false); // should not be called
+    return nullptr;
+  }
   virtual void load() override { /* NOOP */ }
   virtual bool matchesDefinition(
@@ -117,7 +128,7 @@ class IResearchLinkCoordinator final
   virtual size_t memory() const override { return _meta.memory(); }
   arangodb::Result remove(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     LocalDocumentId const& documentId,
     VPackSlice const& doc,
     OperationMode mode

View File

@@ -47,7 +47,7 @@ class IResearchMMFilesLink final
   };
   virtual void batchInsert(
-    transaction::Methods* trx,
+    arangodb::transaction::Methods& trx,
     std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
   ) override {
@@ -58,9 +58,7 @@ class IResearchMMFilesLink final
     return IResearchLink::canBeDropped();
   }
-  virtual int drop() override {
-    return IResearchLink::drop().errorNumber();
-  }
+  virtual arangodb::Result drop() override { return IResearchLink::drop(); }
   //////////////////////////////////////////////////////////////////////////////
   /// @brief the factory for this type of index
@@ -76,10 +74,10 @@ class IResearchMMFilesLink final
   }
   virtual arangodb::Result insert(
-    transaction::Methods* trx,
-    LocalDocumentId const& documentId,
-    VPackSlice const& doc,
-    OperationMode mode
+    arangodb::transaction::Methods& trx,
+    arangodb::LocalDocumentId const& documentId,
+    arangodb::velocypack::Slice const& doc,
+    arangodb::Index::OperationMode mode
   ) override {
     return IResearchLink::insert(trx, documentId, doc, mode);
   }
@@ -92,6 +90,17 @@ class IResearchMMFilesLink final
     return IResearchLink::isSorted();
   }
+  virtual arangodb::IndexIterator* iteratorForCondition(
+    arangodb::transaction::Methods* trx,
+    arangodb::ManagedDocumentResult* result,
+    arangodb::aql::AstNode const* condNode,
+    arangodb::aql::Variable const* var,
+    arangodb::IndexIteratorOptions const& opts
+  ) override {
+    TRI_ASSERT(false); // should not be called
+    return nullptr;
+  }
   virtual void load() override {
     IResearchLink::load();
   }
@@ -107,8 +116,8 @@ class IResearchMMFilesLink final
   }
   arangodb::Result remove(
-    transaction::Methods* trx,
-    LocalDocumentId const& documentId,
+    transaction::Methods& trx,
+    arangodb::LocalDocumentId const& documentId,
     VPackSlice const& doc,
     OperationMode mode
   ) override {

View File

@@ -47,7 +47,7 @@ class IResearchRocksDBLink final
   };
   virtual void batchInsert(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
   ) override {
@@ -58,10 +58,10 @@ class IResearchRocksDBLink final
     return IResearchLink::canBeDropped();
   }
-  virtual int drop() override {
+  virtual Result drop() override {
     writeRocksWalMarker();
-    return IResearchLink::drop().errorNumber();
+    return IResearchLink::drop();
   }
   //////////////////////////////////////////////////////////////////////////////
@@ -78,11 +78,11 @@ class IResearchRocksDBLink final
   }
   virtual arangodb::Result insertInternal(
-    transaction::Methods* trx,
-    arangodb::RocksDBMethods*,
-    LocalDocumentId const& documentId,
-    const arangodb::velocypack::Slice& doc,
-    OperationMode mode
+    arangodb::transaction::Methods& trx,
+    arangodb::RocksDBMethods* methods,
+    arangodb::LocalDocumentId const& documentId,
+    arangodb::velocypack::Slice const& doc,
+    arangodb::Index::OperationMode mode
   ) override {
     return IResearchLink::insert(trx, documentId, doc, mode);
   }
@@ -91,6 +91,17 @@ class IResearchRocksDBLink final
     return IResearchLink::isSorted();
   }
+  virtual arangodb::IndexIterator* iteratorForCondition(
+    arangodb::transaction::Methods* trx,
+    arangodb::ManagedDocumentResult* result,
+    arangodb::aql::AstNode const* condNode,
+    arangodb::aql::Variable const* var,
+    arangodb::IndexIteratorOptions const& opts
+  ) override {
+    TRI_ASSERT(false); // should not be called
+    return nullptr;
+  }
   virtual void load() override {
     IResearchLink::load();
   }
@@ -106,11 +117,11 @@ class IResearchRocksDBLink final
   }
   virtual arangodb::Result removeInternal(
-    transaction::Methods* trx,
+    arangodb::transaction::Methods& trx,
     arangodb::RocksDBMethods*,
-    LocalDocumentId const& documentId,
-    const arangodb::velocypack::Slice& doc,
-    OperationMode mode
+    arangodb::LocalDocumentId const& documentId,
+    arangodb::velocypack::Slice const& doc,
+    arangodb::Index::OperationMode mode
   ) override {
     return IResearchLink::remove(trx, documentId, doc, mode);
   }

View File

@@ -333,12 +333,7 @@ void IResearchRocksDBRecoveryHelper::PutCF(uint32_t column_family_id,
       continue; // index was already populated when it was created
     }
-    link->insert(
-      &trx,
-      docId,
-      doc,
-      Index::OperationMode::internal
-    );
+    link->insert(trx, docId, doc, arangodb::Index::OperationMode::internal);
   }
   trx.commit();
@@ -377,10 +372,10 @@ void IResearchRocksDBRecoveryHelper::handleDeleteCF(uint32_t column_family_id,
   for (auto link : links) {
     link->remove(
-      &trx,
+      trx,
       docId,
       arangodb::velocypack::Slice::emptyObjectSlice(),
-      Index::OperationMode::internal
+      arangodb::Index::OperationMode::internal
     );
   }

View File

@@ -550,7 +550,7 @@ bool Index::implicitlyUnique() const {
 }
 void Index::batchInsert(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
   for (auto const& it : documents) {
@@ -563,15 +563,13 @@ void Index::batchInsert(
 }
 /// @brief default implementation for drop
-int Index::drop() {
-  // do nothing
-  return TRI_ERROR_NO_ERROR;
+Result Index::drop() {
+  return Result(); // do nothing
 }
 /// @brief default implementation for sizeHint
-int Index::sizeHint(transaction::Methods*, size_t) {
-  // do nothing
-  return TRI_ERROR_NO_ERROR;
+Result Index::sizeHint(transaction::Methods& trx, size_t size) {
+  return Result(); // do nothing
 }
 /// @brief default implementation for hasBatchInsert

View File

@@ -302,32 +302,38 @@ class Index {
   virtual void toVelocyPackFigures(arangodb::velocypack::Builder&) const;
   std::shared_ptr<arangodb::velocypack::Builder> toVelocyPackFigures() const;
-  virtual Result insert(transaction::Methods*,
-                        LocalDocumentId const& documentId,
-                        arangodb::velocypack::Slice const&,
-                        OperationMode mode) = 0;
-  virtual Result remove(transaction::Methods*,
-                        LocalDocumentId const& documentId,
-                        arangodb::velocypack::Slice const&,
-                        OperationMode mode) = 0;
   virtual void batchInsert(
-      transaction::Methods*,
-      std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const&,
-      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
+      transaction::Methods& trx,
+      std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const& docs,
+      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
+  );
+  virtual Result insert(
+      transaction::Methods& trx,
+      LocalDocumentId const& documentId,
+      arangodb::velocypack::Slice const& doc,
+      OperationMode mode
+  ) = 0;
+  virtual Result remove(
+      transaction::Methods& trx,
+      LocalDocumentId const& documentId,
+      arangodb::velocypack::Slice const& doc,
+      OperationMode mode
+  ) = 0;
   virtual void load() = 0;
   virtual void unload() = 0;
   // called when the index is dropped
-  virtual int drop();
+  virtual Result drop();
   /// @brief called after the collection was truncated
   /// @param tick at which truncate was applied
   virtual void afterTruncate(TRI_voc_tick_t tick) {};
   // give index a hint about the expected size
-  virtual int sizeHint(transaction::Methods*, size_t);
+  virtual Result sizeHint(transaction::Methods& trx, size_t size);
   virtual bool hasBatchInsert() const;
@@ -343,13 +349,13 @@ class Index {
   virtual arangodb::aql::AstNode* specializeCondition(arangodb::aql::AstNode*,
                                                       arangodb::aql::Variable const*) const;
-  virtual IndexIterator* iteratorForCondition(transaction::Methods*,
-                                              ManagedDocumentResult*,
-                                              arangodb::aql::AstNode const*,
-                                              arangodb::aql::Variable const*,
-                                              IndexIteratorOptions const&) {
-    return nullptr; // IResearch will never use this
-  };
+  virtual IndexIterator* iteratorForCondition(
+      transaction::Methods* trx,
+      ManagedDocumentResult* result,
+      aql::AstNode const* condNode,
+      aql::Variable const* var,
+      IndexIteratorOptions const& opts
+  ) = 0;
   bool canUseConditionPart(arangodb::aql::AstNode const* access,
                            arangodb::aql::AstNode const* other,
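The Index.h and Index.cpp hunks above also switch drop() and sizeHint() from returning a bare int error code to returning Result. A standalone sketch of that return-type migration follows; it is illustrative only, with simplified stand-ins for arangodb::Result and the TRI_ERROR_* constants (the numeric values here are placeholders, not the real ones).

// Illustrative stand-ins only; not the real arangodb definitions.
#include <iostream>
#include <string>
#include <utility>

constexpr int TRI_ERROR_NO_ERROR = 0;          // assumed placeholder values
constexpr int TRI_ERROR_NOT_IMPLEMENTED = 9;

class Result {
 public:
  Result() = default;
  Result(int number, std::string message = std::string())
      : _number(number), _message(std::move(message)) {}
  bool ok() const { return _number == TRI_ERROR_NO_ERROR; }
  int errorNumber() const { return _number; }
  std::string const& errorMessage() const { return _message; }

 private:
  int _number = TRI_ERROR_NO_ERROR;
  std::string _message;
};

struct Index {
  virtual ~Index() = default;
  // before: virtual int drop();     -- callers only saw a number
  // after:  virtual Result drop();  -- callers get a number plus a message
  virtual Result drop() { return Result(); }  // default: nothing to do
};

struct UnsupportedIndex : Index {
  Result drop() override {
    return Result(TRI_ERROR_NOT_IMPLEMENTED, "drop() not supported by this index type");
  }
};

int main() {
  UnsupportedIndex idx;
  Result res = idx.drop();
  if (!res.ok()) {
    std::cout << res.errorNumber() << ": " << res.errorMessage() << '\n';
  }
  return 0;
}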

View File

@@ -120,7 +120,9 @@ namespace {
 class MMFilesIndexFillerTask : public basics::LocalTask {
  public:
   MMFilesIndexFillerTask(
-      std::shared_ptr<basics::LocalTaskQueue> const& queue, transaction::Methods* trx, Index* idx,
+      std::shared_ptr<basics::LocalTaskQueue> const& queue,
+      transaction::Methods& trx,
+      Index* idx,
       std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> const& documents)
       : LocalTask(queue), _trx(trx), _idx(idx), _documents(documents) {}
@@ -137,7 +139,7 @@ class MMFilesIndexFillerTask : public basics::LocalTask {
   }
  private:
-  transaction::Methods* _trx;
+  transaction::Methods& _trx;
   Index* _idx;
   std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> _documents;
 };
@@ -1633,8 +1635,10 @@ bool MMFilesCollection::tryLockForCompaction() {
 void MMFilesCollection::finishCompaction() { _compactionLock.unlock(); }
 /// @brief iterator for index open
-bool MMFilesCollection::openIndex(VPackSlice const& description,
-                                  transaction::Methods* trx) {
+bool MMFilesCollection::openIndex(
+    velocypack::Slice const& description,
+    transaction::Methods& trx
+) {
   // VelocyPack must be an index description
   if (!description.isObject()) {
     return false;
@@ -1653,10 +1657,12 @@ bool MMFilesCollection::openIndex(VPackSlice const& description,
 /// @brief initializes an index with a set of existing documents
 void MMFilesCollection::fillIndex(
-    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue, transaction::Methods* trx,
-    arangodb::Index* idx,
-    std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> documents,
-    bool skipPersistent) {
+    std::shared_ptr<basics::LocalTaskQueue> queue,
+    transaction::Methods& trx,
+    Index* idx,
+    std::shared_ptr<std::vector<std::pair<LocalDocumentId, velocypack::Slice>>> documents,
+    bool skipPersistent
+) {
   TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   if (!useSecondaryIndexes()) {
@@ -1680,16 +1686,17 @@ void MMFilesCollection::fillIndex(
 uint32_t MMFilesCollection::indexBuckets() const { return _indexBuckets; }
-int MMFilesCollection::fillAllIndexes(transaction::Methods* trx) {
+int MMFilesCollection::fillAllIndexes(transaction::Methods& trx) {
   READ_LOCKER(guard, _indexesLock);
   return fillIndexes(trx, _indexes);
 }
 /// @brief Fill the given list of Indexes
 int MMFilesCollection::fillIndexes(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     std::vector<std::shared_ptr<arangodb::Index>> const& indexes,
-    bool skipPersistent) {
+    bool skipPersistent
+) {
   // distribute the work to index threads plus this thread
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   size_t const n = indexes.size();
@@ -1777,8 +1784,7 @@ int MMFilesCollection::fillIndexes(
     uint64_t total = 0;
     while (true) {
-      MMFilesSimpleIndexElement element =
-          primaryIdx->lookupSequential(trx, position, total);
+      auto element = primaryIdx->lookupSequential(&trx, position, total);
       if (!element) {
         break;
@@ -1937,7 +1943,7 @@ void MMFilesCollection::open(bool ignoreErrors) {
   useSecondaryIndexes(false);
   try {
-    detectIndexes(&trx);
+    detectIndexes(trx);
     useSecondaryIndexes(old);
   } catch (basics::Exception const& ex) {
     useSecondaryIndexes(old);
@@ -1959,7 +1965,7 @@ void MMFilesCollection::open(bool ignoreErrors) {
   if (!engine->inRecovery() && !engine->upgrading()) {
     // build the index structures, and fill the indexes
-    fillAllIndexes(&trx);
+    fillAllIndexes(trx);
   }
   // successfully opened collection. now adjust version number
@@ -2234,7 +2240,6 @@ std::shared_ptr<Index> MMFilesCollection::lookupIndex(
 std::shared_ptr<Index> MMFilesCollection::createIndex(arangodb::velocypack::Slice const& info,
                                                       bool restore, bool& created) {
   SingleCollectionTransaction trx(
       transaction::StandaloneContext::Create(_logicalCollection.vocbase()),
       _logicalCollection,
@@ -2246,7 +2251,8 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(arangodb::velocypack::Slic
     THROW_ARANGO_EXCEPTION(res);
   }
-  std::shared_ptr<Index> idx = createIndex(&trx, info, restore, created);
+  auto idx = createIndex(trx, info, restore, created);
   if (idx) {
     res = trx.commit();
   }
@@ -2254,16 +2260,18 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(arangodb::velocypack::Slic
   return idx;
 }
-std::shared_ptr<Index> MMFilesCollection::createIndex(transaction::Methods* trx,
-                                                      VPackSlice const& info,
-                                                      bool restore,
-                                                      bool& created) {
+std::shared_ptr<Index> MMFilesCollection::createIndex(
+    transaction::Methods& trx,
+    velocypack::Slice const& info,
+    bool restore,
+    bool& created
+) {
   // prevent concurrent dropping
   // TRI_ASSERT(trx->isLocked(&_logicalCollection, AccessMode::Type::READ));
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   TRI_ASSERT(info.isObject());
   std::shared_ptr<Index> idx = lookupIndex(info);
   if (idx != nullptr) { // We already have this index.
     created = false;
     return idx;
@@ -2321,8 +2329,10 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(transaction::Methods* trx,
 }
 /// @brief Persist an index information to file
-int MMFilesCollection::saveIndex(transaction::Methods* trx,
-                                 std::shared_ptr<arangodb::Index> idx) {
+int MMFilesCollection::saveIndex(
+    transaction::Methods& trx,
+    std::shared_ptr<Index> idx
+) {
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   // we cannot persist PrimaryIndex
   TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
@@ -2823,14 +2833,16 @@ int MMFilesCollection::unlockWrite(bool useDeadlockDetector, TransactionState co
   return TRI_ERROR_NO_ERROR;
 }
-Result MMFilesCollection::truncate(transaction::Methods* trx,
-                                   OperationOptions& options) {
+Result MMFilesCollection::truncate(
+    transaction::Methods& trx,
+    OperationOptions& options
+) {
   auto primaryIdx = primaryIndex();
   options.ignoreRevs = true;
   // create remove marker
-  transaction::BuilderLeaser builder(trx);
+  transaction::BuilderLeaser builder(&trx);
   auto callback = [&](MMFilesSimpleIndexElement const& element) {
     LocalDocumentId const oldDocumentId = element.localDocumentId();
@@ -2841,7 +2853,10 @@ Result MMFilesCollection::truncate(transaction::Methods* trx,
     LocalDocumentId const documentId = LocalDocumentId::create();
     TRI_voc_rid_t revisionId;
-    newObjectForRemove(trx, oldDoc, *builder.get(), options.isRestore, revisionId);
+    newObjectForRemove(
+      &trx, oldDoc, *builder.get(), options.isRestore, revisionId
+    );
     Result res = removeFastPath(trx, revisionId, oldDocumentId, VPackSlice(vpack),
                                 options, documentId, builder->slice());
@@ -3007,8 +3022,16 @@ Result MMFilesCollection::insert(
   try {
     // insert into indexes
-    res = insertDocument(trx, documentId, revisionId, doc, operation,
-                         marker, options, options.waitForSync);
+    res = insertDocument(
+      *trx,
+      documentId,
+      revisionId,
+      doc,
+      operation,
+      marker,
+      options,
+      options.waitForSync
+    );
   } catch (basics::Exception const& ex) {
     res = Result(ex.code());
   } catch (std::bad_alloc const&) {
@@ -3278,8 +3301,11 @@ Result MMFilesCollection::deletePrimaryIndex(
 /// @brief creates a new entry in the secondary indexes
 Result MMFilesCollection::insertSecondaryIndexes(
-    arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-    VPackSlice const& doc, Index::OperationMode mode) {
+    arangodb::transaction::Methods& trx,
+    LocalDocumentId const& documentId,
+    velocypack::Slice const& doc,
+    Index::OperationMode mode
+) {
   // Coordinator doesn't know index internals
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   TRI_IF_FAILURE("InsertSecondaryIndexes") { return Result(TRI_ERROR_DEBUG); }
@@ -3324,8 +3350,11 @@ Result MMFilesCollection::insertSecondaryIndexes(
 /// @brief deletes an entry from the secondary indexes
 Result MMFilesCollection::deleteSecondaryIndexes(
-    arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-    VPackSlice const& doc, Index::OperationMode mode) {
+    transaction::Methods& trx,
+    LocalDocumentId const& documentId,
+    velocypack::Slice const& doc,
+    Index::OperationMode mode
+) {
   // Coordintor doesn't know index internals
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
@@ -3362,7 +3391,7 @@ Result MMFilesCollection::deleteSecondaryIndexes(
 }
 /// @brief enumerate all indexes of the collection, but don't fill them yet
-int MMFilesCollection::detectIndexes(transaction::Methods* trx) {
+int MMFilesCollection::detectIndexes(transaction::Methods& trx) {
   StorageEngine* engine = EngineSelectorFeature::ENGINE;
   VPackBuilder builder;
@@ -3393,12 +3422,14 @@ int MMFilesCollection::detectIndexes(transaction::Methods* trx) {
 /// This function guarantees all or nothing,
 /// If it returns NO_ERROR all indexes are filled.
 /// If it returns an error no documents are inserted
-Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
-                                        LocalDocumentId const& documentId,
-                                        VPackSlice const& doc,
-                                        OperationOptions& options) {
+Result MMFilesCollection::insertIndexes(
+    transaction::Methods& trx,
+    LocalDocumentId const& documentId,
+    velocypack::Slice const& doc,
+    OperationOptions& options
+) {
   // insert into primary index first
-  Result res = insertPrimaryIndex(trx, documentId, doc, options);
+  auto res = insertPrimaryIndex(&trx, documentId, doc, options);
   if (res.fail()) {
     // insert has failed
@@ -3411,7 +3442,7 @@ Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
   if (res.fail()) {
     deleteSecondaryIndexes(trx, documentId, doc,
                            Index::OperationMode::rollback);
-    deletePrimaryIndex(trx, documentId, doc, options);
+    deletePrimaryIndex(&trx, documentId, doc, options);
   }
   return res;
 }
@@ -3419,14 +3450,21 @@ Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
 /// @brief insert a document, low level worker
 /// the caller must make sure the write lock on the collection is held
 Result MMFilesCollection::insertDocument(
-    arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-    TRI_voc_rid_t revisionId, VPackSlice const& doc,
-    MMFilesDocumentOperation& operation, MMFilesWalMarker const* marker,
-    OperationOptions& options, bool& waitForSync) {
+    arangodb::transaction::Methods& trx,
+    LocalDocumentId const& documentId,
+    TRI_voc_rid_t revisionId,
+    velocypack::Slice const& doc,
+    MMFilesDocumentOperation& operation,
+    MMFilesWalMarker const* marker,
+    OperationOptions& options,
+    bool& waitForSync
+) {
   Result res = insertIndexes(trx, documentId, doc, options);
   if (res.fail()) {
     return res;
   }
   operation.indexed();
   TRI_IF_FAILURE("InsertDocumentNoOperation") { return Result(TRI_ERROR_DEBUG); }
@@ -3435,7 +3473,8 @@ Result MMFilesCollection::insertDocument(
     THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
   }
-  return Result(static_cast<MMFilesTransactionState*>(trx->state())
+  return Result(
+    static_cast<MMFilesTransactionState*>(trx.state())
       ->addOperation(documentId, revisionId, operation, marker, waitForSync));
 }
@@ -3554,9 +3593,18 @@ Result MMFilesCollection::update(
   operation.setDocumentIds(MMFilesDocumentDescriptor(oldDocumentId, oldDoc.begin()),
                            MMFilesDocumentDescriptor(documentId, newDoc.begin()));
-  res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
-                       newDoc, operation, marker, options,
-                       options.waitForSync);
+  res = updateDocument(
+    *trx,
+    revisionId,
+    oldDocumentId,
+    oldDoc,
+    documentId,
+    newDoc,
+    operation,
+    marker,
+    options,
+    options.waitForSync
+  );
   if (res.ok() && callbackDuringLock != nullptr) {
     res = callbackDuringLock();
@@ -3693,9 +3741,18 @@ Result MMFilesCollection::replace(
   operation.setDocumentIds(MMFilesDocumentDescriptor(oldDocumentId, oldDoc.begin()),
                            MMFilesDocumentDescriptor(documentId, newDoc.begin()));
-  res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
-                       newDoc, operation, marker, options,
-                       options.waitForSync);
+  res = updateDocument(
+    *trx,
+    revisionId,
+    oldDocumentId,
+    oldDoc,
+    documentId,
+    newDoc,
+    operation,
+    marker,
+    options,
+    options.waitForSync
+  );
   if (res.ok() && callbackDuringLock != nullptr) {
     res = callbackDuringLock();
@@ -3725,17 +3782,25 @@ Result MMFilesCollection::replace(
 }
 Result MMFilesCollection::remove(
-    arangodb::transaction::Methods* trx, VPackSlice slice,
-    arangodb::ManagedDocumentResult& previous, OperationOptions& options,
-    TRI_voc_tick_t& resultMarkerTick, bool lock, TRI_voc_rid_t& prevRev,
-    TRI_voc_rid_t& revisionId, KeyLockInfo* keyLockInfo,
-    std::function<Result(void)> callbackDuringLock) {
+    transaction::Methods& trx,
+    velocypack::Slice slice,
+    ManagedDocumentResult& previous,
+    OperationOptions& options,
+    TRI_voc_tick_t& resultMarkerTick,
+    bool lock,
+    TRI_voc_rid_t& prevRev,
+    TRI_voc_rid_t& revisionId,
+    KeyLockInfo* keyLockInfo,
+    std::function<Result(void)> callbackDuringLock
+) {
   prevRev = 0;
-  LocalDocumentId const documentId = LocalDocumentId::create();
-  transaction::BuilderLeaser builder(trx);
-  newObjectForRemove(trx, slice, *builder.get(), options.isRestore, revisionId);
+  LocalDocumentId const documentId = LocalDocumentId::create();
+  transaction::BuilderLeaser builder(&trx);
+  newObjectForRemove(
+    &trx, slice, *builder.get(), options.isRestore, revisionId
+  );
   TRI_IF_FAILURE("RemoveDocumentNoMarker") {
     // test what happens when no marker can be created
@@ -3750,7 +3815,7 @@ Result MMFilesCollection::remove(
   // create marker
   MMFilesCrudMarker removeMarker(
       TRI_DF_MARKER_VPACK_REMOVE,
-      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(),
+      static_cast<MMFilesTransactionState*>(trx.state())->idForMarker(),
       documentId,
       builder->slice());
   MMFilesWalMarker const* marker;
@@ -3785,12 +3850,15 @@ Result MMFilesCollection::remove(
     &_logicalCollection, TRI_VOC_DOCUMENT_OPERATION_REMOVE
   );
   bool const useDeadlockDetector =
-      (lock && !trx->isSingleOperationTransaction() && !trx->state()->hasHint(transaction::Hints::Hint::NO_DLD));
+      (lock
+       && !trx.isSingleOperationTransaction()
+       && !trx.state()->hasHint(transaction::Hints::Hint::NO_DLD)
+      );
   arangodb::MMFilesCollectionWriteLocker collectionLocker(
-      this, useDeadlockDetector, trx->state(), lock);
+      this, useDeadlockDetector, trx.state(), lock);
   // get the previous revision
-  Result res = lookupDocument(trx, key, previous);
+  Result res = lookupDocument(&trx, key, previous);
   if (res.fail()) {
     return res;
@@ -3804,7 +3872,8 @@ Result MMFilesCollection::remove(
   // Check old revision:
   if (!options.ignoreRevs && slice.isObject()) {
     TRI_voc_rid_t expectedRevisionId = TRI_ExtractRevisionId(slice);
-    res = checkRevision(trx, expectedRevisionId, oldRevisionId);
+    res = checkRevision(&trx, expectedRevisionId, oldRevisionId);
     if (res.fail()) {
       return res;
@@ -3826,7 +3895,7 @@ Result MMFilesCollection::remove(
     THROW_ARANGO_EXCEPTION(res);
   }
-  res = deletePrimaryIndex(trx, oldDocumentId, oldDoc, options);
+  res = deletePrimaryIndex(&trx, oldDocumentId, oldDoc, options);
   if (res.fail()) {
     insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
@@ -3849,9 +3918,9 @@ Result MMFilesCollection::remove(
       THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
     }
-    res =
-        static_cast<MMFilesTransactionState*>(trx->state())
-            ->addOperation(documentId, revisionId, operation, marker, options.waitForSync);
+    res = static_cast<MMFilesTransactionState*>(trx.state())->addOperation(
+      documentId, revisionId, operation, marker, options.waitForSync
+    );
     if (res.ok() && callbackDuringLock != nullptr) {
       res = callbackDuringLock();
@@ -3867,7 +3936,7 @@ Result MMFilesCollection::remove(
   }
   if (res.fail()) {
-    operation.revert(trx);
+    operation.revert(&trx);
   } else {
     // store the tick that was used for removing the document
     resultMarkerTick = operation.tick();
@@ -3890,10 +3959,15 @@ void MMFilesCollection::deferDropCollection(
 /// @brief rolls back a document operation
 Result MMFilesCollection::rollbackOperation(
-    transaction::Methods* trx, TRI_voc_document_operation_e type,
-    LocalDocumentId const& oldDocumentId, VPackSlice const& oldDoc,
-    LocalDocumentId const& newDocumentId, VPackSlice const& newDoc) {
+    transaction::Methods& trx,
+    TRI_voc_document_operation_e type,
+    LocalDocumentId const& oldDocumentId,
+    velocypack::Slice const& oldDoc,
+    LocalDocumentId const& newDocumentId,
+    velocypack::Slice const& newDoc
+) {
   OperationOptions options;
   options.indexOperationMode= Index::OperationMode::rollback;
   if (type == TRI_VOC_DOCUMENT_OPERATION_INSERT) {
@@ -3903,9 +3977,11 @@ Result MMFilesCollection::rollbackOperation(
     TRI_ASSERT(!newDoc.isNone());
     // ignore any errors we're getting from this
-    deletePrimaryIndex(trx, newDocumentId, newDoc, options);
-    deleteSecondaryIndexes(trx, newDocumentId, newDoc,
-                           Index::OperationMode::rollback);
+    deletePrimaryIndex(&trx, newDocumentId, newDoc, options);
+    deleteSecondaryIndexes(
+      trx, newDocumentId, newDoc, Index::OperationMode::rollback
+    );
     return TRI_ERROR_NO_ERROR;
   }
@@ -3931,11 +4007,12 @@ Result MMFilesCollection::rollbackOperation(
     TRI_ASSERT(newDocumentId.empty());
     TRI_ASSERT(newDoc.isNone());
-    Result res = insertPrimaryIndex(trx, oldDocumentId, oldDoc, options);
+    auto res = insertPrimaryIndex(&trx, oldDocumentId, oldDoc, options);
     if (res.ok()) {
-      res = insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
-                                   Index::OperationMode::rollback);
+      res = insertSecondaryIndexes(
+        trx, oldDocumentId, oldDoc, Index::OperationMode::rollback
+      );
     } else {
       LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
           << "error rolling back remove operation";
@@ -3951,13 +4028,15 @@ Result MMFilesCollection::rollbackOperation(
 }
 /// @brief removes a document or edge, fast path function for database documents
-Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
-                                         TRI_voc_rid_t revisionId,
-                                         LocalDocumentId const& oldDocumentId,
-                                         VPackSlice const oldDoc,
-                                         OperationOptions& options,
-                                         LocalDocumentId const& documentId,
-                                         VPackSlice const toRemove) {
+Result MMFilesCollection::removeFastPath(
+    transaction::Methods& trx,
+    TRI_voc_rid_t revisionId,
+    LocalDocumentId const& oldDocumentId,
+    velocypack::Slice const oldDoc,
+    OperationOptions& options,
+    LocalDocumentId const& documentId,
+    velocypack::Slice const toRemove
+) {
   TRI_IF_FAILURE("RemoveDocumentNoMarker") {
     // test what happens when no marker can be created
     return Result(TRI_ERROR_DEBUG);
@@ -3971,9 +4050,10 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
   // create marker
   MMFilesCrudMarker removeMarker(
       TRI_DF_MARKER_VPACK_REMOVE,
-      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(),
+      static_cast<MMFilesTransactionState*>(trx.state())->idForMarker(),
       documentId,
-      toRemove);
+      toRemove
+  );
   MMFilesWalMarker const* marker = &removeMarker;
@@ -4005,7 +4085,7 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
     THROW_ARANGO_EXCEPTION(res);
   }
-  res = deletePrimaryIndex(trx, oldDocumentId, oldDoc, options);
+  res = deletePrimaryIndex(&trx, oldDocumentId, oldDoc, options);
   if (res.fail()) {
     insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
@@ -4028,9 +4108,9 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
       THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
     }
-    res =
-        static_cast<MMFilesTransactionState*>(trx->state())
-            ->addOperation(documentId, revisionId, operation, marker, options.waitForSync);
+    res = static_cast<MMFilesTransactionState*>(trx.state())->addOperation(
+      documentId, revisionId, operation, marker, options.waitForSync
+    );
   } catch (basics::Exception const& ex) {
     res = Result(ex.code());
   } catch (std::bad_alloc const&) {
@@ -4042,7 +4122,7 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
   }
   if (res.fail()) {
-    operation.revert(trx);
+    operation.revert(&trx);
   }
   return res;
@@ -4075,12 +4155,17 @@ Result MMFilesCollection::lookupDocument(transaction::Methods* trx,
 /// @brief updates an existing document, low level worker
 /// the caller must make sure the write lock on the collection is held
 Result MMFilesCollection::updateDocument(
-    transaction::Methods* trx,TRI_voc_rid_t revisionId,
+    transaction::Methods& trx,
+    TRI_voc_rid_t revisionId,
     LocalDocumentId const& oldDocumentId,
-    VPackSlice const& oldDoc, LocalDocumentId const& newDocumentId,
-    VPackSlice const& newDoc, MMFilesDocumentOperation& operation,
-    MMFilesWalMarker const* marker, OperationOptions& options,
-    bool& waitForSync) {
+    velocypack::Slice const& oldDoc,
+    LocalDocumentId const& newDocumentId,
+    velocypack::Slice const& newDoc,
+    MMFilesDocumentOperation& operation,
+    MMFilesWalMarker const* marker,
+    OperationOptions& options,
+    bool& waitForSync
+) {
   // remove old document from secondary indexes
   // (it will stay in the primary index as the key won't change)
   Result res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc,
@@ -4110,8 +4195,8 @@ Result MMFilesCollection::updateDocument(
   // adjusted)
   // TODO: pass key into this function so it does not have to be looked up again
   VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(newDoc));
-  MMFilesSimpleIndexElement* element =
-      primaryIndex()->lookupKeyRef(trx, keySlice);
+  auto* element = primaryIndex()->lookupKeyRef(&trx, keySlice);
   if (element != nullptr && element->isSet()) {
     element->updateLocalDocumentId(
         newDocumentId,
@@ -4133,8 +4218,11 @@ Result MMFilesCollection::updateDocument(
     THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
   }
-  return Result(static_cast<MMFilesTransactionState*>(trx->state())
-      ->addOperation(newDocumentId, revisionId, operation, marker, waitForSync));
+  return Result(
+    static_cast<MMFilesTransactionState*>(trx.state())->addOperation(
+      newDocumentId, revisionId, operation, marker, waitForSync
+    )
+  );
 }
 void MMFilesCollection::lockKey(KeyLockInfo& keyLockInfo, VPackSlice const& key) {

View File

@@ -243,7 +243,7 @@ class MMFilesCollection final : public PhysicalCollection {
   void useSecondaryIndexes(bool value) { _useSecondaryIndexes = value; }
-  int fillAllIndexes(transaction::Methods*);
+  int fillAllIndexes(transaction::Methods& trx);
   void prepareIndexes(arangodb::velocypack::Slice indexesSlice) override;
@@ -258,9 +258,13 @@ class MMFilesCollection final : public PhysicalCollection {
   std::shared_ptr<Index> createIndex(arangodb::velocypack::Slice const& info,
                                      bool restore, bool& created) override;
-  std::shared_ptr<Index> createIndex(transaction::Methods* trx,
-                                     arangodb::velocypack::Slice const& info,
-                                     bool restore, bool& created);
+  std::shared_ptr<Index> createIndex(
+      transaction::Methods& trx,
+      velocypack::Slice const& info,
+      bool restore,
+      bool& created
+  );
   /// @brief Drop an index with the given iid.
   bool dropIndex(TRI_idx_iid_t iid) override;
@@ -281,7 +285,10 @@ class MMFilesCollection final : public PhysicalCollection {
   // -- SECTION DML Operations --
   ///////////////////////////////////
-  Result truncate(transaction::Methods* trx, OperationOptions&) override;
+  Result truncate(
+      transaction::Methods& trx,
+      OperationOptions& options
+  ) override;
   /// @brief Defer a callback to be executed when the collection
   /// can be dropped. The callback is supposed to drop
@@ -340,19 +347,27 @@ class MMFilesCollection final : public PhysicalCollection {
                 TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
                 std::function<Result(void)> callbackDuringLock) override;
-  Result remove(arangodb::transaction::Methods* trx,
-                arangodb::velocypack::Slice slice,
-                arangodb::ManagedDocumentResult& previous,
-                OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
-                bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
-                KeyLockInfo* keyLockInfo,
-                std::function<Result(void)> callbackDuringLock) override;
+  Result remove(
+      transaction::Methods& trx,
+      velocypack::Slice slice,
+      ManagedDocumentResult& previous,
+      OperationOptions& options,
+      TRI_voc_tick_t& resultMarkerTick,
+      bool lock,
+      TRI_voc_rid_t& prevRev,
+      TRI_voc_rid_t& revisionId,
+      KeyLockInfo* keyLockInfo,
+      std::function<Result(void)> callbackDuringLock
+  ) override;
-  Result rollbackOperation(transaction::Methods*, TRI_voc_document_operation_e,
-                           LocalDocumentId const& oldDocumentId,
-                           velocypack::Slice const& oldDoc,
-                           LocalDocumentId const& newDocumentId,
-                           velocypack::Slice const& newDoc);
+  Result rollbackOperation(
+      transaction::Methods& trx,
+      TRI_voc_document_operation_e type,
+      LocalDocumentId const& oldDocumentId,
+      velocypack::Slice const& oldDoc,
+      LocalDocumentId const& newDocumentId,
+      velocypack::Slice const& newDoc
+  );
   MMFilesDocumentPosition insertLocalDocumentId(LocalDocumentId const& documentId,
                                                 uint8_t const* dataptr,
@@ -378,27 +393,38 @@ class MMFilesCollection final : public PhysicalCollection {
  private:
   void sizeHint(transaction::Methods* trx, int64_t hint);
-  bool openIndex(VPackSlice const& description, transaction::Methods* trx);
+  bool openIndex(
+      velocypack::Slice const& description,
+      transaction::Methods& trx
+  );
   /// @brief initializes an index with all existing documents
-  void fillIndex(std::shared_ptr<basics::LocalTaskQueue>, transaction::Methods*, Index*,
+  void fillIndex(
std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>>, std::shared_ptr<basics::LocalTaskQueue> queue,
bool); transaction::Methods& trx,
Index* index,
std::shared_ptr<std::vector<std::pair<LocalDocumentId, velocypack::Slice>>> docs,
bool skipPersistent
);
/// @brief Fill indexes used in recovery /// @brief Fill indexes used in recovery
int fillIndexes(transaction::Methods*, int fillIndexes(
std::vector<std::shared_ptr<Index>> const&, transaction::Methods& trx,
bool skipPersistent = true); std::vector<std::shared_ptr<Index>> const& indexes,
bool skipPersistent = true
);
int openWorker(bool ignoreErrors); int openWorker(bool ignoreErrors);
Result removeFastPath(arangodb::transaction::Methods* trx, Result removeFastPath(
transaction::Methods& trx,
TRI_voc_rid_t revisionId, TRI_voc_rid_t revisionId,
LocalDocumentId const& oldDocumentId, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const oldDoc, velocypack::Slice const oldDoc,
OperationOptions& options, OperationOptions& options,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const toRemove); velocypack::Slice const toRemove
);
static int OpenIteratorHandleDocumentMarker(MMFilesMarker const* marker, static int OpenIteratorHandleDocumentMarker(MMFilesMarker const* marker,
MMFilesDatafile* datafile, MMFilesDatafile* datafile,
@ -437,13 +463,16 @@ class MMFilesCollection final : public PhysicalCollection {
MMFilesDocumentPosition lookupDocument(LocalDocumentId const& documentId) const; MMFilesDocumentPosition lookupDocument(LocalDocumentId const& documentId) const;
Result insertDocument(arangodb::transaction::Methods* trx, Result insertDocument(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
TRI_voc_rid_t revisionId, TRI_voc_rid_t revisionId,
arangodb::velocypack::Slice const& doc, velocypack::Slice const& doc,
MMFilesDocumentOperation& operation, MMFilesDocumentOperation& operation,
MMFilesWalMarker const* marker, MMFilesWalMarker const* marker,
OperationOptions& options, bool& waitForSync); OperationOptions& options,
bool& waitForSync
);
uint8_t const* lookupDocumentVPack(LocalDocumentId const& documentId) const; uint8_t const* lookupDocumentVPack(LocalDocumentId const& documentId) const;
uint8_t const* lookupDocumentVPackConditional(LocalDocumentId const& documentId, uint8_t const* lookupDocumentVPackConditional(LocalDocumentId const& documentId,
@ -462,39 +491,51 @@ class MMFilesCollection final : public PhysicalCollection {
// SECTION: Index storage // SECTION: Index storage
int saveIndex(transaction::Methods* trx, int saveIndex(transaction::Methods& trx, std::shared_ptr<Index> idx);
std::shared_ptr<arangodb::Index> idx);
/// @brief Detect all indexes form file /// @brief Detect all indexes form file
int detectIndexes(transaction::Methods* trx); int detectIndexes(transaction::Methods& trx);
Result insertIndexes(transaction::Methods* trx, LocalDocumentId const& documentId, velocypack::Slice const& doc, OperationOptions& options); Result insertIndexes(
transaction::Methods& trx,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
OperationOptions& options
);
Result insertPrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options); Result insertPrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options);
Result deletePrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options); Result deletePrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options);
Result insertSecondaryIndexes(transaction::Methods*, Result insertSecondaryIndexes(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
velocypack::Slice const&, velocypack::Slice const& doc,
Index::OperationMode mode); Index::OperationMode mode
);
Result deleteSecondaryIndexes(transaction::Methods*, Result deleteSecondaryIndexes(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
velocypack::Slice const&, velocypack::Slice const& doc,
Index::OperationMode mode); Index::OperationMode mode
);
Result lookupDocument(transaction::Methods*, velocypack::Slice, Result lookupDocument(transaction::Methods*, velocypack::Slice,
ManagedDocumentResult& result); ManagedDocumentResult& result);
Result updateDocument(transaction::Methods*, TRI_voc_rid_t revisionId, Result updateDocument(
transaction::Methods& trx,
TRI_voc_rid_t revisionId,
LocalDocumentId const& oldDocumentId, LocalDocumentId const& oldDocumentId,
velocypack::Slice const& oldDoc, velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId, LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc, velocypack::Slice const& newDoc,
MMFilesDocumentOperation&, MMFilesDocumentOperation& operation,
MMFilesWalMarker const*, OperationOptions& options, MMFilesWalMarker const* marker,
bool& waitForSync); OperationOptions& options,
bool& waitForSync
);
LocalDocumentId reuseOrCreateLocalDocumentId(OperationOptions const& options) const; LocalDocumentId reuseOrCreateLocalDocumentId(OperationOptions const& options) const;
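The header changes above migrate a whole family of virtual methods from transaction::Methods* to transaction::Methods&; every derived class must change in lock-step, and the explicit override specifiers turn any signature drift into a compile error. A small sketch of that mechanism, using hypothetical stand-in types rather than the real PhysicalCollection hierarchy:

// Hypothetical stand-ins; not the ArangoDB classes.
struct Methods {};
struct Slice {};
struct Result { int code = 0; };

struct PhysicalCollectionLike {
  virtual ~PhysicalCollectionLike() = default;
  // New-style signature: reference instead of pointer.
  virtual Result remove(Methods& trx, Slice doc) = 0;
};

struct MMFilesLike final : PhysicalCollectionLike {
  // 'override' turns a signature mismatch (e.g. still taking Methods*)
  // into a compile error instead of a silently unrelated function.
  Result remove(Methods& trx, Slice doc) override {
    (void)trx;
    (void)doc;
    return Result{};
  }
};

int main() {
  MMFilesLike coll;
  Methods trx;
  return coll.remove(trx, Slice{}).code;
}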
View File
@ -126,7 +126,9 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
if (status != StatusType::CREATED) { if (status != StatusType::CREATED) {
// remove document from indexes // remove document from indexes
try { try {
physical->rollbackOperation(trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc); physical->rollbackOperation(
*trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc
);
} catch (...) { } catch (...) {
} }
} }
@ -151,7 +153,9 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
if (status != StatusType::CREATED) { if (status != StatusType::CREATED) {
try { try {
// restore the old index state // restore the old index state
physical->rollbackOperation(trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc); physical->rollbackOperation(
*trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc
);
} catch (...) { } catch (...) {
} }
} }
@ -159,10 +163,13 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
// let the primary index entry point to the correct document // let the primary index entry point to the correct document
MMFilesSimpleIndexElement* element = physical->primaryIndex()->lookupKeyRef( MMFilesSimpleIndexElement* element = physical->primaryIndex()->lookupKeyRef(
trx, transaction::helpers::extractKeyFromDocument(newDoc)); trx, transaction::helpers::extractKeyFromDocument(newDoc));
if (element != nullptr && element->isSet()) { if (element != nullptr && element->isSet()) {
VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(oldDoc)); VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(oldDoc));
element->updateLocalDocumentId(oldDocumentId, static_cast<uint32_t>(keySlice.begin() - oldDoc.begin())); element->updateLocalDocumentId(oldDocumentId, static_cast<uint32_t>(keySlice.begin() - oldDoc.begin()));
} }
physical->updateLocalDocumentId(oldDocumentId, oldDoc.begin(), 0, false); physical->updateLocalDocumentId(oldDocumentId, oldDoc.begin(), 0, false);
// remove now obsolete new document // remove now obsolete new document
@ -185,7 +192,9 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
if (status != StatusType::CREATED) { if (status != StatusType::CREATED) {
try { try {
// remove from indexes again // remove from indexes again
physical->rollbackOperation(trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc); physical->rollbackOperation(
*trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc
);
} catch (...) { } catch (...) {
} }
} }
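revert() itself still receives a transaction::Methods*, so the calls above dereference it once with *trx when entering the reference-based rollbackOperation(); the validity check is concentrated at that single boundary instead of being repeated inside every index routine. A hedged, stand-alone sketch of that boundary (Methods, rollbackOperation and revert here are illustrative stand-ins):

#include <cassert>

// Illustrative stand-ins, not the ArangoDB API.
struct Methods {};

// New reference-based internal API: cannot receive null.
void rollbackOperation(Methods& trx) {
  (void)trx;  // ... undo index changes ...
}

// Legacy pointer-based entry point that still exists at the boundary.
void revert(Methods* trx) {
  assert(trx != nullptr);   // checked once, here
  rollbackOperation(*trx);  // dereferenced once, here
}

int main() {
  Methods m;
  revert(&m);
  return 0;
}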
View File
@ -274,16 +274,17 @@ void MMFilesEdgeIndex::toVelocyPackFigures(VPackBuilder& builder) const {
builder.close(); builder.close();
} }
Result MMFilesEdgeIndex::insert(transaction::Methods* trx, Result MMFilesEdgeIndex::insert(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc)); MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc)); MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, 1); MMFilesIndexLookupContext context(&trx, &_collection, &result, 1);
_edgesFrom->insert(&context, fromElement, true, _edgesFrom->insert(&context, fromElement, true,
mode == OperationMode::rollback); mode == OperationMode::rollback);
@ -306,16 +307,17 @@ Result MMFilesEdgeIndex::insert(transaction::Methods* trx,
return res; return res;
} }
Result MMFilesEdgeIndex::remove(transaction::Methods* trx, Result MMFilesEdgeIndex::remove(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc)); MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc)); MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, 1); MMFilesIndexLookupContext context(&trx, &_collection, &result, 1);
try { try {
_edgesFrom->remove(&context, fromElement); _edgesFrom->remove(&context, fromElement);
@ -332,7 +334,7 @@ Result MMFilesEdgeIndex::remove(transaction::Methods* trx,
} }
void MMFilesEdgeIndex::batchInsert( void MMFilesEdgeIndex::batchInsert(
transaction::Methods* trx, transaction::Methods& trx,
std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents, std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) { std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
if (documents.empty()) { if (documents.empty()) {
@ -351,7 +353,7 @@ void MMFilesEdgeIndex::batchInsert(
auto creator = [&trx, this]() -> void* { auto creator = [&trx, this]() -> void* {
ManagedDocumentResult* result = new ManagedDocumentResult; ManagedDocumentResult* result = new ManagedDocumentResult;
return new MMFilesIndexLookupContext(trx, &_collection, result, 1); return new MMFilesIndexLookupContext(&trx, &_collection, result, 1);
}; };
auto destroyer = [](void* userData) { auto destroyer = [](void* userData) {
MMFilesIndexLookupContext* context = static_cast<MMFilesIndexLookupContext*>(userData); MMFilesIndexLookupContext* context = static_cast<MMFilesIndexLookupContext*>(userData);
@ -386,7 +388,7 @@ void MMFilesEdgeIndex::unload() {
} }
/// @brief provides a size hint for the edge index /// @brief provides a size hint for the edge index
int MMFilesEdgeIndex::sizeHint(transaction::Methods* trx, size_t size) { Result MMFilesEdgeIndex::sizeHint(transaction::Methods& trx, size_t size) {
// we assume this is called when setting up the index and the index // we assume this is called when setting up the index and the index
// is still empty // is still empty
TRI_ASSERT(_edgesFrom->size() == 0); TRI_ASSERT(_edgesFrom->size() == 0);
@ -394,7 +396,7 @@ int MMFilesEdgeIndex::sizeHint(transaction::Methods* trx, size_t size) {
// set an initial size for the index for some new nodes to be created // set an initial size for the index for some new nodes to be created
// without resizing // without resizing
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, 1); MMFilesIndexLookupContext context(&trx, &_collection, &result, 1);
int err = _edgesFrom->resize(&context, size + 2049); int err = _edgesFrom->resize(&context, size + 2049);
if (err != TRI_ERROR_NO_ERROR) { if (err != TRI_ERROR_NO_ERROR) {
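Besides the reference parameters, sizeHint() changes its return type from int to Result, so callers receive an error object rather than a bare error number. The sketch below shows how a legacy integer code can be wrapped; the Result type and error numbers here are stand-ins only, since the real arangodb::Result carries more state (error message, etc.).

#include <cstddef>
#include <iostream>

// Minimal stand-in; the real arangodb::Result is richer.
class Result {
 public:
  Result() = default;
  explicit Result(int code) : _code(code) {}
  bool ok() const { return _code == 0; }
  bool fail() const { return !ok(); }
  int errorNumber() const { return _code; }
 private:
  int _code = 0;
};

// Illustrative error numbers (values are placeholders).
constexpr int ERROR_NO_ERROR = 0;
constexpr int ERROR_RESIZE_FAILED = 1;

// Before: int sizeHint(...); after: Result sizeHint(...).
// Wrapping the old integer keeps existing error numbers intact.
Result sizeHint(std::size_t requested, std::size_t capacityLimit) {
  int err = (requested > capacityLimit) ? ERROR_RESIZE_FAILED : ERROR_NO_ERROR;
  return Result(err);
}

int main() {
  Result r = sizeHint(4096, 1024);
  std::cout << (r.fail() ? "resize failed: " : "ok: ") << r.errorNumber() << '\n';
  return 0;
}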
View File
@ -167,20 +167,30 @@ class MMFilesEdgeIndex final : public MMFilesIndex {
void toVelocyPackFigures(VPackBuilder&) const override; void toVelocyPackFigures(VPackBuilder&) const override;
Result insert(transaction::Methods*, LocalDocumentId const& documentId, void batchInsert(
arangodb::velocypack::Slice const&, OperationMode mode) override; transaction::Methods & trx,
std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& docs,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
) override;
Result remove(transaction::Methods*, LocalDocumentId const& documentId, Result insert(
arangodb::velocypack::Slice const&, OperationMode mode) override; transaction::Methods& trx,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
void batchInsert(transaction::Methods*, Result remove(
std::vector<std::pair<LocalDocumentId, VPackSlice>> const&, transaction::Methods& trx,
std::shared_ptr<arangodb::basics::LocalTaskQueue>) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
void load() override {} void load() override {}
void unload() override; void unload() override;
int sizeHint(transaction::Methods*, size_t) override; Result sizeHint(transaction::Methods& trx, size_t size) override;
bool hasBatchInsert() const override { return true; } bool hasBatchInsert() const override { return true; }
View File
@ -214,9 +214,12 @@ bool MMFilesFulltextIndex::matchesDefinition(VPackSlice const& info) const {
return true; return true;
} }
Result MMFilesFulltextIndex::insert(transaction::Methods*, Result MMFilesFulltextIndex::insert(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationMode mode) { velocypack::Slice const& doc,
Index::OperationMode mode
) {
Result res; Result res;
int r = TRI_ERROR_NO_ERROR; int r = TRI_ERROR_NO_ERROR;
std::set<std::string> words = wordlist(doc); std::set<std::string> words = wordlist(doc);
@ -229,9 +232,12 @@ Result MMFilesFulltextIndex::insert(transaction::Methods*,
return res; return res;
} }
Result MMFilesFulltextIndex::remove(transaction::Methods*, Result MMFilesFulltextIndex::remove(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationMode mode) { velocypack::Slice const& doc,
Index::OperationMode mode
) {
Result res; Result res;
int r = TRI_ERROR_NO_ERROR; int r = TRI_ERROR_NO_ERROR;
std::set<std::string> words = wordlist(doc); std::set<std::string> words = wordlist(doc);
View File
@ -66,13 +66,19 @@ class MMFilesFulltextIndex final : public MMFilesIndex {
bool matchesDefinition(VPackSlice const&) const override; bool matchesDefinition(VPackSlice const&) const override;
Result insert(transaction::Methods*, LocalDocumentId const& documentId, Result insert(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
Result remove(transaction::Methods*, LocalDocumentId const& documentId, Result remove(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
void load() override {} void load() override {}
void unload() override; void unload() override;
View File
@ -328,9 +328,12 @@ bool MMFilesGeoIndex::matchesDefinition(VPackSlice const& info) const {
return true; return true;
} }
Result MMFilesGeoIndex::insert(transaction::Methods*, Result MMFilesGeoIndex::insert(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationMode mode) { velocypack::Slice const& doc,
arangodb::Index::OperationMode mode
) {
// covering and centroid of coordinate / polygon / ... // covering and centroid of coordinate / polygon / ...
size_t reserve = _variant == Variant::GEOJSON ? 8 : 1; size_t reserve = _variant == Variant::GEOJSON ? 8 : 1;
std::vector<S2CellId> cells; std::vector<S2CellId> cells;
@ -357,9 +360,12 @@ Result MMFilesGeoIndex::insert(transaction::Methods*,
return res; return res;
} }
Result MMFilesGeoIndex::remove(transaction::Methods*, Result MMFilesGeoIndex::remove(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationMode mode) { velocypack::Slice const& doc,
arangodb::Index::OperationMode mode
) {
// covering and centroid of coordinate / polygon / ... // covering and centroid of coordinate / polygon / ...
size_t reserve = _variant == Variant::GEOJSON ? 8 : 1; size_t reserve = _variant == Variant::GEOJSON ? 8 : 1;
std::vector<S2CellId> cells(reserve); std::vector<S2CellId> cells(reserve);
View File
@ -86,13 +86,19 @@ class MMFilesGeoIndex final : public MMFilesIndex, public geo_index::Index {
bool matchesDefinition(velocypack::Slice const& info) const override; bool matchesDefinition(velocypack::Slice const& info) const override;
Result insert(transaction::Methods*, LocalDocumentId const& documentId, Result insert(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
arangodb::Index::OperationMode mode
) override;
Result remove(transaction::Methods*, LocalDocumentId const& documentId, Result remove(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
arangodb::Index::OperationMode mode
) override;
IndexIterator* iteratorForCondition(transaction::Methods*, IndexIterator* iteratorForCondition(transaction::Methods*,
ManagedDocumentResult*, ManagedDocumentResult*,
View File
@ -474,24 +474,27 @@ bool MMFilesHashIndex::matchesDefinition(VPackSlice const& info) const {
return true; return true;
} }
Result MMFilesHashIndex::insert(transaction::Methods* trx, Result MMFilesHashIndex::insert(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
if (_unique) { if (_unique) {
return insertUnique(trx, documentId, doc, mode); return insertUnique(&trx, documentId, doc, mode);
} }
return insertMulti(trx, documentId, doc, mode); return insertMulti(&trx, documentId, doc, mode);
} }
/// @brief removes an entry from the hash array part of the hash index /// @brief removes an entry from the hash array part of the hash index
Result MMFilesHashIndex::remove(transaction::Methods* trx, Result MMFilesHashIndex::remove(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
std::vector<MMFilesHashIndexElement*> elements; std::vector<MMFilesHashIndexElement*> elements;
int r = fillElement<MMFilesHashIndexElement>(elements, documentId, doc); int r = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
@ -505,9 +508,9 @@ Result MMFilesHashIndex::remove(transaction::Methods* trx,
for (auto& hashElement : elements) { for (auto& hashElement : elements) {
int result; int result;
if (_unique) { if (_unique) {
result = removeUniqueElement(trx, hashElement, mode); result = removeUniqueElement(&trx, hashElement, mode);
} else { } else {
result = removeMultiElement(trx, hashElement, mode); result = removeMultiElement(&trx, hashElement, mode);
} }
// we may be looping through this multiple times, and if an error // we may be looping through this multiple times, and if an error
@ -522,14 +525,15 @@ Result MMFilesHashIndex::remove(transaction::Methods* trx,
} }
void MMFilesHashIndex::batchInsert( void MMFilesHashIndex::batchInsert(
transaction::Methods* trx, transaction::Methods& trx,
std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents, std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& documents,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) { std::shared_ptr<basics::LocalTaskQueue> queue
) {
TRI_ASSERT(queue != nullptr); TRI_ASSERT(queue != nullptr);
if (_unique) { if (_unique) {
batchInsertUnique(trx, documents, queue); batchInsertUnique(&trx, documents, queue);
} else { } else {
batchInsertMulti(trx, documents, queue); batchInsertMulti(&trx, documents, queue);
} }
} }
@ -545,7 +549,7 @@ void MMFilesHashIndex::unload() {
} }
/// @brief provides a size hint for the hash index /// @brief provides a size hint for the hash index
int MMFilesHashIndex::sizeHint(transaction::Methods* trx, size_t size) { Result MMFilesHashIndex::sizeHint(transaction::Methods& trx, size_t size) {
if (_sparse) { if (_sparse) {
// for sparse indexes, we assume that we will have less index entries // for sparse indexes, we assume that we will have less index entries
// than if the index would be fully populated // than if the index would be fully populated
@ -553,7 +557,7 @@ int MMFilesHashIndex::sizeHint(transaction::Methods* trx, size_t size) {
} }
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths()); MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());
if (_unique) { if (_unique) {
return _uniqueArray->_hashArray->resize(&context, size); return _uniqueArray->_hashArray->resize(&context, size);
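MMFilesIndexLookupContext (and a few other helpers) still take a transaction::Methods*, so code that now holds a reference simply passes &trx, as in the hunk above; taking the address of a reference can never introduce a null. A tiny illustrative sketch, where the context type is a stand-in and not the real MMFilesIndexLookupContext:

// Stand-in types for illustration only.
struct Methods {};

class LookupContextLike {
 public:
  explicit LookupContextLike(Methods* trx) : _trx(trx) {}
  bool valid() const { return _trx != nullptr; }
 private:
  Methods* _trx;
};

// New-style function holding a reference, bridging to a pointer-based helper.
bool resizeIndex(Methods& trx) {
  LookupContextLike context(&trx);  // address of a reference is never null
  return context.valid();
}

int main() {
  Methods trx;
  return resizeIndex(trx) ? 0 : 1;
}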
View File
@ -264,22 +264,29 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
bool matchesDefinition(VPackSlice const& info) const override; bool matchesDefinition(VPackSlice const& info) const override;
Result insert(transaction::Methods*, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) override;
Result remove(transaction::Methods*, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) override;
void batchInsert( void batchInsert(
transaction::Methods*, transaction::Methods& trx,
std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const&, std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& docs,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override; std::shared_ptr<basics::LocalTaskQueue> queue
) override;
Result insert(
transaction::Methods& trx,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
Result remove(
transaction::Methods& trx,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
void unload() override; void unload() override;
int sizeHint(transaction::Methods*, size_t) override; Result sizeHint(transaction::Methods& trx, size_t size) override;
bool hasBatchInsert() const override { return true; } bool hasBatchInsert() const override { return true; }
View File
@ -310,14 +310,16 @@ size_t MMFilesPersistentIndex::memory() const {
} }
/// @brief inserts a document into the index /// @brief inserts a document into the index
Result MMFilesPersistentIndex::insert(transaction::Methods* trx, Result MMFilesPersistentIndex::insert(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
std::vector<MMFilesSkiplistIndexElement*> elements; std::vector<MMFilesSkiplistIndexElement*> elements;
Result res; Result res;
int r; int r;
try { try {
r = fillElement(elements, documentId, doc); r = fillElement(elements, documentId, doc);
} catch (basics::Exception const& ex) { } catch (basics::Exception const& ex) {
@ -342,9 +344,9 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
} }
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths()); MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());
VPackSlice const key = transaction::helpers::extractKeyFromDocument(doc); VPackSlice const key = transaction::helpers::extractKeyFromDocument(doc);
auto prefix = buildPrefix(trx->vocbase().id(), _collection.id(), _iid); auto prefix = buildPrefix(trx.vocbase().id(), _collection.id(), _iid);
VPackBuilder builder; VPackBuilder builder;
std::vector<std::string> values; std::vector<std::string> values;
@ -412,7 +414,7 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
} }
auto rocksTransaction = auto rocksTransaction =
static_cast<MMFilesTransactionState*>(trx->state())->rocksTransaction(); static_cast<MMFilesTransactionState*>(trx.state())->rocksTransaction();
TRI_ASSERT(rocksTransaction != nullptr); TRI_ASSERT(rocksTransaction != nullptr);
auto comparator = MMFilesPersistentIndexFeature::instance()->comparator(); auto comparator = MMFilesPersistentIndexFeature::instance()->comparator();
@ -492,14 +494,16 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
} }
/// @brief removes a document from the index /// @brief removes a document from the index
Result MMFilesPersistentIndex::remove(transaction::Methods* trx, Result MMFilesPersistentIndex::remove(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
std::vector<MMFilesSkiplistIndexElement*> elements; std::vector<MMFilesSkiplistIndexElement*> elements;
Result res; Result res;
int r; int r;
try { try {
r = fillElement(elements, documentId, doc); r = fillElement(elements, documentId, doc);
} catch (basics::Exception const& ex) { } catch (basics::Exception const& ex) {
@ -524,7 +528,7 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
} }
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths()); MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());
VPackSlice const key = transaction::helpers::extractKeyFromDocument(doc); VPackSlice const key = transaction::helpers::extractKeyFromDocument(doc);
VPackBuilder builder; VPackBuilder builder;
std::vector<std::string> values; std::vector<std::string> values;
@ -544,13 +548,13 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
std::string value; std::string value;
value.reserve(keyPrefixSize() + s.byteSize()); value.reserve(keyPrefixSize() + s.byteSize());
value.append(buildPrefix(trx->vocbase().id(), _collection.id(), _iid)); value.append(buildPrefix(trx.vocbase().id(), _collection.id(), _iid));
value.append(s.startAs<char const>(), s.byteSize()); value.append(s.startAs<char const>(), s.byteSize());
values.emplace_back(std::move(value)); values.emplace_back(std::move(value));
} }
auto rocksTransaction = auto rocksTransaction =
static_cast<MMFilesTransactionState*>(trx->state())->rocksTransaction(); static_cast<MMFilesTransactionState*>(trx.state())->rocksTransaction();
TRI_ASSERT(rocksTransaction != nullptr); TRI_ASSERT(rocksTransaction != nullptr);
size_t const count = elements.size(); size_t const count = elements.size();
@ -571,7 +575,7 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
} }
/// @brief called when the index is dropped /// @brief called when the index is dropped
int MMFilesPersistentIndex::drop() { Result MMFilesPersistentIndex::drop() {
return MMFilesPersistentIndexFeature::instance()->dropIndex( return MMFilesPersistentIndexFeature::instance()->dropIndex(
_collection.vocbase().id(), _collection.id(), _iid _collection.vocbase().id(), _collection.id(), _iid
); );
View File
@ -161,17 +161,23 @@ class MMFilesPersistentIndex final : public MMFilesPathBasedIndex {
return value; return value;
} }
Result insert(transaction::Methods*, LocalDocumentId const& documentId, Result insert(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
Result remove(transaction::Methods*, LocalDocumentId const& documentId, Result remove(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
void unload() override {} void unload() override {}
int drop() override; Result drop() override;
/// @brief attempts to locate an entry in the index /// @brief attempts to locate an entry in the index
/// ///
@ -199,6 +205,7 @@ class MMFilesPersistentIndex final : public MMFilesPathBasedIndex {
arangodb::aql::AstNode* specializeCondition( arangodb::aql::AstNode* specializeCondition(
arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override; arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;
}; };
} }
#endif #endif
View File
@ -295,9 +295,12 @@ void MMFilesPrimaryIndex::toVelocyPackFigures(VPackBuilder& builder) const {
_primaryIndex->appendToVelocyPack(builder); _primaryIndex->appendToVelocyPack(builder);
} }
Result MMFilesPrimaryIndex::insert(transaction::Methods*, Result MMFilesPrimaryIndex::insert(
LocalDocumentId const&, transaction::Methods& trx,
VPackSlice const&, OperationMode) { LocalDocumentId const& documentId,
velocypack::Slice const&,
Index::OperationMode mode
) {
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
LOG_TOPIC(WARN, arangodb::Logger::ENGINES) LOG_TOPIC(WARN, arangodb::Logger::ENGINES)
<< "insert() called for primary index"; << "insert() called for primary index";
@ -306,9 +309,12 @@ Result MMFilesPrimaryIndex::insert(transaction::Methods*,
"insert() called for primary index"); "insert() called for primary index");
} }
Result MMFilesPrimaryIndex::remove(transaction::Methods*, Result MMFilesPrimaryIndex::remove(
LocalDocumentId const&, transaction::Methods& trx,
VPackSlice const&, OperationMode) { LocalDocumentId const& documentId,
velocypack::Slice const&,
Index::OperationMode mode
) {
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
LOG_TOPIC(WARN, arangodb::Logger::ENGINES) LOG_TOPIC(WARN, arangodb::Logger::ENGINES)
<< "remove() called for primary index"; << "remove() called for primary index";
View File
@ -219,13 +219,19 @@ class MMFilesPrimaryIndex final : public MMFilesIndex {
std::underlying_type<Index::Serialize>::type) const override; std::underlying_type<Index::Serialize>::type) const override;
void toVelocyPackFigures(VPackBuilder&) const override; void toVelocyPackFigures(VPackBuilder&) const override;
Result insert(transaction::Methods*, LocalDocumentId const& documentId, Result insert(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
Result remove(transaction::Methods*, LocalDocumentId const& documentId, Result remove(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
Index::OperationMode mode
) override;
void load() override {} void load() override {}
void unload() override; void unload() override;
View File
@ -752,13 +752,17 @@ void MMFilesSkiplistIndex::toVelocyPackFigures(VPackBuilder& builder) const {
} }
/// @brief inserts a document into a skiplist index /// @brief inserts a document into a skiplist index
Result MMFilesSkiplistIndex::insert(transaction::Methods* trx, Result MMFilesSkiplistIndex::insert(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationMode mode) { velocypack::Slice const& doc,
Index::OperationMode mode
) {
std::vector<MMFilesSkiplistIndexElement*> elements; std::vector<MMFilesSkiplistIndexElement*> elements;
Result res; Result res;
int r; int r;
try { try {
r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc); r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
} catch (basics::Exception const& ex) { } catch (basics::Exception const& ex) {
@ -778,7 +782,7 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
} }
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths()); MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());
// insert into the index. the memory for the element will be owned or freed // insert into the index. the memory for the element will be owned or freed
// by the index // by the index
@ -841,7 +845,10 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
LocalDocumentId rev(found->document()->localDocumentId()); LocalDocumentId rev(found->document()->localDocumentId());
std::string existingId; std::string existingId;
_collection.getPhysical()->readDocumentWithCallback(trx, rev, [&existingId](LocalDocumentId const&, VPackSlice doc) { _collection.getPhysical()->readDocumentWithCallback(
&trx,
rev,
[&existingId](LocalDocumentId const&, velocypack::Slice doc)->void {
existingId = doc.get(StaticStrings::KeyString).copyString(); existingId = doc.get(StaticStrings::KeyString).copyString();
}); });
@ -856,13 +863,16 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
} }
/// @brief removes a document from a skiplist index /// @brief removes a document from a skiplist index
Result MMFilesSkiplistIndex::remove(transaction::Methods* trx, Result MMFilesSkiplistIndex::remove(
transaction::Methods& trx,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationMode mode) { velocypack::Slice const& doc,
Index::OperationMode mode
) {
std::vector<MMFilesSkiplistIndexElement*> elements; std::vector<MMFilesSkiplistIndexElement*> elements;
Result res; Result res;
int r; int r;
try { try {
r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc); r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
} catch (basics::Exception const& ex) { } catch (basics::Exception const& ex) {
@ -882,7 +892,7 @@ Result MMFilesSkiplistIndex::remove(transaction::Methods* trx,
} }
ManagedDocumentResult result; ManagedDocumentResult result;
MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths()); MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());
// attempt the removal for skiplist indexes // attempt the removal for skiplist indexes
// ownership for the index element is transferred to the index // ownership for the index element is transferred to the index
View File
@ -289,13 +289,19 @@ class MMFilesSkiplistIndex final : public MMFilesPathBasedIndex {
void toVelocyPackFigures(VPackBuilder&) const override; void toVelocyPackFigures(VPackBuilder&) const override;
Result insert(transaction::Methods*, LocalDocumentId const& documentId, Result insert(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
Result remove(transaction::Methods*, LocalDocumentId const& documentId, Result remove(
arangodb::velocypack::Slice const&, transaction::Methods& trx,
OperationMode mode) override; LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
void unload() override; void unload() override;
View File
@ -1558,7 +1558,7 @@ int MMFilesWalRecoverState::fillIndexes() {
arangodb::SingleCollectionTransaction trx( arangodb::SingleCollectionTransaction trx(
ctx, *collection, AccessMode::Type::WRITE ctx, *collection, AccessMode::Type::WRITE
); );
int res = physical->fillAllIndexes(&trx); int res = physical->fillAllIndexes(trx);
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
return res; return res;
View File
@ -380,19 +380,22 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(
return other; return other;
} }
res = fillIndexes(&trx, idx); res = fillIndexes(trx, idx);
if (!res.ok()) { if (!res.ok()) {
THROW_ARANGO_EXCEPTION(res); THROW_ARANGO_EXCEPTION(res);
} }
// we need to sync the selectivity estimates // we need to sync the selectivity estimates
res = engine->settingsManager()->sync(false); res = engine->settingsManager()->sync(false);
if (res.fail()) { if (res.fail()) {
LOG_TOPIC(WARN, Logger::ENGINES) << "could not sync settings: " LOG_TOPIC(WARN, Logger::ENGINES) << "could not sync settings: "
<< res.errorMessage(); << res.errorMessage();
} }
rocksdb::Status s = engine->db()->GetRootDB()->FlushWAL(true); rocksdb::Status s = engine->db()->GetRootDB()->FlushWAL(true);
if (!s.ok()) { if (!s.ok()) {
LOG_TOPIC(WARN, Logger::ENGINES) << "could not flush wal: " LOG_TOPIC(WARN, Logger::ENGINES) << "could not flush wal: "
<< s.ToString(); << s.ToString();
@ -467,7 +470,7 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
TRI_ASSERT(cindex != nullptr); TRI_ASSERT(cindex != nullptr);
if (iid == cindex->id()) { if (iid == cindex->id()) {
int rv = cindex->drop(); auto rv = cindex->drop().errorNumber();
if (rv == TRI_ERROR_NO_ERROR) { if (rv == TRI_ERROR_NO_ERROR) {
// trigger compaction before deleting the object // trigger compaction before deleting the object
@ -540,10 +543,12 @@ void RocksDBCollection::invokeOnAllElements(
// -- SECTION DML Operations -- // -- SECTION DML Operations --
/////////////////////////////////// ///////////////////////////////////
Result RocksDBCollection::truncate(transaction::Methods* trx, Result RocksDBCollection::truncate(
OperationOptions& options) { transaction::Methods& trx,
OperationOptions& options
) {
TRI_ASSERT(_objectId != 0); TRI_ASSERT(_objectId != 0);
auto state = RocksDBTransactionState::toState(trx); auto state = RocksDBTransactionState::toState(&trx);
RocksDBMethods* mthds = state->rocksdbMethods(); RocksDBMethods* mthds = state->rocksdbMethods();
if (state->isOnlyExclusiveTransaction() && if (state->isOnlyExclusiveTransaction() &&
@ -597,22 +602,30 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
} }
// add the log entry so we can recover the correct count // add the log entry so we can recover the correct count
auto log = RocksDBLogValue::CollectionTruncate(trx->vocbase().id(), auto log = RocksDBLogValue::CollectionTruncate(
_logicalCollection.id(), _objectId); trx.vocbase().id(), _logicalCollection.id(), _objectId
);
s = batch.PutLogData(log.slice()); s = batch.PutLogData(log.slice());
if (!s.ok()) { if (!s.ok()) {
return rocksutils::convertStatus(s); return rocksutils::convertStatus(s);
} }
rocksdb::WriteOptions wo; rocksdb::WriteOptions wo;
s = db->Write(wo, &batch); s = db->Write(wo, &batch);
if (!s.ok()) { if (!s.ok()) {
return rocksutils::convertStatus(s); return rocksutils::convertStatus(s);
} }
seq = db->GetLatestSequenceNumber() - 1; // post commit sequence seq = db->GetLatestSequenceNumber() - 1; // post commit sequence
uint64_t numDocs = _numberDocuments.exchange(0); uint64_t numDocs = _numberDocuments.exchange(0);
_meta.adjustNumberDocuments(seq, /*revision*/newRevisionId(), - static_cast<int64_t>(numDocs)); _meta.adjustNumberDocuments(seq, /*revision*/newRevisionId(), - static_cast<int64_t>(numDocs));
{ {
READ_LOCKER(guard, _indexesLock); READ_LOCKER(guard, _indexesLock);
for (std::shared_ptr<Index> const& idx : _indexes) { for (std::shared_ptr<Index> const& idx : _indexes) {
@ -665,14 +678,14 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
TRI_ASSERT(key.isString()); TRI_ASSERT(key.isString());
TRI_ASSERT(rid != 0); TRI_ASSERT(rid != 0);
RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE); RocksDBSavePoint guard(&trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE);
state->prepareOperation(_logicalCollection.id(), state->prepareOperation(_logicalCollection.id(),
rid, // actual revision ID!! rid, // actual revision ID!!
TRI_VOC_DOCUMENT_OPERATION_REMOVE); TRI_VOC_DOCUMENT_OPERATION_REMOVE);
LocalDocumentId const docId = RocksDBKey::documentId(iter->key()); LocalDocumentId const docId = RocksDBKey::documentId(iter->key());
auto res = removeDocument(trx, docId, doc, options); auto res = removeDocument(&trx, docId, doc, options);
if (res.fail()) { // Failed to remove document in truncate. if (res.fail()) { // Failed to remove document in truncate.
return res; return res;
@ -688,7 +701,7 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
} }
guard.finish(hasPerformedIntermediateCommit); guard.finish(hasPerformedIntermediateCommit);
trackWaitForSync(trx, options); trackWaitForSync(&trx, options);
iter->Next(); iter->Next();
} }
@ -1099,11 +1112,17 @@ Result RocksDBCollection::replace(
} }
Result RocksDBCollection::remove( Result RocksDBCollection::remove(
arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice, transaction::Methods& trx,
arangodb::ManagedDocumentResult& previous, OperationOptions& options, velocypack::Slice slice,
TRI_voc_tick_t& resultMarkerTick, bool, TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
TRI_voc_rid_t& revisionId, KeyLockInfo* /*keyLockInfo*/, OperationOptions& options,
std::function<Result(void)> callbackDuringLock) { TRI_voc_tick_t& resultMarkerTick,
bool /*lock*/,
TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId,
KeyLockInfo* /*keyLockInfo*/,
std::function<Result(void)> callbackDuringLock
) {
// store the tick that was used for writing the document // store the tick that was used for writing the document
// note that we don't need it for this engine // note that we don't need it for this engine
resultMarkerTick = 0; resultMarkerTick = 0;
@ -1119,7 +1138,8 @@ Result RocksDBCollection::remove(
TRI_ASSERT(!key.isNone()); TRI_ASSERT(!key.isNone());
// get the previous revision // get the previous revision
Result res = this->read(trx, key, previous, /*lock*/false); auto res = this->read(&trx, key, previous, /*lock*/false);
if (res.fail()) { if (res.fail()) {
return res; return res;
} }
@ -1135,24 +1155,24 @@ Result RocksDBCollection::remove(
// Check old revision: // Check old revision:
if (!options.ignoreRevs && slice.isObject()) { if (!options.ignoreRevs && slice.isObject()) {
TRI_voc_rid_t expectedRevisionId = TRI_ExtractRevisionId(slice); TRI_voc_rid_t expectedRevisionId = TRI_ExtractRevisionId(slice);
int res = checkRevision(trx, expectedRevisionId, oldRevisionId); auto res = checkRevision(&trx, expectedRevisionId, oldRevisionId);
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
return Result(res); return Result(res);
} }
} }
auto state = RocksDBTransactionState::toState(trx); auto state = RocksDBTransactionState::toState(&trx);
RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE); RocksDBSavePoint guard(&trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE);
// add possible log statement under guard // add possible log statement under guard
state->prepareOperation( state->prepareOperation(
_logicalCollection.id(), oldRevisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE _logicalCollection.id(), oldRevisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE
); );
res = removeDocument(trx, oldDocumentId, oldDoc, options); res = removeDocument(&trx, oldDocumentId, oldDoc, options);
if (res.ok()) { if (res.ok()) {
trackWaitForSync(trx, options); trackWaitForSync(&trx, options);
bool hasPerformedIntermediateCommit = false; bool hasPerformedIntermediateCommit = false;
@ -1238,12 +1258,14 @@ void RocksDBCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
} }
template<typename WriteBatchType, typename MethodsType> template<typename WriteBatchType, typename MethodsType>
static arangodb::Result fillIndex(transaction::Methods* trx, static arangodb::Result fillIndex(
transaction::Methods& trx,
RocksDBIndex* ridx, RocksDBIndex* ridx,
std::unique_ptr<IndexIterator> it, std::unique_ptr<IndexIterator> it,
WriteBatchType& batch, WriteBatchType& batch,
RocksDBCollection* rcol) { RocksDBCollection* rcol
auto state = RocksDBTransactionState::toState(trx); ) {
auto state = RocksDBTransactionState::toState(&trx);
// fillindex can be non transactional, we just need to clean up // fillindex can be non transactional, we just need to clean up
rocksdb::DB* db = rocksutils::globalRocksDB()->GetRootDB(); rocksdb::DB* db = rocksutils::globalRocksDB()->GetRootDB();
@ -1258,6 +1280,7 @@ static arangodb::Result fillIndex(transaction::Methods* trx,
if (res.ok()) { if (res.ok()) {
res = ridx->insertInternal(trx, &batched, documentId, slice, res = ridx->insertInternal(trx, &batched, documentId, slice,
Index::OperationMode::normal); Index::OperationMode::normal);
if (res.ok()) { if (res.ok()) {
numDocsWritten++; numDocsWritten++;
} }
@ -1306,13 +1329,15 @@ static arangodb::Result fillIndex(transaction::Methods* trx,
/// non-transactional: fill index with existing documents /// non-transactional: fill index with existing documents
/// from this collection /// from this collection
arangodb::Result RocksDBCollection::fillIndexes( arangodb::Result RocksDBCollection::fillIndexes(
transaction::Methods* trx, std::shared_ptr<arangodb::Index> added) { transaction::Methods& trx,
TRI_ASSERT(trx->state()->collection( std::shared_ptr<arangodb::Index> added
) {
TRI_ASSERT(trx.state()->collection(
_logicalCollection.id(), AccessMode::Type::EXCLUSIVE _logicalCollection.id(), AccessMode::Type::EXCLUSIVE
)); ));
std::unique_ptr<IndexIterator> it(new RocksDBAllIndexIterator( std::unique_ptr<IndexIterator> it(new RocksDBAllIndexIterator(
&_logicalCollection, trx, primaryIndex() &_logicalCollection, &trx, primaryIndex()
)); ));
RocksDBIndex* ridx = static_cast<RocksDBIndex*>(added.get()); RocksDBIndex* ridx = static_cast<RocksDBIndex*>(added.get());
@ -1360,13 +1385,16 @@ Result RocksDBCollection::insertDocument(
READ_LOCKER(guard, _indexesLock); READ_LOCKER(guard, _indexesLock);
for (std::shared_ptr<Index> const& idx : _indexes) { for (std::shared_ptr<Index> const& idx : _indexes) {
RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get()); RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get());
Result tmpres = rIdx->insertInternal(trx, mthds, documentId, doc, auto tmpres = rIdx->insertInternal(
options.indexOperationMode); *trx, mthds, documentId, doc, options.indexOperationMode
);
if (tmpres.fail()) { if (tmpres.fail()) {
if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) { if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
// in case of OOM return immediately // in case of OOM return immediately
return tmpres; return tmpres;
} }
if (tmpres.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) || res.ok()) { if (tmpres.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) || res.ok()) {
// "prefer" unique constraint violated over other errors // "prefer" unique constraint violated over other errors
res.reset(tmpres); res.reset(tmpres);
@ -1409,12 +1437,15 @@ Result RocksDBCollection::removeDocument(
Result resInner; Result resInner;
READ_LOCKER(guard, _indexesLock); READ_LOCKER(guard, _indexesLock);
for (std::shared_ptr<Index> const& idx : _indexes) { for (std::shared_ptr<Index> const& idx : _indexes) {
Result tmpres = idx->remove(trx, documentId, doc, options.indexOperationMode); auto tmpres =
idx->remove(*trx, documentId, doc, options.indexOperationMode);
if (tmpres.fail()) { if (tmpres.fail()) {
if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) { if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
// in case of OOM return immediately // in case of OOM return immediately
return tmpres; return tmpres;
} }
// for other errors, set result // for other errors, set result
res.reset(tmpres); res.reset(tmpres);
} }
@ -1464,13 +1495,22 @@ Result RocksDBCollection::updateDocument(
READ_LOCKER(guard, _indexesLock); READ_LOCKER(guard, _indexesLock);
for (std::shared_ptr<Index> const& idx : _indexes) { for (std::shared_ptr<Index> const& idx : _indexes) {
RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get()); RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get());
Result tmpres = rIdx->updateInternal(trx, mthd, oldDocumentId, oldDoc, newDocumentId, auto tmpres = rIdx->updateInternal(
newDoc, options.indexOperationMode); *trx,
mthd,
oldDocumentId,
oldDoc,
newDocumentId,
newDoc,
options.indexOperationMode
);
if (tmpres.fail()) { if (tmpres.fail()) {
if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) { if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
// in case of OOM return immediately // in case of OOM return immediately
return tmpres; return tmpres;
} }
res.reset(tmpres); res.reset(tmpres);
} }
} }
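The insert/remove/update loops above aggregate per-index results with a fixed policy: out-of-memory returns immediately, a unique-constraint violation is preferred over any other stored error, and otherwise the first failure wins. A compact, self-contained sketch of that policy; the error codes and the Result type below are placeholders, not the ArangoDB definitions.

#include <iostream>
#include <vector>

// Placeholder error codes and Result type for illustration.
constexpr int NO_ERROR = 0;
constexpr int OUT_OF_MEMORY = 1;
constexpr int UNIQUE_CONSTRAINT_VIOLATED = 2;

struct Result {
  int code = NO_ERROR;
  bool ok() const { return code == NO_ERROR; }
  bool fail() const { return !ok(); }
  bool is(int c) const { return code == c; }
  void reset(Result const& other) { code = other.code; }
};

Result applyToAllIndexes(std::vector<int> const& perIndexOutcomes) {
  Result res;
  for (int outcome : perIndexOutcomes) {
    Result tmp;
    tmp.code = outcome;
    if (tmp.fail()) {
      if (tmp.is(OUT_OF_MEMORY)) {
        return tmp;  // bail out immediately on OOM
      }
      if (tmp.is(UNIQUE_CONSTRAINT_VIOLATED) || res.ok()) {
        res.reset(tmp);  // "prefer" the unique-constraint error
      }
    }
  }
  return res;
}

int main() {
  Result r = applyToAllIndexes({NO_ERROR, 3, UNIQUE_CONSTRAINT_VIOLATED});
  std::cout << r.code << '\n';  // prints 2
  return 0;
}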
View File
@ -114,7 +114,10 @@ class RocksDBCollection final : public PhysicalCollection {
// -- SECTION DML Operations -- // -- SECTION DML Operations --
/////////////////////////////////// ///////////////////////////////////
Result truncate(transaction::Methods* trx, OperationOptions&) override; Result truncate(
transaction::Methods& trx,
OperationOptions& options
) override;
void deferDropCollection( void deferDropCollection(
std::function<bool(LogicalCollection&)> const& callback std::function<bool(LogicalCollection&)> const& callback
@ -169,13 +172,18 @@ class RocksDBCollection final : public PhysicalCollection {
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous, TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
std::function<Result(void)> callbackDuringLock) override; std::function<Result(void)> callbackDuringLock) override;
Result remove(arangodb::transaction::Methods* trx, Result remove(
arangodb::velocypack::Slice slice, transaction::Methods& trx,
arangodb::ManagedDocumentResult& previous, velocypack::Slice slice,
OperationOptions& options, TRI_voc_tick_t& resultMarkerTick, ManagedDocumentResult& previous,
bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId, OperationOptions& options,
KeyLockInfo* /*keyLockInfo*/, TRI_voc_tick_t& resultMarkerTick,
std::function<Result(void)> callbackDuringLock) override; bool lock,
TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId,
KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock
) override;
/// adjust the current number of docs /// adjust the current number of docs
void adjustNumberDocuments(TRI_voc_rid_t revisionId, int64_t adjustment); void adjustNumberDocuments(TRI_voc_rid_t revisionId, int64_t adjustment);
@ -208,8 +216,10 @@ class RocksDBCollection final : public PhysicalCollection {
void figuresSpecific(std::shared_ptr<velocypack::Builder>&) override; void figuresSpecific(std::shared_ptr<velocypack::Builder>&) override;
void addIndex(std::shared_ptr<arangodb::Index> idx); void addIndex(std::shared_ptr<arangodb::Index> idx);
arangodb::Result fillIndexes(transaction::Methods*, arangodb::Result fillIndexes(
std::shared_ptr<arangodb::Index>); transaction::Methods& trx,
std::shared_ptr<arangodb::Index> indexes
);
// @brief return the primary index // @brief return the primary index
// WARNING: Make sure that this instance // WARNING: Make sure that this instance
View File
@ -572,18 +572,21 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder,
builder.close(); builder.close();
} }
Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx, Result RocksDBEdgeIndex::insertInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
VPackSlice fromTo = doc.get(_directionAttr); VPackSlice fromTo = doc.get(_directionAttr);
TRI_ASSERT(fromTo.isString()); TRI_ASSERT(fromTo.isString());
auto fromToRef = StringRef(fromTo); auto fromToRef = StringRef(fromTo);
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructEdgeIndexValue(_objectId, fromToRef, documentId); key->constructEdgeIndexValue(_objectId, fromToRef, documentId);
VPackSlice toFrom = _isFromIndex VPackSlice toFrom = _isFromIndex
? transaction::helpers::extractToFromDocument(doc) ? transaction::helpers::extractToFromDocument(doc)
: transaction::helpers::extractFromFromDocument(doc); : transaction::helpers::extractFromFromDocument(doc);
@ -595,31 +598,35 @@ Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
// acquire rocksdb transaction // acquire rocksdb transaction
rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string()); rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());
if (s.ok()) { if (s.ok()) {
std::hash<StringRef> hasher; std::hash<StringRef> hasher;
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef)); uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
RocksDBTransactionState::toState(trx)->trackIndexInsert( RocksDBTransactionState::toState(&trx)->trackIndexInsert(
_collection.id(), id(), hash _collection.id(), id(), hash
); );
} else { } else {
res.reset(rocksutils::convertStatus(s)); res.reset(rocksutils::convertStatus(s));
addErrorMsg(res); addErrorMsg(res);
} }
return res; return res;
} }
Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx, Result RocksDBEdgeIndex::removeInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
// VPackSlice primaryKey = doc.get(StaticStrings::KeyString); // VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.get(_directionAttr); VPackSlice fromTo = doc.get(_directionAttr);
auto fromToRef = StringRef(fromTo); auto fromToRef = StringRef(fromTo);
TRI_ASSERT(fromTo.isString()); TRI_ASSERT(fromTo.isString());
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructEdgeIndexValue(_objectId, fromToRef, documentId); key->constructEdgeIndexValue(_objectId, fromToRef, documentId);
VPackSlice toFrom = _isFromIndex VPackSlice toFrom = _isFromIndex
? transaction::helpers::extractToFromDocument(doc) ? transaction::helpers::extractToFromDocument(doc)
@ -634,7 +641,7 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
if (s.ok()) { if (s.ok()) {
std::hash<StringRef> hasher; std::hash<StringRef> hasher;
uint64_t hash = static_cast<uint64_t>(hasher(fromToRef)); uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
RocksDBTransactionState::toState(trx)->trackIndexRemove( RocksDBTransactionState::toState(&trx)->trackIndexRemove(
_collection.id(), id(), hash _collection.id(), id(), hash
); );
} else { } else {
@ -646,15 +653,16 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
} }
void RocksDBEdgeIndex::batchInsert( void RocksDBEdgeIndex::batchInsert(
transaction::Methods* trx, transaction::Methods& trx,
std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents, std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) { std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
auto* mthds = RocksDBTransactionState::toMethods(trx); auto* mthds = RocksDBTransactionState::toMethods(&trx);
for (auto const& doc : documents) { for (auto const& doc : documents) {
VPackSlice fromTo = doc.second.get(_directionAttr); VPackSlice fromTo = doc.second.get(_directionAttr);
TRI_ASSERT(fromTo.isString()); TRI_ASSERT(fromTo.isString());
auto fromToRef = StringRef(fromTo); auto fromToRef = StringRef(fromTo);
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructEdgeIndexValue(_objectId, fromToRef, doc.first); key->constructEdgeIndexValue(_objectId, fromToRef, doc.first);
blackListKey(fromToRef); blackListKey(fromToRef);

View File

@ -148,9 +148,10 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
std::underlying_type<Index::Serialize>::type) const override; std::underlying_type<Index::Serialize>::type) const override;
void batchInsert( void batchInsert(
transaction::Methods*, transaction::Methods& trx,
std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const&, std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& docs,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override; std::shared_ptr<basics::LocalTaskQueue> queue
) override;
bool hasBatchInsert() const override { return false; } bool hasBatchInsert() const override { return false; }
@ -174,15 +175,21 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
void afterTruncate(TRI_voc_tick_t tick) override; void afterTruncate(TRI_voc_tick_t tick) override;
Result insertInternal(transaction::Methods*, RocksDBMethods*, Result insertInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; Index::OperationMode mode
) override;
Result removeInternal(transaction::Methods*, RocksDBMethods*, Result removeInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; Index::OperationMode mode
) override;
private: private:
/// @brief create the iterator /// @brief create the iterator

View File

@ -1289,6 +1289,7 @@ arangodb::Result RocksDBEngine::dropCollection(
// delete indexes, RocksDBIndex::drop() has its own check // delete indexes, RocksDBIndex::drop() has its own check
std::vector<std::shared_ptr<Index>> vecShardIndex = coll->getIndexes(); std::vector<std::shared_ptr<Index>> vecShardIndex = coll->getIndexes();
TRI_ASSERT(!vecShardIndex.empty()); TRI_ASSERT(!vecShardIndex.empty());
for (auto& index : vecShardIndex) { for (auto& index : vecShardIndex) {
RocksDBIndex* ridx = static_cast<RocksDBIndex*>(index.get()); RocksDBIndex* ridx = static_cast<RocksDBIndex*>(index.get());
res = RocksDBCollectionMeta::deleteIndexEstimate(db, ridx->objectId()); res = RocksDBCollectionMeta::deleteIndexEstimate(db, ridx->objectId());
@ -1297,7 +1298,8 @@ arangodb::Result RocksDBEngine::dropCollection(
<< res.errorMessage(); << res.errorMessage();
} }
int dropRes = index->drop(); auto dropRes = index->drop().errorNumber();
if (dropRes != TRI_ERROR_NO_ERROR) { if (dropRes != TRI_ERROR_NO_ERROR) {
// We try to remove all indexed values. // We try to remove all indexed values.
// If it does not work they cannot be accessed any more and leaked. // If it does not work they cannot be accessed any more and leaked.
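The hunk above also adapts the engine to the changed drop() signature, which now returns a Result instead of a plain error number; the call site unwraps it with errorNumber() so the existing control flow keeps working. A self-contained sketch of that migration, with a deliberately simplified Result stand-in rather than the real arangodb::Result:

constexpr int TRI_ERROR_NO_ERROR = 0;

class Result {                              // simplified stand-in for arangodb::Result
 public:
  Result() = default;
  explicit Result(int errorNumber) : _errorNumber(errorNumber) {}
  bool ok() const { return _errorNumber == TRI_ERROR_NO_ERROR; }
  int errorNumber() const { return _errorNumber; }
 private:
  int _errorNumber = TRI_ERROR_NO_ERROR;
};

Result drop() { return Result(); }          // new-style signature: Result instead of int

int main() {
  // old call site:  int dropRes = index->drop();
  // new call site keeps the integer-based check by unwrapping the Result once:
  auto dropRes = drop().errorNumber();
  return dropRes == TRI_ERROR_NO_ERROR ? 0 : 1;
}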

View File

@ -185,13 +185,16 @@ bool RocksDBFulltextIndex::matchesDefinition(VPackSlice const& info) const {
return true; return true;
} }
Result RocksDBFulltextIndex::insertInternal(transaction::Methods* trx, Result RocksDBFulltextIndex::insertInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
std::set<std::string> words = wordlist(doc); std::set<std::string> words = wordlist(doc);
if (words.empty()) { if (words.empty()) {
return res; return res;
} }
@ -202,27 +205,32 @@ Result RocksDBFulltextIndex::insertInternal(transaction::Methods* trx,
// size_t const count = words.size(); // size_t const count = words.size();
for (std::string const& word : words) { for (std::string const& word : words) {
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructFulltextIndexValue(_objectId, StringRef(word), documentId); key->constructFulltextIndexValue(_objectId, StringRef(word), documentId);
rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string()); rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());
if (!s.ok()) { if (!s.ok()) {
res.reset(rocksutils::convertStatus(s, rocksutils::index)); res.reset(rocksutils::convertStatus(s, rocksutils::index));
addErrorMsg(res); addErrorMsg(res);
break; break;
} }
} }
return res; return res;
} }
Result RocksDBFulltextIndex::removeInternal(transaction::Methods* trx, Result RocksDBFulltextIndex::removeInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
std::set<std::string> words = wordlist(doc); std::set<std::string> words = wordlist(doc);
if (words.empty()) { if (words.empty()) {
return res; return res;
} }
@ -230,16 +238,19 @@ Result RocksDBFulltextIndex::removeInternal(transaction::Methods* trx,
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
for (std::string const& word : words) { for (std::string const& word : words) {
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructFulltextIndexValue(_objectId, StringRef(word), documentId); key->constructFulltextIndexValue(_objectId, StringRef(word), documentId);
rocksdb::Status s = mthd->Delete(_cf, key.ref()); rocksdb::Status s = mthd->Delete(_cf, key.ref());
if (!s.ok()) { if (!s.ok()) {
res.reset(rocksutils::convertStatus(s, rocksutils::index)); res.reset(rocksutils::convertStatus(s, rocksutils::index));
addErrorMsg(res); addErrorMsg(res);
break; break;
} }
} }
return res; return res;
} }

View File

@ -111,16 +111,22 @@ class RocksDBFulltextIndex final : public RocksDBIndex {
protected: protected:
/// insert index elements into the specified write batch. /// insert index elements into the specified write batch.
Result insertInternal(transaction::Methods* trx, RocksDBMethods*, Result insertInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; Index::OperationMode mode
) override;
/// remove index elements and put it in the specified write batch. /// remove index elements and put it in the specified write batch.
Result removeInternal(transaction::Methods*, RocksDBMethods*, Result removeInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; Index::OperationMode mode
) override;
private: private:
std::set<std::string> wordlist(arangodb::velocypack::Slice const&); std::set<std::string> wordlist(arangodb::velocypack::Slice const&);

View File

@ -399,33 +399,43 @@ IndexIterator* RocksDBGeoIndex::iteratorForCondition(
} }
/// internal insert function, set batch or trx before calling /// internal insert function, set batch or trx before calling
Result RocksDBGeoIndex::insertInternal(transaction::Methods* trx, Result RocksDBGeoIndex::insertInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
velocypack::Slice const& doc, velocypack::Slice const& doc,
OperationMode mode) { arangodb::Index::OperationMode mode
) {
Result res; Result res;
// covering and centroid of coordinate / polygon / ... // covering and centroid of coordinate / polygon / ...
size_t reserve = _variant == Variant::GEOJSON ? 8 : 1; size_t reserve = _variant == Variant::GEOJSON ? 8 : 1;
std::vector<S2CellId> cells; std::vector<S2CellId> cells;
cells.reserve(reserve); cells.reserve(reserve);
S2Point centroid; S2Point centroid;
res = geo_index::Index::indexCells(doc, cells, centroid); res = geo_index::Index::indexCells(doc, cells, centroid);
if (res.fail()) { if (res.fail()) {
if (res.is(TRI_ERROR_BAD_PARAMETER)) { if (res.is(TRI_ERROR_BAD_PARAMETER)) {
res.reset(); // Invalid, no insert. Index is sparse res.reset(); // Invalid, no insert. Index is sparse
} }
return res; return res;
} }
TRI_ASSERT(!cells.empty()); TRI_ASSERT(!cells.empty());
TRI_ASSERT(S2::IsUnitLength(centroid)); TRI_ASSERT(S2::IsUnitLength(centroid));
RocksDBValue val = RocksDBValue::S2Value(centroid); RocksDBValue val = RocksDBValue::S2Value(centroid);
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
for (S2CellId cell : cells) { for (S2CellId cell : cells) {
key->constructGeoIndexValue(_objectId, cell.id(), documentId); key->constructGeoIndexValue(_objectId, cell.id(), documentId);
rocksdb::Status s = mthd->Put(RocksDBColumnFamily::geo(), key.ref(), val.string()); rocksdb::Status s = mthd->Put(RocksDBColumnFamily::geo(), key.ref(), val.string());
if (!s.ok()) { if (!s.ok()) {
res.reset(rocksutils::convertStatus(s, rocksutils::index)); res.reset(rocksutils::convertStatus(s, rocksutils::index));
addErrorMsg(res); addErrorMsg(res);
@ -437,26 +447,33 @@ Result RocksDBGeoIndex::insertInternal(transaction::Methods* trx,
} }
/// internal remove function, set batch or trx before calling /// internal remove function, set batch or trx before calling
Result RocksDBGeoIndex::removeInternal(transaction::Methods* trx, Result RocksDBGeoIndex::removeInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { arangodb::Index::OperationMode mode
) {
Result res; Result res;
// covering and centroid of coordinate / polygon / ... // covering and centroid of coordinate / polygon / ...
std::vector<S2CellId> cells; std::vector<S2CellId> cells;
S2Point centroid; S2Point centroid;
res = geo_index::Index::indexCells(doc, cells, centroid); res = geo_index::Index::indexCells(doc, cells, centroid);
if (res.fail()) { // might occur if insert is rolled back if (res.fail()) { // might occur if insert is rolled back
if (res.is(TRI_ERROR_BAD_PARAMETER)) { if (res.is(TRI_ERROR_BAD_PARAMETER)) {
res.reset(); // Invalid, no insert. Index is sparse res.reset(); // Invalid, no insert. Index is sparse
} }
return res; return res;
} }
TRI_ASSERT(!cells.empty()); TRI_ASSERT(!cells.empty());
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
// FIXME: can we rely on the region coverer to return // FIXME: can we rely on the region coverer to return
// the same cells everytime for the same parameters ? // the same cells everytime for the same parameters ?
for (S2CellId cell : cells) { for (S2CellId cell : cells) {

View File

@ -82,16 +82,22 @@ class RocksDBGeoIndex final : public RocksDBIndex, public geo_index::Index {
bool matchesDefinition(velocypack::Slice const& info) const override; bool matchesDefinition(velocypack::Slice const& info) const override;
/// insert index elements into the specified write batch. /// insert index elements into the specified write batch.
Result insertInternal(transaction::Methods* trx, RocksDBMethods*, Result insertInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; arangodb::Index::OperationMode mode
) override;
/// remove index elements and put it in the specified write batch. /// remove index elements and put it in the specified write batch.
Result removeInternal(transaction::Methods*, RocksDBMethods*, Result removeInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& docs,
OperationMode mode) override; arangodb::Index::OperationMode mode
) override;
private: private:
std::string const _typeName; std::string const _typeName;

View File

@ -112,8 +112,19 @@ Result removeKeysOutsideRange(VPackSlice chunkSlice,
builder.clear(); builder.clear();
builder.add(velocypack::ValuePair(docKey.data(), docKey.size(), builder.add(velocypack::ValuePair(docKey.data(), docKey.size(),
velocypack::ValueType::String)); velocypack::ValueType::String));
Result r = physical->remove(&trx, builder.slice(), mdr, options, tick, auto r = physical->remove(
false, prevRev, revisionId, nullptr, nullptr); trx,
builder.slice(),
mdr,
options,
tick,
false,
prevRev,
revisionId,
nullptr,
nullptr
);
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) { if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
// ignore not found, we remove conflicting docs ahead of time // ignore not found, we remove conflicting docs ahead of time
THROW_ARANGO_EXCEPTION(r); THROW_ARANGO_EXCEPTION(r);
@ -147,8 +158,19 @@ Result removeKeysOutsideRange(VPackSlice chunkSlice,
builder.clear(); builder.clear();
builder.add(velocypack::ValuePair(docKey.data(), docKey.size(), builder.add(velocypack::ValuePair(docKey.data(), docKey.size(),
velocypack::ValueType::String)); velocypack::ValueType::String));
Result r = physical->remove(&trx, builder.slice(), mdr, options, tick, auto r = physical->remove(
false, prevRev, revisionId, nullptr, nullptr); trx,
builder.slice(),
mdr,
options,
tick,
false,
prevRev,
revisionId,
nullptr,
nullptr
);
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) { if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
// ignore not found, we remove conflicting docs ahead of time // ignore not found, we remove conflicting docs ahead of time
THROW_ARANGO_EXCEPTION(r); THROW_ARANGO_EXCEPTION(r);
@ -301,9 +323,19 @@ Result syncChunkRocksDB(
keyBuilder->clear(); keyBuilder->clear();
keyBuilder->add(VPackValue(localKey)); keyBuilder->add(VPackValue(localKey));
Result r = auto r = physical->remove(
physical->remove(trx, keyBuilder->slice(), mdr, options, resultTick, *trx,
false, prevRev, revisionId, nullptr, nullptr); keyBuilder->slice(),
mdr,
options,
resultTick,
false,
prevRev,
revisionId,
nullptr,
nullptr
);
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) { if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
// ignore not found, we remove conflicting docs ahead of time // ignore not found, we remove conflicting docs ahead of time
return r; return r;
@ -355,9 +387,19 @@ Result syncChunkRocksDB(
keyBuilder->clear(); keyBuilder->clear();
keyBuilder->add(VPackValue(localKey)); keyBuilder->add(VPackValue(localKey));
Result r = auto r = physical->remove(
physical->remove(trx, keyBuilder->slice(), mdr, options, resultTick, *trx,
false, prevRev, revisionId, nullptr, nullptr); keyBuilder->slice(),
mdr,
options,
resultTick,
false,
prevRev,
revisionId,
nullptr,
nullptr
);
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) { if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
// ignore not found, we remove conflicting docs ahead of time // ignore not found, we remove conflicting docs ahead of time
return r; return r;
@ -481,16 +523,28 @@ Result syncChunkRocksDB(
keyBuilder->clear(); keyBuilder->clear();
keyBuilder->add(VPackValue(conflictingKey)); keyBuilder->add(VPackValue(conflictingKey));
Result res = auto res = physical->remove(
physical->remove(trx, keyBuilder->slice(), mdr, options, resultTick, *trx,
false, prevRev, revisionId, nullptr, nullptr); keyBuilder->slice(),
mdr,
options,
resultTick,
false,
prevRev,
revisionId,
nullptr,
nullptr
);
if (res.ok()) { if (res.ok()) {
++stats.numDocsRemoved; ++stats.numDocsRemoved;
} }
return res; return res;
}; };
LocalDocumentId const documentId = physical->lookupKey(trx, keySlice); LocalDocumentId const documentId = physical->lookupKey(trx, keySlice);
if (!documentId.isSet()) { if (!documentId.isSet()) {
// INSERT // INSERT
TRI_ASSERT(options.indexOperationMode == Index::OperationMode::internal); TRI_ASSERT(options.indexOperationMode == Index::OperationMode::internal);
@ -743,9 +797,19 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
ManagedDocumentResult previous; ManagedDocumentResult previous;
TRI_voc_rid_t resultMarkerTick; TRI_voc_rid_t resultMarkerTick;
TRI_voc_rid_t prevRev, revisionId; TRI_voc_rid_t prevRev, revisionId;
Result r = physical->remove(&trx, tempBuilder.slice(), previous, auto r = physical->remove(
options, resultMarkerTick, false, trx,
prevRev, revisionId, nullptr, nullptr); tempBuilder.slice(),
previous,
options,
resultMarkerTick,
false,
prevRev,
revisionId,
nullptr,
nullptr
);
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) { if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
// ignore not found, we remove conflicting docs ahead of time // ignore not found, we remove conflicting docs ahead of time
THROW_ARANGO_EXCEPTION(r); THROW_ARANGO_EXCEPTION(r);
@ -754,6 +818,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
if (r.ok()) { if (r.ok()) {
++stats.numDocsRemoved; ++stats.numDocsRemoved;
} }
return; return;
} }
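In the incremental-sync code above, the reference-based remove() is reached in two shapes: where the enclosing scope owns the transaction object it is passed directly (trx), and where only a pointer is in scope it is dereferenced at the call (*trx). A minimal stand-alone sketch of the two call shapes, using hypothetical names in place of the physical-collection API:

struct Transaction {};
struct Result { bool fail() const { return false; } };

Result removeDocument(Transaction& trx) { (void)trx; return Result(); }  // reference-based API

int main() {
  Transaction trxObject;
  Transaction* trxPointer = &trxObject;

  Result a = removeDocument(trxObject);    // scope owns the object: pass it directly
  Result b = removeDocument(*trxPointer);  // scope only has a pointer: dereference
                                           // once at the call site
  return (a.fail() || b.fail()) ? 1 : 0;
}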

View File

@ -202,7 +202,7 @@ void RocksDBIndex::destroyCache() {
_cachePresent = false; _cachePresent = false;
} }
int RocksDBIndex::drop() { Result RocksDBIndex::drop() {
auto* coll = toRocksDBCollection(_collection); auto* coll = toRocksDBCollection(_collection);
// edge index needs to be dropped with prefixSameAsStart = false // edge index needs to be dropped with prefixSameAsStart = false
// otherwise full index scan will not work // otherwise full index scan will not work
@ -235,7 +235,7 @@ int RocksDBIndex::drop() {
} }
#endif #endif
return r.errorNumber(); return r;
} }
void RocksDBIndex::afterTruncate(TRI_voc_tick_t) { void RocksDBIndex::afterTruncate(TRI_voc_tick_t) {
@ -247,12 +247,15 @@ void RocksDBIndex::afterTruncate(TRI_voc_tick_t) {
} }
} }
Result RocksDBIndex::updateInternal(transaction::Methods* trx, RocksDBMethods* mthd, Result RocksDBIndex::updateInternal(
transaction::Methods& trx,
RocksDBMethods* mthd,
LocalDocumentId const& oldDocumentId, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc, velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId, LocalDocumentId const& newDocumentId,
arangodb::velocypack::Slice const& newDoc, velocypack::Slice const& newDoc,
OperationMode mode) { Index::OperationMode mode
) {
// It is illegal to call this method on the primary index // It is illegal to call this method on the primary index
// RocksDBPrimaryIndex must override this method accordingly // RocksDBPrimaryIndex must override this method accordingly
TRI_ASSERT(type() != TRI_IDX_TYPE_PRIMARY_INDEX); TRI_ASSERT(type() != TRI_IDX_TYPE_PRIMARY_INDEX);

View File

@ -65,7 +65,28 @@ class RocksDBIndex : public Index {
bool isPersistent() const override final { return true; } bool isPersistent() const override final { return true; }
int drop() override; Result drop() override;
Result insert(
transaction::Methods& trx,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override {
auto mthds = RocksDBTransactionState::toMethods(&trx);
return insertInternal(trx, mthds, documentId, doc, mode);
}
Result remove(
transaction::Methods& trx,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
Index::OperationMode mode
) override {
auto mthds = RocksDBTransactionState::toMethods(&trx);
return removeInternal(trx, mthds, documentId, doc, mode);
}
virtual void afterTruncate(TRI_voc_tick_t tick) override; virtual void afterTruncate(TRI_voc_tick_t tick) override;
void load() override; void load() override;
@ -76,22 +97,11 @@ class RocksDBIndex : public Index {
void cleanup(); void cleanup();
/// @brief provides a size hint for the index /// @brief provides a size hint for the index
int sizeHint(transaction::Methods* /*trx*/, size_t /*size*/) override final { Result sizeHint(
// nothing to do here transaction::Methods& /*trx*/,
return TRI_ERROR_NO_ERROR; size_t /*size*/
} ) override final {
return Result(); // nothing to do here
Result insert(transaction::Methods* trx, LocalDocumentId const& documentId,
velocypack::Slice const& doc, OperationMode mode) override {
auto mthds = RocksDBTransactionState::toMethods(trx);
return insertInternal(trx, mthds, documentId, doc, mode);
}
Result remove(transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
OperationMode mode) override {
auto mthds = RocksDBTransactionState::toMethods(trx);
return removeInternal(trx, mthds, documentId, doc, mode);
} }
void setCacheEnabled(bool enable) { void setCacheEnabled(bool enable) {
@ -102,23 +112,32 @@ class RocksDBIndex : public Index {
void destroyCache(); void destroyCache();
/// insert index elements into the specified write batch. /// insert index elements into the specified write batch.
virtual Result insertInternal(transaction::Methods* trx, RocksDBMethods*, virtual Result insertInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, arangodb::velocypack::Slice const& doc,
OperationMode mode) = 0; Index::OperationMode mode
) = 0;
virtual Result updateInternal(transaction::Methods* trx, RocksDBMethods*, /// remove index elements and put it in the specified write batch.
virtual Result removeInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
Index::OperationMode mode
) = 0;
virtual Result updateInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& oldDocumentId, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc, arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId, LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc, velocypack::Slice const& newDoc,
OperationMode mode); Index::OperationMode mode
);
/// remove index elements and put it in the specified write batch.
virtual Result removeInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) = 0;
rocksdb::ColumnFamilyHandle* columnFamily() const { return _cf; } rocksdb::ColumnFamilyHandle* columnFamily() const { return _cf; }
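The header above now declares the public insert()/remove() wrappers inline next to drop(): each wrapper obtains the RocksDBMethods for the transaction via RocksDBTransactionState::toMethods(&trx) and forwards to the pure-virtual insertInternal()/removeInternal() hooks that the concrete indexes override. A reduced sketch of that forwarding pattern, with stand-in names rather than the real classes:

struct Transaction {};
struct Methods {};
struct Result { bool ok() const { return true; } };

Methods* toMethods(Transaction* /*trx*/) {  // stand-in for RocksDBTransactionState::toMethods
  static Methods m;
  return &m;
}

class IndexBase {
 public:
  virtual ~IndexBase() = default;
  // public entry point: fixed wiring, reference parameter
  Result insert(Transaction& trx) {
    auto* mthds = toMethods(&trx);          // pointer taken once, at the boundary
    return insertInternal(trx, mthds);
  }
 protected:
  // per-index behaviour: what the edge/geo/fulltext/vpack indexes override
  virtual Result insertInternal(Transaction& trx, Methods* mthds) = 0;
};

class EdgeLikeIndex final : public IndexBase {
 protected:
  Result insertInternal(Transaction&, Methods*) override { return Result(); }
};

int main() {
  Transaction trx;
  EdgeLikeIndex idx;
  return idx.insert(trx).ok() ? 0 : 1;
}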

View File

@ -334,26 +334,32 @@ bool RocksDBPrimaryIndex::lookupRevision(transaction::Methods* trx,
return true; return true;
} }
Result RocksDBPrimaryIndex::insertInternal(transaction::Methods* trx, Result RocksDBPrimaryIndex::insertInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& slice, velocypack::Slice const& slice,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice); VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice);
TRI_ASSERT(keySlice.isString()); TRI_ASSERT(keySlice.isString());
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructPrimaryIndexValue(_objectId, StringRef(keySlice)); key->constructPrimaryIndexValue(_objectId, StringRef(keySlice));
rocksdb::PinnableSlice val; rocksdb::PinnableSlice val;
rocksdb::Status s = mthd->Get(_cf, key->string(), &val); rocksdb::Status s = mthd->Get(_cf, key->string(), &val);
if (s.ok()) { // detected conflicting primary key if (s.ok()) { // detected conflicting primary key
std::string existingId = keySlice.copyString(); std::string existingId = keySlice.copyString();
if (mode == OperationMode::internal) { if (mode == OperationMode::internal) {
return res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED, std::move(existingId)); return res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED, std::move(existingId));
} }
res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED); res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED);
return addErrorMsg(res, existingId); return addErrorMsg(res, existingId);
} }
val.Reset(); // clear used memory val.Reset(); // clear used memory
@ -371,22 +377,25 @@ Result RocksDBPrimaryIndex::insertInternal(transaction::Methods* trx,
return res; return res;
} }
Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx, Result RocksDBPrimaryIndex::updateInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& oldDocumentId, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc, velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId, LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc, velocypack::Slice const& newDoc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(oldDoc); VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(oldDoc);
TRI_ASSERT(keySlice == oldDoc.get(StaticStrings::KeyString)); TRI_ASSERT(keySlice == oldDoc.get(StaticStrings::KeyString));
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructPrimaryIndexValue(_objectId, StringRef(keySlice)); key->constructPrimaryIndexValue(_objectId, StringRef(keySlice));
TRI_voc_rid_t revision = transaction::helpers::extractRevFromDocument(newDoc); TRI_voc_rid_t revision = transaction::helpers::extractRevFromDocument(newDoc);
auto value = RocksDBValue::PrimaryIndexValue(newDocumentId, revision); auto value = RocksDBValue::PrimaryIndexValue(newDocumentId, revision);
blackListKey(key->string().data(), blackListKey(key->string().data(),
static_cast<uint32_t>(key->string().size())); static_cast<uint32_t>(key->string().size()));
@ -398,24 +407,26 @@ Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
return res; return res;
} }
Result RocksDBPrimaryIndex::removeInternal(transaction::Methods* trx, Result RocksDBPrimaryIndex::removeInternal(
transaction::Methods& trx,
RocksDBMethods* mthd, RocksDBMethods* mthd,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& slice, velocypack::Slice const& slice,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
// TODO: deal with matching revisions? // TODO: deal with matching revisions?
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice); VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice);
TRI_ASSERT(keySlice.isString()); TRI_ASSERT(keySlice.isString());
RocksDBKeyLeaser key(trx); RocksDBKeyLeaser key(&trx);
key->constructPrimaryIndexValue( key->constructPrimaryIndexValue(
_objectId, StringRef(keySlice)); _objectId, StringRef(keySlice));
blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size())); blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));
// acquire rocksdb transaction // acquire rocksdb transaction
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(trx); auto* mthds = RocksDBTransactionState::toMethods(&trx);
rocksdb::Status s = mthds->Delete(_cf, key.ref()); rocksdb::Status s = mthds->Delete(_cf, key.ref());
if (!s.ok()) { if (!s.ok()) {
res.reset(rocksutils::convertStatus(s, rocksutils::index)); res.reset(rocksutils::convertStatus(s, rocksutils::index));

View File

@ -177,23 +177,32 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
std::function<bool(LocalDocumentId const&)> callback) const; std::function<bool(LocalDocumentId const&)> callback) const;
/// insert index elements into the specified write batch. /// insert index elements into the specified write batch.
Result insertInternal(transaction::Methods* trx, RocksDBMethods*, Result insertInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; Index::OperationMode mode
) override;
Result updateInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc,
OperationMode mode) override;
/// remove index elements and put it in the specified write batch. /// remove index elements and put it in the specified write batch.
Result removeInternal(transaction::Methods*, RocksDBMethods*, Result removeInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; Index::OperationMode mode
) override;
Result updateInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& oldDocumentId,
velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc,
Index::OperationMode mode
) override;
private: private:
/// @brief create the iterator, for a single attribute, IN operator /// @brief create the iterator, for a single attribute, IN operator

View File

@ -633,28 +633,33 @@ void RocksDBVPackIndex::fillPaths(std::vector<std::vector<std::string>>& paths,
} }
/// @brief inserts a document into the index /// @brief inserts a document into the index
Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx, Result RocksDBVPackIndex::insertInternal(
transaction::Methods& trx,
RocksDBMethods* mthds, RocksDBMethods* mthds,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
rocksdb::Status s; rocksdb::Status s;
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena; SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
SmallVector<RocksDBKey> elements{elementsArena}; SmallVector<RocksDBKey> elements{elementsArena};
SmallVector<uint64_t>::allocator_type::arena_type hashesArena; SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
SmallVector<uint64_t> hashes{hashesArena}; SmallVector<uint64_t> hashes{hashesArena};
{ {
// rethrow all types of exceptions from here... // rethrow all types of exceptions from here...
transaction::BuilderLeaser leased(trx); transaction::BuilderLeaser leased(&trx);
int r = fillElement(*(leased.get()), documentId, doc, elements, hashes); int r = fillElement(*(leased.get()), documentId, doc, elements, hashes);
if (r != TRI_ERROR_NO_ERROR) { if (r != TRI_ERROR_NO_ERROR) {
return addErrorMsg(res, r); return addErrorMsg(res, r);
} }
} }
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL)); IndexingDisabler guard(
mthds, !_unique && trx.hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL)
);
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
@ -681,7 +686,8 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
} }
if (res.ok() && !_unique) { if (res.ok() && !_unique) {
auto state = RocksDBTransactionState::toState(trx); auto state = RocksDBTransactionState::toState(&trx);
for (auto& it : hashes) { for (auto& it : hashes) {
// The estimator is only useful if we are in a non-unique indexes // The estimator is only useful if we are in a non-unique indexes
TRI_ASSERT(!_unique); TRI_ASSERT(!_unique);
@ -691,7 +697,9 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
// find conflicting document // find conflicting document
LocalDocumentId docId = RocksDBValue::documentId(existing); LocalDocumentId docId = RocksDBValue::documentId(existing);
std::string existingKey; std::string existingKey;
bool success = _collection.getPhysical()->readDocumentWithCallback(trx, docId, auto success = _collection.getPhysical()->readDocumentWithCallback(
&trx,
docId,
[&](LocalDocumentId const&, VPackSlice doc) { [&](LocalDocumentId const&, VPackSlice doc) {
existingKey = transaction::helpers::extractKeyFromDocument(doc).copyString(); existingKey = transaction::helpers::extractKeyFromDocument(doc).copyString();
}); });
@ -710,11 +718,14 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
} }
Result RocksDBVPackIndex::updateInternal( Result RocksDBVPackIndex::updateInternal(
transaction::Methods* trx, RocksDBMethods* mthds, transaction::Methods& trx,
RocksDBMethods* mthds,
LocalDocumentId const& oldDocumentId, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc, velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId, velocypack::Slice const& newDoc, LocalDocumentId const& newDocumentId,
OperationMode mode) { velocypack::Slice const& newDoc,
Index::OperationMode mode
) {
if (!_unique || _useExpansion) { if (!_unique || _useExpansion) {
// only unique index supports in-place updates // only unique index supports in-place updates
// lets also not handle the complex case of expanded arrays // lets also not handle the complex case of expanded arrays
@ -723,12 +734,13 @@ Result RocksDBVPackIndex::updateInternal(
} else { } else {
Result res; Result res;
rocksdb::Status s; rocksdb::Status s;
bool equal = true; bool equal = true;
for (size_t i = 0; i < _paths.size(); ++i) { for (size_t i = 0; i < _paths.size(); ++i) {
TRI_ASSERT(!_paths[i].empty()); TRI_ASSERT(!_paths[i].empty());
VPackSlice oldSlice = oldDoc.get(_paths[i]); VPackSlice oldSlice = oldDoc.get(_paths[i]);
VPackSlice newSlice = newDoc.get(_paths[i]); VPackSlice newSlice = newDoc.get(_paths[i]);
if ((oldSlice.isNone() || oldSlice.isNull()) && if ((oldSlice.isNone() || oldSlice.isNull()) &&
(newSlice.isNone() || newSlice.isNull())) { (newSlice.isNone() || newSlice.isNull())) {
// attribute not found // attribute not found
@ -755,8 +767,9 @@ Result RocksDBVPackIndex::updateInternal(
SmallVector<uint64_t> hashes{hashesArena}; SmallVector<uint64_t> hashes{hashesArena};
{ {
// rethrow all types of exceptions from here... // rethrow all types of exceptions from here...
transaction::BuilderLeaser leased(trx); transaction::BuilderLeaser leased(&trx);
int r = fillElement(*(leased.get()), newDocumentId, newDoc, elements, hashes); int r = fillElement(*(leased.get()), newDocumentId, newDoc, elements, hashes);
if (r != TRI_ERROR_NO_ERROR) { if (r != TRI_ERROR_NO_ERROR) {
return addErrorMsg(res, r); return addErrorMsg(res, r);
} }
@ -778,14 +791,15 @@ Result RocksDBVPackIndex::updateInternal(
} }
/// @brief removes a document from the index /// @brief removes a document from the index
Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx, Result RocksDBVPackIndex::removeInternal(
transaction::Methods& trx,
RocksDBMethods* mthds, RocksDBMethods* mthds,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
VPackSlice const& doc, velocypack::Slice const& doc,
OperationMode mode) { Index::OperationMode mode
) {
Result res; Result res;
rocksdb::Status s; rocksdb::Status s;
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena; SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
SmallVector<RocksDBKey> elements{elementsArena}; SmallVector<RocksDBKey> elements{elementsArena};
SmallVector<uint64_t>::allocator_type::arena_type hashesArena; SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
@ -793,19 +807,24 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
{ {
// rethrow all types of exceptions from here... // rethrow all types of exceptions from here...
transaction::BuilderLeaser leased(trx); transaction::BuilderLeaser leased(&trx);
int r = fillElement(*(leased.get()), documentId, doc, elements, hashes); int r = fillElement(*(leased.get()), documentId, doc, elements, hashes);
if (r != TRI_ERROR_NO_ERROR) { if (r != TRI_ERROR_NO_ERROR) {
return addErrorMsg(res, r); return addErrorMsg(res, r);
} }
} }
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL)); IndexingDisabler guard(
mthds, !_unique && trx.hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL)
);
size_t const count = elements.size(); size_t const count = elements.size();
if (_unique) { if (_unique) {
for (size_t i = 0; i < count; ++i) { for (size_t i = 0; i < count; ++i) {
s = mthds->Delete(_cf, elements[i]); s = mthds->Delete(_cf, elements[i]);
if (!s.ok()) { if (!s.ok()) {
res.reset(rocksutils::convertStatus(s, rocksutils::index)); res.reset(rocksutils::convertStatus(s, rocksutils::index));
} }
@ -822,7 +841,8 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
} }
if (res.ok() && !_unique) { if (res.ok() && !_unique) {
auto state = RocksDBTransactionState::toState(trx); auto state = RocksDBTransactionState::toState(&trx);
for (auto& it : hashes) { for (auto& it : hashes) {
// The estimator is only useful if we are in a non-unique indexes // The estimator is only useful if we are in a non-unique indexes
TRI_ASSERT(!_unique); TRI_ASSERT(!_unique);

View File

@ -209,22 +209,31 @@ class RocksDBVPackIndex : public RocksDBIndex {
void afterTruncate(TRI_voc_tick_t tick) override; void afterTruncate(TRI_voc_tick_t tick) override;
protected: protected:
Result insertInternal(transaction::Methods*, RocksDBMethods*, Result insertInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&, velocypack::Slice const& doc,
OperationMode mode) override; Index::OperationMode mode
) override;
Result updateInternal(transaction::Methods* trx, RocksDBMethods*, Result removeInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override;
Result updateInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& oldDocumentId, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc, velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId, LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc, velocypack::Slice const& newDoc,
OperationMode mode) override; Index::OperationMode mode
) override;
Result removeInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) override;
private: private:
/// @brief return the number of paths /// @brief return the number of paths

View File

@ -133,8 +133,10 @@ class PhysicalCollection {
// -- SECTION DML Operations -- // -- SECTION DML Operations --
/////////////////////////////////// ///////////////////////////////////
virtual Result truncate(transaction::Methods* trx, virtual Result truncate(
OperationOptions& options) = 0; transaction::Methods& trx,
OperationOptions& options
) = 0;
/// @brief Defer a callback to be executed when the collection /// @brief Defer a callback to be executed when the collection
/// can be dropped. The callback is supposed to drop /// can be dropped. The callback is supposed to drop
@ -204,14 +206,18 @@ class PhysicalCollection {
ManagedDocumentResult& previous, ManagedDocumentResult& previous,
std::function<Result(void)> callbackDuringLock) = 0; std::function<Result(void)> callbackDuringLock) = 0;
virtual Result remove(arangodb::transaction::Methods* trx, virtual Result remove(
arangodb::velocypack::Slice slice, transaction::Methods& trx,
arangodb::ManagedDocumentResult& previous, velocypack::Slice slice,
ManagedDocumentResult& previous,
OperationOptions& options, OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick, bool lock, TRI_voc_tick_t& resultMarkerTick,
TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId, bool lock,
TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId,
KeyLockInfo* keyLockInfo, KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock) = 0; std::function<Result(void)> callbackDuringLock
) = 0;
protected: protected:
PhysicalCollection( PhysicalCollection(

View File

@ -2410,9 +2410,17 @@ OperationResult transaction::Methods::removeLocal(
TRI_ASSERT(needsLock == !isLocked(collection, AccessMode::Type::WRITE)); TRI_ASSERT(needsLock == !isLocked(collection, AccessMode::Type::WRITE));
Result res = auto res = collection->remove(
collection->remove(this, value, options, resultMarkerTick, needsLock, *this,
actualRevision, previous, &keyLockInfo, updateFollowers); value,
options,
resultMarkerTick,
needsLock,
actualRevision,
previous,
&keyLockInfo,
updateFollowers
);
if (resultMarkerTick > 0 && resultMarkerTick > maxTick) { if (resultMarkerTick > 0 && resultMarkerTick > maxTick) {
maxTick = resultMarkerTick; maxTick = resultMarkerTick;
@ -2622,11 +2630,13 @@ OperationResult transaction::Methods::truncateLocal(
TRI_ASSERT(isLocked(collection, AccessMode::Type::WRITE)); TRI_ASSERT(isLocked(collection, AccessMode::Type::WRITE));
Result res = collection->truncate(this, options); auto res = collection->truncate(*this, options);
if (res.fail()) { if (res.fail()) {
if (lockResult.is(TRI_ERROR_LOCKED)) { if (lockResult.is(TRI_ERROR_LOCKED)) {
unlockRecursive(cid, AccessMode::Type::WRITE); unlockRecursive(cid, AccessMode::Type::WRITE);
} }
return OperationResult(res); return OperationResult(res);
} }

View File

@ -907,11 +907,14 @@ Result LogicalCollection::read(transaction::Methods* trx, arangodb::velocypack::
/// the read-cache /// the read-cache
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
Result LogicalCollection::truncate(transaction::Methods* trx, Result LogicalCollection::truncate(
OperationOptions& options) { transaction::Methods& trx,
OperationOptions& options
) {
TRI_IF_FAILURE("LogicalCollection::truncate") { TRI_IF_FAILURE("LogicalCollection::truncate") {
return Result(TRI_ERROR_DEBUG); return Result(TRI_ERROR_DEBUG);
} }
return getPhysical()->truncate(trx, options); return getPhysical()->truncate(trx, options);
} }
@ -983,11 +986,16 @@ Result LogicalCollection::replace(
/// @brief removes a document or edge /// @brief removes a document or edge
Result LogicalCollection::remove( Result LogicalCollection::remove(
transaction::Methods* trx, VPackSlice const slice, transaction::Methods& trx,
OperationOptions& options, TRI_voc_tick_t& resultMarkerTick, bool lock, velocypack::Slice const slice,
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous, OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick,
bool lock,
TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous,
KeyLockInfo* keyLockInfo, KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock) { std::function<Result(void)> callbackDuringLock
) {
TRI_IF_FAILURE("LogicalCollection::remove") { TRI_IF_FAILURE("LogicalCollection::remove") {
return Result(TRI_ERROR_DEBUG); return Result(TRI_ERROR_DEBUG);
} }

View File

@ -276,7 +276,7 @@ class LogicalCollection : public LogicalDataSource {
ManagedDocumentResult& result, bool); ManagedDocumentResult& result, bool);
/// @brief processes a truncate operation /// @brief processes a truncate operation
Result truncate(transaction::Methods* trx, OperationOptions&); Result truncate(transaction::Methods& trx, OperationOptions& options);
// convenience function for downwards-compatibility // convenience function for downwards-compatibility
Result insert(transaction::Methods* trx, velocypack::Slice const slice, Result insert(transaction::Methods* trx, velocypack::Slice const slice,
@ -311,11 +311,17 @@ class LogicalCollection : public LogicalDataSource {
ManagedDocumentResult& previous, ManagedDocumentResult& previous,
std::function<Result(void)> callbackDuringLock); std::function<Result(void)> callbackDuringLock);
Result remove(transaction::Methods*, velocypack::Slice, Result remove(
OperationOptions&, TRI_voc_tick_t&, bool lock, transaction::Methods& trx,
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous, velocypack::Slice slice,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick,
bool lock,
TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous,
KeyLockInfo* keyLockInfo, KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock); std::function<Result(void)> callbackDuringLock
);
bool readDocument(transaction::Methods* trx, bool readDocument(transaction::Methods* trx,
LocalDocumentId const& token, LocalDocumentId const& token,

View File

@ -547,6 +547,7 @@ SECTION("test_text_features") {
} }
SECTION("test_persistence") { SECTION("test_persistence") {
static std::vector<std::string> const EMPTY;
auto* database = arangodb::application_features::ApplicationServer::lookupFeature< auto* database = arangodb::application_features::ApplicationServer::lookupFeature<
arangodb::SystemDatabaseFeature arangodb::SystemDatabaseFeature
>(); >();
@ -701,7 +702,14 @@ SECTION("test_persistence") {
arangodb::OperationOptions options; arangodb::OperationOptions options;
arangodb::ManagedDocumentResult result; arangodb::ManagedDocumentResult result;
auto collection = vocbase->lookupCollection("_iresearch_analyzers"); auto collection = vocbase->lookupCollection("_iresearch_analyzers");
collection->truncate(nullptr, options); arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((collection->truncate(trx, options).ok()));
} }
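This test change follows directly from the new signature: truncate() could previously be handed a nullptr transaction, which is exactly the call shape the reference-based API rules out, so the test now constructs a real standalone transaction first. For reference, the replacement pattern exactly as it appears in the hunk above (EMPTY is the static empty collection-name vector added earlier in the same test section; the commented-out line is the call that was removed):

// before (removed): collection->truncate(nullptr, options);

// after: build a real transaction, then hand it over by reference
arangodb::transaction::Methods trx(
    arangodb::transaction::StandaloneContext::Create(*vocbase),
    EMPTY, EMPTY, EMPTY,                    // collection-name lists, all empty here
    arangodb::transaction::Options()
);
CHECK((collection->truncate(trx, options).ok()));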
{ {

View File

@ -498,7 +498,7 @@ SECTION("test_drop") {
} }
CHECK((true == (*dynamic_cast<arangodb::iresearch::IResearchLink*>(link0.get()) == *logicalView))); CHECK((true == (*dynamic_cast<arangodb::iresearch::IResearchLink*>(link0.get()) == *logicalView)));
CHECK((TRI_ERROR_NO_ERROR == link0->drop())); CHECK((link0->drop().ok()));
CHECK((true == (*dynamic_cast<arangodb::iresearch::IResearchLink*>(link0.get()) == *logicalView))); CHECK((true == (*dynamic_cast<arangodb::iresearch::IResearchLink*>(link0.get()) == *logicalView)));
// collection not in view after // collection not in view after
@ -649,7 +649,6 @@ SECTION("test_write") {
REQUIRE((false == !link && created)); REQUIRE((false == !link && created));
auto reader = irs::directory_reader::open(directory); auto reader = irs::directory_reader::open(directory);
CHECK((0 == reader.reopen().live_docs_count())); CHECK((0 == reader.reopen().live_docs_count()));
CHECK((TRI_ERROR_BAD_PARAMETER == link->insert(nullptr, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).errorNumber()));
{ {
arangodb::transaction::Methods trx( arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(vocbase), arangodb::transaction::StandaloneContext::Create(vocbase),
@ -659,7 +658,7 @@ SECTION("test_write") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
} }
@ -676,7 +675,7 @@ SECTION("test_write") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
} }
@ -693,7 +692,7 @@ SECTION("test_write") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->remove(&trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->remove(trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
} }

View File

@ -461,7 +461,7 @@ SECTION("test_cleanup") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -479,7 +479,7 @@ SECTION("test_cleanup") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->remove(&trx, arangodb::LocalDocumentId(0), arangodb::velocypack::Slice::emptyObjectSlice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->remove(trx, arangodb::LocalDocumentId(0), arangodb::velocypack::Slice::emptyObjectSlice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -668,7 +668,7 @@ SECTION("test_drop_cid") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -743,7 +743,7 @@ SECTION("test_drop_cid") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -818,7 +818,7 @@ SECTION("test_drop_cid") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -910,7 +910,7 @@ SECTION("test_drop_cid") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -997,7 +997,7 @@ SECTION("test_drop_cid") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -1158,7 +1158,7 @@ SECTION("test_truncate_cid") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@ -1233,7 +1233,7 @@ SECTION("test_truncate_cid") {
arangodb::transaction::Options() arangodb::transaction::Options()
); );
CHECK((trx.begin().ok())); CHECK((trx.begin().ok()));
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok())); CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok())); CHECK((trx.commit().ok()));
CHECK(view->commit().ok()); CHECK(view->commit().ok());
} }
@@ -1619,10 +1619,10 @@ SECTION("test_insert") {
  linkMeta._includeAllFields = true;
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
  CHECK((trx.commit().ok()));
  CHECK(view->commit().ok());
  }
@@ -1675,8 +1675,8 @@ SECTION("test_insert") {
  linkMeta._includeAllFields = true;
  CHECK((trx.begin().ok()));
- link->batchInsert(&trx, batch, taskQueuePtr);
- link->batchInsert(&trx, batch, taskQueuePtr); // 2nd time
+ link->batchInsert(trx, batch, taskQueuePtr);
+ link->batchInsert(trx, batch, taskQueuePtr); // 2nd time
  CHECK((TRI_ERROR_NO_ERROR == taskQueue.status()));
  CHECK((trx.commit().ok()));
  CHECK(view->commit().ok());
@@ -1722,10 +1722,10 @@ SECTION("test_insert") {
  linkMeta._includeAllFields = true;
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
  CHECK((trx.commit().ok()));
  CHECK(view->commit().ok());
  }
@@ -1772,10 +1772,10 @@ SECTION("test_insert") {
  linkMeta._includeAllFields = true;
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
  CHECK((trx.commit().ok()));
  }
@@ -1822,7 +1822,7 @@ SECTION("test_insert") {
  linkMeta._includeAllFields = true;
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
  CHECK((trx.commit().ok()));
  }
@@ -1872,8 +1872,8 @@ SECTION("test_insert") {
  linkMeta._includeAllFields = true;
  CHECK((trx.begin().ok()));
- link->batchInsert(&trx, batch, taskQueuePtr);
- link->batchInsert(&trx, batch, taskQueuePtr); // 2nd time
+ link->batchInsert(trx, batch, taskQueuePtr);
+ link->batchInsert(trx, batch, taskQueuePtr); // 2nd time
  CHECK((TRI_ERROR_NO_ERROR == taskQueue.status()));
  CHECK((trx.commit().ok()));
  CHECK(view->commit().ok());
@@ -1926,8 +1926,8 @@ SECTION("test_insert") {
  linkMeta._includeAllFields = true;
  CHECK((trx.begin().ok()));
- link->batchInsert(&trx, batch, taskQueuePtr);
- link->batchInsert(&trx, batch, taskQueuePtr); // 2nd time
+ link->batchInsert(trx, batch, taskQueuePtr);
+ link->batchInsert(trx, batch, taskQueuePtr); // 2nd time
  CHECK((TRI_ERROR_NO_ERROR == taskQueue.status()));
  CHECK((trx.commit().ok()));
  }
@@ -2023,7 +2023,7 @@ SECTION("test_query") {
  CHECK((trx.begin().ok()));
  for (size_t i = 0; i < 12; ++i) {
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  }
  CHECK((trx.commit().ok()));
@@ -2489,7 +2489,7 @@ SECTION("test_unregister_link") {
  arangodb::transaction::Options()
  );
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  CHECK((trx.commit().ok()));
  CHECK((view->commit().ok()));
  }
@@ -2594,7 +2594,7 @@ SECTION("test_unregister_link") {
  arangodb::transaction::Options()
  );
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  CHECK((trx.commit().ok()));
  CHECK((view->commit().ok()));
  }
@@ -2873,7 +2873,7 @@ SECTION("test_tracked_cids") {
  arangodb::transaction::Options()
  );
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  CHECK((trx.commit().ok()));
  feature->executeCallbacks(); // commit to persisted store
  }
@@ -3271,7 +3271,7 @@ SECTION("test_transaction_snapshot") {
  arangodb::transaction::Options()
  );
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  CHECK((trx.commit().ok()));
  }
@@ -3351,7 +3351,7 @@ SECTION("test_transaction_snapshot") {
  arangodb::transaction::Options()
  );
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(1), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  CHECK((trx.commit().ok()));
  }
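All of the test hunks above make the same mechanical change: the link's insert and batchInsert now take the transaction by reference, so call sites pass trx instead of &trx. For readers skimming the diff, here is a minimal, self-contained C++ sketch of that pattern; the Transaction, Result and Index types below are invented stand-ins for illustration, not ArangoDB's transaction::Methods or Index.

// Invented stand-ins for illustration only; not ArangoDB's real classes.
#include <iostream>

struct Transaction {
  bool begun = false;
  bool begin() { begun = true; return true; }
  bool commit() const { return begun; }
};

struct Result {
  int code = 0;
  bool ok() const { return code == 0; }
};

struct Index {
  // Old style: a pointer admits nullptr, so a defensive check (or a crash) lurks inside.
  Result insertOld(Transaction* trx, int docId) {
    (void)docId;
    if (trx == nullptr) {
      return Result{1};
    }
    return Result{};
  }

  // New style: a reference states "a live transaction is always provided".
  Result insertNew(Transaction& trx, int docId) {
    (void)trx;
    (void)docId;
    return Result{};
  }
};

int main() {
  Transaction trx;
  Index index;
  trx.begin();
  Result a = index.insertOld(&trx, 1);  // caller takes the address
  Result b = index.insertNew(trx, 1);   // caller passes the object itself
  std::cout << a.ok() << ' ' << b.ok() << '\n';
  return trx.commit() ? 0 : 1;
}

The reference overload cannot be handed a null transaction at the call site, which is exactly the class of bug the commit message says this refactoring is meant to rule out.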

View File

@@ -573,7 +573,7 @@ SECTION("test_query") {
  CHECK((trx.begin().ok()));
  for (size_t i = 0; i < 12; ++i) {
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  }
  CHECK((trx.commit().ok()));
@@ -971,7 +971,7 @@ SECTION("test_transaction_snapshot") {
  arangodb::transaction::Options()
  );
  CHECK((trx.begin().ok()));
- CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
+ CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
  CHECK((trx.commit().ok()));
  }

View File

@@ -208,7 +208,7 @@ class EdgeIndexMock final : public arangodb::Index {
  }
  arangodb::Result insert(
- arangodb::transaction::Methods*,
+ arangodb::transaction::Methods& trx,
  arangodb::LocalDocumentId const& documentId,
  arangodb::velocypack::Slice const& doc,
  OperationMode
@@ -236,7 +236,7 @@ class EdgeIndexMock final : public arangodb::Index {
  }
  arangodb::Result remove(
- arangodb::transaction::Methods*,
+ arangodb::transaction::Methods& trx,
  arangodb::LocalDocumentId const&,
  arangodb::velocypack::Slice const& doc,
  OperationMode
@@ -594,7 +594,7 @@ std::shared_ptr<arangodb::Index> PhysicalCollectionMock::createIndex(arangodb::v
  auto res = trx.begin();
  TRI_ASSERT(res.ok());
- index->batchInsert(&trx, docs, taskQueuePtr);
+ index->batchInsert(trx, docs, taskQueuePtr);
  if (TRI_ERROR_NO_ERROR != taskQueue.status()) {
  return nullptr;
@@ -622,7 +622,7 @@ bool PhysicalCollectionMock::dropIndex(TRI_idx_iid_t iid) {
  for (auto itr = _indexes.begin(), end = _indexes.end(); itr != end; ++itr) {
  if ((*itr)->id() == iid) {
- if (TRI_ERROR_NO_ERROR == (*itr)->drop()) {
+ if ((*itr)->drop().ok()) {
  _indexes.erase(itr); return true;
  }
  }
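This hunk also reflects a second change visible in the commit: the index drop() operation now returns a Result-style object rather than a bare error code, so the caller tests .ok() instead of comparing against TRI_ERROR_NO_ERROR. A hedged sketch of that calling convention follows; the Result class below is a simplified stand-in, not arangodb::Result itself.

#include <iostream>
#include <string>

// Simplified stand-in for a Result-style return type; not arangodb::Result itself.
class Result {
 public:
  Result() = default;
  Result(int code, std::string message) : code_(code), message_(std::move(message)) {}
  bool ok() const { return code_ == 0; }
  int errorNumber() const { return code_; }
  std::string const& errorMessage() const { return message_; }
 private:
  int code_ = 0;
  std::string message_;
};

// Old style: a bare error code, compared against magic constants at every call site.
int dropOld() { return 0; }

// New style: a Result carries the code plus context and reads naturally as .ok().
Result dropNew() { return Result(); }

int main() {
  if (dropOld() == 0) {
    std::cout << "dropped (old style)\n";
  }
  Result r = dropNew();
  if (r.ok()) {
    std::cout << "dropped (new style)\n";
  } else {
    std::cout << "drop failed: " << r.errorNumber() << " " << r.errorMessage() << '\n';
  }
  return 0;
}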
@@ -685,7 +685,7 @@ arangodb::Result PhysicalCollectionMock::insert(
  result.setUnmanaged(documents.back().first.data(), docId);
  for (auto& index : _indexes) {
- if (!index->insert(trx, docId, newSlice, arangodb::Index::OperationMode::normal).ok()) {
+ if (!index->insert(*trx, docId, newSlice, arangodb::Index::OperationMode::normal).ok()) {
  return arangodb::Result(TRI_ERROR_BAD_PARAMETER);
  }
  }
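PhysicalCollectionMock::insert still receives the transaction as a pointer, so when it forwards to the now reference-taking index API it dereferences with *trx; any null check belongs at this boundary instead of inside every index operation. A small illustrative sketch with invented names:

// Invented stand-ins for illustration; not ArangoDB's real types.
struct Transaction {};

struct Result {
  int code = 0;
  bool ok() const { return code == 0; }
};

// Reference-taking index operation: it may assume a valid transaction.
Result indexInsert(Transaction& /*trx*/, int /*docId*/) { return Result{}; }

// A caller that still holds a pointer validates once, then dereferences.
Result collectionInsert(Transaction* trx, int docId) {
  if (trx == nullptr) {
    return Result{1};               // reject bad input at the boundary
  }
  return indexInsert(*trx, docId);  // forward the object, not the pointer
}

int main() {
  Transaction trx;
  if (!collectionInsert(&trx, 42).ok()) return 1;
  if (collectionInsert(nullptr, 42).ok()) return 1;  // must fail cleanly, not crash
  return 0;
}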
@@ -859,12 +859,17 @@ bool PhysicalCollectionMock::readDocumentWithCallback(arangodb::transaction::Met
  }
  arangodb::Result PhysicalCollectionMock::remove(
- arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
+ arangodb::transaction::Methods& trx,
+ arangodb::velocypack::Slice slice,
  arangodb::ManagedDocumentResult& previous,
- arangodb::OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
- bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
+ arangodb::OperationOptions& options,
+ TRI_voc_tick_t& resultMarkerTick,
+ bool lock,
+ TRI_voc_rid_t& prevRev,
+ TRI_voc_rid_t& revisionId,
  arangodb::KeyLockInfo* /*keyLockInfo*/,
- std::function<arangodb::Result(void)> callbackDuringLock) {
+ std::function<arangodb::Result(void)> callbackDuringLock
+ ) {
  TRI_ASSERT(callbackDuringLock == nullptr); // not implemented
  before();
@@ -920,7 +925,10 @@ void PhysicalCollectionMock::setPath(std::string const& value) {
  physicalPath = value;
  }
- arangodb::Result PhysicalCollectionMock::truncate(arangodb::transaction::Methods*, arangodb::OperationOptions&) {
+ arangodb::Result PhysicalCollectionMock::truncate(
+ arangodb::transaction::Methods& trx,
+ arangodb::OperationOptions& options
+ ) {
  before();
  documents.clear();
  return arangodb::Result();
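The remove and truncate hunks above show the other recurring edit: the transaction parameter becomes a reference and the long parameter lists are reflowed one per line. A rough, compilable sketch of the resulting shape, using simplified placeholder types rather than the real ArangoDB signatures:

#include <vector>

// Simplified placeholders; the real code uses transaction::Methods,
// OperationOptions, ManagedDocumentResult and friends.
struct Transaction {};
struct OperationOptions {};
struct Result {
  int code = 0;
  bool ok() const { return code == 0; }
};

class CollectionMock {
 public:
  // Reference parameters, one per line: the body uses trx and options
  // directly, with no defensive null checks.
  Result truncate(
      Transaction& trx,
      OperationOptions& options
  ) {
    (void)trx;
    (void)options;
    documents_.clear();
    return Result{};
  }

 private:
  std::vector<int> documents_;
};

int main() {
  CollectionMock collection;
  Transaction trx;
  OperationOptions options;
  return collection.truncate(trx, options).ok() ? 0 : 1;
}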

View File

@@ -90,12 +90,17 @@ class PhysicalCollectionMock: public arangodb::PhysicalCollection {
  virtual bool readDocument(arangodb::transaction::Methods* trx, arangodb::LocalDocumentId const& token, arangodb::ManagedDocumentResult& result) const override;
  virtual bool readDocumentWithCallback(arangodb::transaction::Methods* trx, arangodb::LocalDocumentId const& token, arangodb::IndexIterator::DocumentCallback const& cb) const override;
  virtual arangodb::Result remove(
- arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
+ arangodb::transaction::Methods& trx,
+ arangodb::velocypack::Slice slice,
  arangodb::ManagedDocumentResult& previous,
- arangodb::OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
- bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
+ arangodb::OperationOptions& options,
+ TRI_voc_tick_t& resultMarkerTick,
+ bool lock,
+ TRI_voc_rid_t& prevRev,
+ TRI_voc_rid_t& revisionId,
  arangodb::KeyLockInfo* /*keyLockInfo*/,
- std::function<arangodb::Result(void)> callbackDuringLock) override;
+ std::function<arangodb::Result(void)> callbackDuringLock
+ ) override;
  virtual arangodb::Result replace(
  arangodb::transaction::Methods* trx,
  arangodb::velocypack::Slice const newSlice,
@@ -106,7 +111,10 @@ class PhysicalCollectionMock: public arangodb::PhysicalCollection {
  std::function<arangodb::Result(void)> callbackDuringLock) override;
  virtual TRI_voc_rid_t revision(arangodb::transaction::Methods* trx) const override;
  virtual void setPath(std::string const&) override;
- virtual arangodb::Result truncate(arangodb::transaction::Methods* trx, arangodb::OperationOptions&) override;
+ virtual arangodb::Result truncate(
+ arangodb::transaction::Methods& trx,
+ arangodb::OperationOptions& options
+ ) override;
  virtual arangodb::Result update(
  arangodb::transaction::Methods* trx,
  arangodb::velocypack::Slice const newSlice,
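Because these mock declarations are marked override, keeping the old pointer-taking signature after the PhysicalCollection base class switched to references would fail to compile, which is presumably how every implementation touched by this commit was forced onto the new contract. A minimal sketch of that mechanism, with invented class names:

// Invented example hierarchy; not ArangoDB's PhysicalCollection classes.
struct Transaction {};

struct Result {
  bool ok() const { return true; }
};

struct PhysicalCollectionBase {
  virtual ~PhysicalCollectionBase() = default;
  // The base contract now takes the transaction by reference.
  virtual Result truncate(Transaction& trx) = 0;
};

struct CollectionMock final : PhysicalCollectionBase {
  // Must match the reference-taking base signature; declaring
  //   Result truncate(Transaction* trx) override;
  // here would be rejected by the compiler ("does not override").
  Result truncate(Transaction& trx) override {
    (void)trx;
    return Result{};
  }
};

int main() {
  CollectionMock mock;
  Transaction trx;
  return mock.truncate(trx).ok() ? 0 : 1;
}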