mirror of https://gitee.com/bigwinds/arangodb
issue 511.2.1: use references instead of raw pointers for Index operations to avoid null pointer access (#7725)
parent 5153ea0807
commit f1245af554
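The pattern is identical across the storage engines and ArangoSearch link classes touched below: DML and index operations that used to accept `transaction::Methods*` now accept `transaction::Methods&`, so callers can no longer hand in a null transaction and callees no longer need to guard against one (which is why, for example, the `if (!trx)` branches in `IResearchLink::batchInsert`, `insert` and `remove` are deleted). A minimal sketch of the before/after shape, using hypothetical `Result`/`Methods` stand-ins rather than the real ArangoDB types:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-ins for arangodb::Result and transaction::Methods,
// used only to illustrate the refactoring pattern in this commit;
// they are not the real ArangoDB classes.
struct Result {
  int errorNumber = 0;
  std::string message;
  bool ok() const { return errorNumber == 0; }
};

struct Methods {
  bool hasState() const { return true; }  // pretend transaction-state lookup
};

// Before: a raw pointer parameter. Every callee must guard against nullptr
// (or risk a null pointer access), and every error path must be handled.
Result insertWithPointer(Methods* trx) {
  if (trx == nullptr) {
    return {1, "failed to get transaction while inserting a document"};
  }
  return trx->hasState() ? Result{} : Result{1, "no transaction state"};
}

// After: a reference parameter. A valid transaction becomes a compile-time
// precondition, so the null check and its error path disappear.
Result insertWithReference(Methods& trx) {
  return trx.hasState() ? Result{} : Result{1, "no transaction state"};
}

int main() {
  Methods trx;
  std::cout << insertWithPointer(&trx).ok() << '\n';   // old style: pass &trx
  std::cout << insertWithReference(trx).ok() << '\n';  // new style: pass trx
}
```

The same reasoning drives the return-type changes in the diff: bare `int` error codes (`TRI_ERROR_*`) for `drop()` and `sizeHint()` are replaced by `Result`, so error state is carried in one typed object instead of a raw integer.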
@@ -477,8 +477,10 @@ void ClusterCollection::invokeOnAllElements(
// -- SECTION DML Operations --
///////////////////////////////////

Result ClusterCollection::truncate(transaction::Methods* trx,
OperationOptions& options) {
Result ClusterCollection::truncate(
transaction::Methods& trx,
OperationOptions& options
) {
return Result(TRI_ERROR_NOT_IMPLEMENTED);
}
@@ -537,11 +539,17 @@ Result ClusterCollection::replace(
}

Result ClusterCollection::remove(
arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
arangodb::ManagedDocumentResult& previous, OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick, bool, TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId, KeyLockInfo* /*keyLock*/,
std::function<Result(void)> /*callbackDuringLock*/) {
transaction::Methods& trx,
velocypack::Slice slice,
ManagedDocumentResult& previous,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick,
bool lock,
TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId,
KeyLockInfo* /*keyLock*/,
std::function<Result(void)> /*callbackDuringLock*/
) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
@@ -128,7 +128,10 @@ class ClusterCollection final : public PhysicalCollection {
// -- SECTION DML Operations --
///////////////////////////////////

Result truncate(transaction::Methods* trx, OperationOptions&) override;
Result truncate(
transaction::Methods& trx,
OperationOptions& options
) override;

void deferDropCollection(
std::function<bool(LogicalCollection&)> const& callback
@@ -175,13 +178,18 @@ class ClusterCollection final : public PhysicalCollection {
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
std::function<Result(void)> callbackDuringLock) override;

Result remove(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice slice,
arangodb::ManagedDocumentResult& previous,
OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
KeyLockInfo* /*keyLockInfo*/,
std::function<Result(void)> callbackDuringLock) override;
Result remove(
transaction::Methods& trx,
velocypack::Slice slice,
ManagedDocumentResult& previous,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick,
bool lock,
TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId,
KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock
) override;

protected:
/// @brief Inject figures that are specific to StorageEngine
@@ -68,7 +68,7 @@ class ClusterIndex : public Index {
bool hasSelectivityEstimate() const override;

double selectivityEstimate(arangodb::StringRef const& = arangodb::StringRef()) const override;

/// @brief update the cluster selectivity estimate
void updateClusterSelectivityEstimate(double estimate) override;
@@ -76,7 +76,25 @@ class ClusterIndex : public Index {
void unload() override {}
size_t memory() const override { return 0; }

int drop() override { return TRI_ERROR_NOT_IMPLEMENTED; }
Result insert(
transaction::Methods& trx,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override {
return Result(TRI_ERROR_NOT_IMPLEMENTED);
}

Result remove(
transaction::Methods& trx,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
Index::OperationMode mode
) override {
return Result(TRI_ERROR_NOT_IMPLEMENTED);
}

Result drop() override { return Result(TRI_ERROR_NOT_IMPLEMENTED); }

bool hasCoveringIterator() const override;
@@ -96,21 +114,23 @@ class ClusterIndex : public Index {
arangodb::aql::AstNode* specializeCondition(
arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;

virtual arangodb::IndexIterator* iteratorForCondition(
arangodb::transaction::Methods* trx,
arangodb::ManagedDocumentResult* result,
arangodb::aql::AstNode const* condNode,
arangodb::aql::Variable const* var,
arangodb::IndexIteratorOptions const& opts
) override {
TRI_ASSERT(false); // should not be called
return nullptr;
}

/// @brief provides a size hint for the index
int sizeHint(transaction::Methods* /*trx*/, size_t /*size*/) override final {
// nothing to do here
return TRI_ERROR_NO_ERROR;
}

Result insert(transaction::Methods* trx, LocalDocumentId const& documentId,
velocypack::Slice const& doc, OperationMode mode) override {
return TRI_ERROR_NOT_IMPLEMENTED;
}

Result remove(transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
OperationMode mode) override {
return TRI_ERROR_NOT_IMPLEMENTED;
Result sizeHint(
transaction::Methods& /*trx*/,
size_t /*size*/
) override final {
return Result(); // nothing to do here
}

void updateProperties(velocypack::Slice const&);

@@ -123,4 +143,4 @@ class ClusterIndex : public Index {
};
} // namespace arangodb

#endif
#endif
@@ -290,7 +290,7 @@ void IResearchLink::afterTruncate() {
}

void IResearchLink::batchInsert(
transaction::Methods* trx,
arangodb::transaction::Methods& trx,
std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& batch,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
) {

@@ -302,15 +302,7 @@ void IResearchLink::batchInsert(
throw std::runtime_error(std::string("failed to report status during batch insert for arangosearch link '") + arangodb::basics::StringUtils::itoa(_id) + "'");
}

if (!trx) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
<< "failed to get transaction while inserting a document into arangosearch link '" << id() << "'";
queue->setStatus(TRI_ERROR_BAD_PARAMETER); // 'trx' required

return;
}

if (!trx->state()) {
if (!trx.state()) {
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
<< "failed to get transaction state while inserting a document into arangosearch link '" << id() << "'";
queue->setStatus(TRI_ERROR_BAD_PARAMETER); // transaction state required
@ -318,7 +310,7 @@ void IResearchLink::batchInsert(
|
|||
return;
|
||||
}
|
||||
|
||||
auto& state = *(trx->state());
|
||||
auto& state = *(trx.state());
|
||||
auto* key = this;
|
||||
|
||||
// TODO FIXME find a better way to look up a ViewState
|
||||
|
@ -348,7 +340,7 @@ void IResearchLink::batchInsert(
|
|||
ctx = ptr.get();
|
||||
state.cookie(key, std::move(ptr));
|
||||
|
||||
if (!ctx || !trx->addStatusChangeCallback(&_trxCallback)) {
|
||||
if (!ctx || !trx.addStatusChangeCallback(&_trxCallback)) {
|
||||
LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
|
||||
<< "failed to store state into a TransactionState for batch insert into arangosearch link '" << id() << "', tid '" << state.id() << "'";
|
||||
queue->setStatus(TRI_ERROR_INTERNAL);
|
||||
|
@ -872,26 +864,19 @@ arangodb::Result IResearchLink::initDataStore(IResearchView const& view) {
|
|||
}
|
||||
|
||||
arangodb::Result IResearchLink::insert(
|
||||
transaction::Methods* trx,
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
Index::OperationMode mode
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
arangodb::Index::OperationMode mode
|
||||
) {
|
||||
if (!trx) {
|
||||
return arangodb::Result(
|
||||
TRI_ERROR_BAD_PARAMETER,
|
||||
std::string("failed to get transaction while inserting a document into arangosearch link '") + std::to_string(id()) + "'"
|
||||
);
|
||||
}
|
||||
|
||||
if (!trx->state()) {
|
||||
if (!trx.state()) {
|
||||
return arangodb::Result(
|
||||
TRI_ERROR_BAD_PARAMETER,
|
||||
std::string("failed to get transaction state while inserting a document into arangosearch link '") + std::to_string(id()) + "'"
|
||||
);
|
||||
}
|
||||
|
||||
auto& state = *(trx->state());
|
||||
auto& state = *(trx.state());
|
||||
auto* key = this;
|
||||
|
||||
// TODO FIXME find a better way to look up a ViewState
|
||||
|
@ -920,7 +905,7 @@ arangodb::Result IResearchLink::insert(
|
|||
ctx = ptr.get();
|
||||
state.cookie(key, std::move(ptr));
|
||||
|
||||
if (!ctx || !trx->addStatusChangeCallback(&_trxCallback)) {
|
||||
if (!ctx || !trx.addStatusChangeCallback(&_trxCallback)) {
|
||||
return arangodb::Result(
|
||||
TRI_ERROR_INTERNAL,
|
||||
std::string("failed to store state into a TransactionState for insert into arangosearch link '") + std::to_string(id()) + "', tid '" + std::to_string(state.id()) + "', revision '" + std::to_string(documentId.id()) + "'"
|
||||
|
@ -1049,26 +1034,19 @@ size_t IResearchLink::memory() const {
|
|||
}
|
||||
|
||||
arangodb::Result IResearchLink::remove(
|
||||
transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& /*doc*/,
|
||||
Index::OperationMode /*mode*/
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const& /*doc*/,
|
||||
arangodb::Index::OperationMode /*mode*/
|
||||
) {
|
||||
if (!trx) {
|
||||
return arangodb::Result(
|
||||
TRI_ERROR_BAD_PARAMETER,
|
||||
std::string("failed to get transaction while removing a document from arangosearch link '") + std::to_string(id()) + "'"
|
||||
);
|
||||
}
|
||||
|
||||
if (!trx->state()) {
|
||||
if (!trx.state()) {
|
||||
return arangodb::Result(
|
||||
TRI_ERROR_BAD_PARAMETER,
|
||||
std::string("failed to get transaction state while removing a document into arangosearch link '") + std::to_string(id()) + "'"
|
||||
);
|
||||
}
|
||||
|
||||
auto& state = *(trx->state());
|
||||
auto& state = *(trx.state());
|
||||
auto* key = this;
|
||||
|
||||
// TODO FIXME find a better way to look up a ViewState
|
||||
|
@ -1097,7 +1075,7 @@ arangodb::Result IResearchLink::remove(
|
|||
ctx = ptr.get();
|
||||
state.cookie(key, std::move(ptr));
|
||||
|
||||
if (!ctx || !trx->addStatusChangeCallback(&_trxCallback)) {
|
||||
if (!ctx || !trx.addStatusChangeCallback(&_trxCallback)) {
|
||||
return arangodb::Result(
|
||||
TRI_ERROR_INTERNAL,
|
||||
std::string("failed to store state into a TransactionState for remove from arangosearch link '") + std::to_string(id()) + "', tid '" + std::to_string(state.id()) + "', revision '" + std::to_string(documentId.id()) + "'"
|
||||
|
|
|
@ -94,8 +94,8 @@ class IResearchLink {
|
|||
/// '_meta' params
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
virtual void batchInsert(
|
||||
transaction::Methods* trx,
|
||||
std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const& batch,
|
||||
arangodb::transaction::Methods& trx,
|
||||
std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& batch,
|
||||
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
|
||||
); // arangodb::Index override
|
||||
|
||||
|
@ -133,10 +133,10 @@ class IResearchLink {
|
|||
/// @brief insert an ArangoDB document into an iResearch View using '_meta' params
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
arangodb::Result insert(
|
||||
transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
Index::OperationMode mode
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
arangodb::Index::OperationMode mode
|
||||
); // arangodb::Index override
|
||||
|
||||
bool isPersistent() const; // arangodb::Index override
|
||||
|
@ -176,10 +176,10 @@ class IResearchLink {
|
|||
/// @brief remove an ArangoDB document from an iResearch View
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
arangodb::Result remove(
|
||||
transaction::Methods* trx,
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
Index::OperationMode mode
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
arangodb::Index::OperationMode mode
|
||||
); // arangodb::Index override
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
|
|
@ -72,7 +72,7 @@ class IResearchLinkCoordinator final
|
|||
}
|
||||
|
||||
virtual void batchInsert(
|
||||
transaction::Methods* trx,
|
||||
transaction::Methods& trx,
|
||||
std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
|
||||
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
|
||||
) override {
|
||||
|
@ -81,7 +81,7 @@ class IResearchLinkCoordinator final
|
|||
|
||||
virtual bool canBeDropped() const override { return true; }
|
||||
|
||||
virtual int drop() override { return TRI_ERROR_NO_ERROR; }
|
||||
virtual arangodb::Result drop() override { return arangodb::Result(); }
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief the factory for this type of index
|
||||
|
@ -94,7 +94,7 @@ class IResearchLinkCoordinator final
|
|||
virtual bool hasSelectivityEstimate() const override { return false; }
|
||||
|
||||
virtual arangodb::Result insert(
|
||||
transaction::Methods* trx,
|
||||
transaction::Methods& trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode
|
||||
|
@ -108,6 +108,17 @@ class IResearchLinkCoordinator final
|
|||
// IResearch does not provide a fixed default sort order
|
||||
virtual bool isSorted() const override { return false; }
|
||||
|
||||
virtual arangodb::IndexIterator* iteratorForCondition(
|
||||
arangodb::transaction::Methods* trx,
|
||||
arangodb::ManagedDocumentResult* result,
|
||||
arangodb::aql::AstNode const* condNode,
|
||||
arangodb::aql::Variable const* var,
|
||||
arangodb::IndexIteratorOptions const& opts
|
||||
) override {
|
||||
TRI_ASSERT(false); // should not be called
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual void load() override { /* NOOP */ }
|
||||
|
||||
virtual bool matchesDefinition(
|
||||
|
@ -117,7 +128,7 @@ class IResearchLinkCoordinator final
|
|||
virtual size_t memory() const override { return _meta.memory(); }
|
||||
|
||||
arangodb::Result remove(
|
||||
transaction::Methods* trx,
|
||||
transaction::Methods& trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode
|
||||
|
|
|
@ -47,7 +47,7 @@ class IResearchMMFilesLink final
|
|||
};
|
||||
|
||||
virtual void batchInsert(
|
||||
transaction::Methods* trx,
|
||||
arangodb::transaction::Methods& trx,
|
||||
std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
|
||||
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
|
||||
) override {
|
||||
|
@ -58,9 +58,7 @@ class IResearchMMFilesLink final
|
|||
return IResearchLink::canBeDropped();
|
||||
}
|
||||
|
||||
virtual int drop() override {
|
||||
return IResearchLink::drop().errorNumber();
|
||||
}
|
||||
virtual arangodb::Result drop() override { return IResearchLink::drop(); }
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief the factory for this type of index
|
||||
|
@ -76,10 +74,10 @@ class IResearchMMFilesLink final
|
|||
}
|
||||
|
||||
virtual arangodb::Result insert(
|
||||
transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
arangodb::Index::OperationMode mode
|
||||
) override {
|
||||
return IResearchLink::insert(trx, documentId, doc, mode);
|
||||
}
|
||||
|
@ -92,6 +90,17 @@ class IResearchMMFilesLink final
|
|||
return IResearchLink::isSorted();
|
||||
}
|
||||
|
||||
virtual arangodb::IndexIterator* iteratorForCondition(
|
||||
arangodb::transaction::Methods* trx,
|
||||
arangodb::ManagedDocumentResult* result,
|
||||
arangodb::aql::AstNode const* condNode,
|
||||
arangodb::aql::Variable const* var,
|
||||
arangodb::IndexIteratorOptions const& opts
|
||||
) override {
|
||||
TRI_ASSERT(false); // should not be called
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual void load() override {
|
||||
IResearchLink::load();
|
||||
}
|
||||
|
@ -107,8 +116,8 @@ class IResearchMMFilesLink final
|
|||
}
|
||||
|
||||
arangodb::Result remove(
|
||||
transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode
|
||||
) override {
|
||||
|
|
|
@ -47,7 +47,7 @@ class IResearchRocksDBLink final
|
|||
};
|
||||
|
||||
virtual void batchInsert(
|
||||
transaction::Methods* trx,
|
||||
transaction::Methods& trx,
|
||||
std::vector<std::pair<arangodb::LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
|
||||
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
|
||||
) override {
|
||||
|
@ -58,10 +58,10 @@ class IResearchRocksDBLink final
|
|||
return IResearchLink::canBeDropped();
|
||||
}
|
||||
|
||||
virtual int drop() override {
|
||||
virtual Result drop() override {
|
||||
writeRocksWalMarker();
|
||||
|
||||
return IResearchLink::drop().errorNumber();
|
||||
return IResearchLink::drop();
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -78,11 +78,11 @@ class IResearchRocksDBLink final
|
|||
}
|
||||
|
||||
virtual arangodb::Result insertInternal(
|
||||
transaction::Methods* trx,
|
||||
arangodb::RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
const arangodb::velocypack::Slice& doc,
|
||||
OperationMode mode
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::RocksDBMethods* methods,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
arangodb::Index::OperationMode mode
|
||||
) override {
|
||||
return IResearchLink::insert(trx, documentId, doc, mode);
|
||||
}
|
||||
|
@ -91,6 +91,17 @@ class IResearchRocksDBLink final
|
|||
return IResearchLink::isSorted();
|
||||
}
|
||||
|
||||
virtual arangodb::IndexIterator* iteratorForCondition(
|
||||
arangodb::transaction::Methods* trx,
|
||||
arangodb::ManagedDocumentResult* result,
|
||||
arangodb::aql::AstNode const* condNode,
|
||||
arangodb::aql::Variable const* var,
|
||||
arangodb::IndexIteratorOptions const& opts
|
||||
) override {
|
||||
TRI_ASSERT(false); // should not be called
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual void load() override {
|
||||
IResearchLink::load();
|
||||
}
|
||||
|
@ -106,11 +117,11 @@ class IResearchRocksDBLink final
|
|||
}
|
||||
|
||||
virtual arangodb::Result removeInternal(
|
||||
transaction::Methods* trx,
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
const arangodb::velocypack::Slice& doc,
|
||||
OperationMode mode
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
arangodb::Index::OperationMode mode
|
||||
) override {
|
||||
return IResearchLink::remove(trx, documentId, doc, mode);
|
||||
}
|
||||
|
|
|
@ -333,12 +333,7 @@ void IResearchRocksDBRecoveryHelper::PutCF(uint32_t column_family_id,
|
|||
continue; // index was already populated when it was created
|
||||
}
|
||||
|
||||
link->insert(
|
||||
&trx,
|
||||
docId,
|
||||
doc,
|
||||
Index::OperationMode::internal
|
||||
);
|
||||
link->insert(trx, docId, doc, arangodb::Index::OperationMode::internal);
|
||||
}
|
||||
|
||||
trx.commit();
|
||||
|
@ -377,10 +372,10 @@ void IResearchRocksDBRecoveryHelper::handleDeleteCF(uint32_t column_family_id,
|
|||
|
||||
for (auto link : links) {
|
||||
link->remove(
|
||||
&trx,
|
||||
trx,
|
||||
docId,
|
||||
arangodb::velocypack::Slice::emptyObjectSlice(),
|
||||
Index::OperationMode::internal
|
||||
arangodb::Index::OperationMode::internal
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@@ -550,7 +550,7 @@ bool Index::implicitlyUnique() const {
}

void Index::batchInsert(
transaction::Methods* trx,
transaction::Methods& trx,
std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
for (auto const& it : documents) {

@@ -563,15 +563,13 @@ void Index::batchInsert(
}

/// @brief default implementation for drop
int Index::drop() {
// do nothing
return TRI_ERROR_NO_ERROR;
Result Index::drop() {
return Result(); // do nothing
}

/// @brief default implementation for sizeHint
int Index::sizeHint(transaction::Methods*, size_t) {
// do nothing
return TRI_ERROR_NO_ERROR;
Result Index::sizeHint(transaction::Methods& trx, size_t size) {
return Result(); // do nothing
}

/// @brief default implementation for hasBatchInsert
@@ -302,32 +302,38 @@ class Index {
virtual void toVelocyPackFigures(arangodb::velocypack::Builder&) const;
std::shared_ptr<arangodb::velocypack::Builder> toVelocyPackFigures() const;

virtual Result insert(transaction::Methods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) = 0;
virtual Result remove(transaction::Methods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) = 0;

virtual void batchInsert(
transaction::Methods*,
std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const&,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
transaction::Methods& trx,
std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const& docs,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue
);

virtual Result insert(
transaction::Methods& trx,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
OperationMode mode
) = 0;

virtual Result remove(
transaction::Methods& trx,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
OperationMode mode
) = 0;

virtual void load() = 0;
virtual void unload() = 0;

// called when the index is dropped
virtual int drop();
virtual Result drop();

/// @brief called after the collection was truncated
/// @param tick at which truncate was applied
virtual void afterTruncate(TRI_voc_tick_t tick) {};

// give index a hint about the expected size
virtual int sizeHint(transaction::Methods*, size_t);
virtual Result sizeHint(transaction::Methods& trx, size_t size);

virtual bool hasBatchInsert() const;

@@ -343,13 +349,13 @@ class Index {
virtual arangodb::aql::AstNode* specializeCondition(arangodb::aql::AstNode*,
arangodb::aql::Variable const*) const;

virtual IndexIterator* iteratorForCondition(transaction::Methods*,
ManagedDocumentResult*,
arangodb::aql::AstNode const*,
arangodb::aql::Variable const*,
IndexIteratorOptions const&) {
return nullptr; // IResearch will never use this
};
virtual IndexIterator* iteratorForCondition(
transaction::Methods* trx,
ManagedDocumentResult* result,
aql::AstNode const* condNode,
aql::Variable const* var,
IndexIteratorOptions const& opts
) = 0;

bool canUseConditionPart(arangodb::aql::AstNode const* access,
arangodb::aql::AstNode const* other,
@ -120,7 +120,9 @@ namespace {
|
|||
class MMFilesIndexFillerTask : public basics::LocalTask {
|
||||
public:
|
||||
MMFilesIndexFillerTask(
|
||||
std::shared_ptr<basics::LocalTaskQueue> const& queue, transaction::Methods* trx, Index* idx,
|
||||
std::shared_ptr<basics::LocalTaskQueue> const& queue,
|
||||
transaction::Methods& trx,
|
||||
Index* idx,
|
||||
std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> const& documents)
|
||||
: LocalTask(queue), _trx(trx), _idx(idx), _documents(documents) {}
|
||||
|
||||
|
@ -137,7 +139,7 @@ class MMFilesIndexFillerTask : public basics::LocalTask {
|
|||
}
|
||||
|
||||
private:
|
||||
transaction::Methods* _trx;
|
||||
transaction::Methods& _trx;
|
||||
Index* _idx;
|
||||
std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> _documents;
|
||||
};
|
||||
|
@ -1633,8 +1635,10 @@ bool MMFilesCollection::tryLockForCompaction() {
|
|||
void MMFilesCollection::finishCompaction() { _compactionLock.unlock(); }
|
||||
|
||||
/// @brief iterator for index open
|
||||
bool MMFilesCollection::openIndex(VPackSlice const& description,
|
||||
transaction::Methods* trx) {
|
||||
bool MMFilesCollection::openIndex(
|
||||
velocypack::Slice const& description,
|
||||
transaction::Methods& trx
|
||||
) {
|
||||
// VelocyPack must be an index description
|
||||
if (!description.isObject()) {
|
||||
return false;
|
||||
|
@ -1653,10 +1657,12 @@ bool MMFilesCollection::openIndex(VPackSlice const& description,
|
|||
|
||||
/// @brief initializes an index with a set of existing documents
|
||||
void MMFilesCollection::fillIndex(
|
||||
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue, transaction::Methods* trx,
|
||||
arangodb::Index* idx,
|
||||
std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> documents,
|
||||
bool skipPersistent) {
|
||||
std::shared_ptr<basics::LocalTaskQueue> queue,
|
||||
transaction::Methods& trx,
|
||||
Index* idx,
|
||||
std::shared_ptr<std::vector<std::pair<LocalDocumentId, velocypack::Slice>>> documents,
|
||||
bool skipPersistent
|
||||
) {
|
||||
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
if (!useSecondaryIndexes()) {
|
||||
|
@ -1680,16 +1686,17 @@ void MMFilesCollection::fillIndex(
|
|||
|
||||
uint32_t MMFilesCollection::indexBuckets() const { return _indexBuckets; }
|
||||
|
||||
int MMFilesCollection::fillAllIndexes(transaction::Methods* trx) {
|
||||
int MMFilesCollection::fillAllIndexes(transaction::Methods& trx) {
|
||||
READ_LOCKER(guard, _indexesLock);
|
||||
return fillIndexes(trx, _indexes);
|
||||
}
|
||||
|
||||
/// @brief Fill the given list of Indexes
|
||||
int MMFilesCollection::fillIndexes(
|
||||
transaction::Methods* trx,
|
||||
transaction::Methods& trx,
|
||||
std::vector<std::shared_ptr<arangodb::Index>> const& indexes,
|
||||
bool skipPersistent) {
|
||||
bool skipPersistent
|
||||
) {
|
||||
// distribute the work to index threads plus this thread
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
size_t const n = indexes.size();
|
||||
|
@ -1777,8 +1784,7 @@ int MMFilesCollection::fillIndexes(
|
|||
uint64_t total = 0;
|
||||
|
||||
while (true) {
|
||||
MMFilesSimpleIndexElement element =
|
||||
primaryIdx->lookupSequential(trx, position, total);
|
||||
auto element = primaryIdx->lookupSequential(&trx, position, total);
|
||||
|
||||
if (!element) {
|
||||
break;
|
||||
|
@ -1937,7 +1943,7 @@ void MMFilesCollection::open(bool ignoreErrors) {
|
|||
useSecondaryIndexes(false);
|
||||
|
||||
try {
|
||||
detectIndexes(&trx);
|
||||
detectIndexes(trx);
|
||||
useSecondaryIndexes(old);
|
||||
} catch (basics::Exception const& ex) {
|
||||
useSecondaryIndexes(old);
|
||||
|
@ -1959,7 +1965,7 @@ void MMFilesCollection::open(bool ignoreErrors) {
|
|||
|
||||
if (!engine->inRecovery() && !engine->upgrading()) {
|
||||
// build the index structures, and fill the indexes
|
||||
fillAllIndexes(&trx);
|
||||
fillAllIndexes(trx);
|
||||
}
|
||||
|
||||
// successfully opened collection. now adjust version number
|
||||
|
@ -2234,7 +2240,6 @@ std::shared_ptr<Index> MMFilesCollection::lookupIndex(
|
|||
|
||||
std::shared_ptr<Index> MMFilesCollection::createIndex(arangodb::velocypack::Slice const& info,
|
||||
bool restore, bool& created) {
|
||||
|
||||
SingleCollectionTransaction trx(
|
||||
transaction::StandaloneContext::Create(_logicalCollection.vocbase()),
|
||||
_logicalCollection,
|
||||
|
@ -2245,25 +2250,28 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(arangodb::velocypack::Slic
|
|||
if (!res.ok()) {
|
||||
THROW_ARANGO_EXCEPTION(res);
|
||||
}
|
||||
|
||||
std::shared_ptr<Index> idx = createIndex(&trx, info, restore, created);
|
||||
|
||||
auto idx = createIndex(trx, info, restore, created);
|
||||
|
||||
if (idx) {
|
||||
res = trx.commit();
|
||||
}
|
||||
|
||||
|
||||
return idx;
|
||||
}
|
||||
|
||||
std::shared_ptr<Index> MMFilesCollection::createIndex(transaction::Methods* trx,
|
||||
VPackSlice const& info,
|
||||
bool restore,
|
||||
bool& created) {
|
||||
std::shared_ptr<Index> MMFilesCollection::createIndex(
|
||||
transaction::Methods& trx,
|
||||
velocypack::Slice const& info,
|
||||
bool restore,
|
||||
bool& created
|
||||
) {
|
||||
// prevent concurrent dropping
|
||||
// TRI_ASSERT(trx->isLocked(&_logicalCollection, AccessMode::Type::READ));
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
|
||||
TRI_ASSERT(info.isObject());
|
||||
std::shared_ptr<Index> idx = lookupIndex(info);
|
||||
|
||||
if (idx != nullptr) { // We already have this index.
|
||||
created = false;
|
||||
return idx;
|
||||
|
@ -2321,8 +2329,10 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(transaction::Methods* trx,
|
|||
}
|
||||
|
||||
/// @brief Persist an index information to file
|
||||
int MMFilesCollection::saveIndex(transaction::Methods* trx,
|
||||
std::shared_ptr<arangodb::Index> idx) {
|
||||
int MMFilesCollection::saveIndex(
|
||||
transaction::Methods& trx,
|
||||
std::shared_ptr<Index> idx
|
||||
) {
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
// we cannot persist PrimaryIndex
|
||||
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
|
||||
|
@ -2823,14 +2833,16 @@ int MMFilesCollection::unlockWrite(bool useDeadlockDetector, TransactionState co
|
|||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
Result MMFilesCollection::truncate(transaction::Methods* trx,
|
||||
OperationOptions& options) {
|
||||
Result MMFilesCollection::truncate(
|
||||
transaction::Methods& trx,
|
||||
OperationOptions& options
|
||||
) {
|
||||
auto primaryIdx = primaryIndex();
|
||||
|
||||
options.ignoreRevs = true;
|
||||
|
||||
// create remove marker
|
||||
transaction::BuilderLeaser builder(trx);
|
||||
transaction::BuilderLeaser builder(&trx);
|
||||
|
||||
auto callback = [&](MMFilesSimpleIndexElement const& element) {
|
||||
LocalDocumentId const oldDocumentId = element.localDocumentId();
|
||||
|
@ -2841,7 +2853,10 @@ Result MMFilesCollection::truncate(transaction::Methods* trx,
|
|||
|
||||
LocalDocumentId const documentId = LocalDocumentId::create();
|
||||
TRI_voc_rid_t revisionId;
|
||||
newObjectForRemove(trx, oldDoc, *builder.get(), options.isRestore, revisionId);
|
||||
|
||||
newObjectForRemove(
|
||||
&trx, oldDoc, *builder.get(), options.isRestore, revisionId
|
||||
);
|
||||
|
||||
Result res = removeFastPath(trx, revisionId, oldDocumentId, VPackSlice(vpack),
|
||||
options, documentId, builder->slice());
|
||||
|
@ -3007,8 +3022,16 @@ Result MMFilesCollection::insert(
|
|||
|
||||
try {
|
||||
// insert into indexes
|
||||
res = insertDocument(trx, documentId, revisionId, doc, operation,
|
||||
marker, options, options.waitForSync);
|
||||
res = insertDocument(
|
||||
*trx,
|
||||
documentId,
|
||||
revisionId,
|
||||
doc,
|
||||
operation,
|
||||
marker,
|
||||
options,
|
||||
options.waitForSync
|
||||
);
|
||||
} catch (basics::Exception const& ex) {
|
||||
res = Result(ex.code());
|
||||
} catch (std::bad_alloc const&) {
|
||||
|
@ -3278,8 +3301,11 @@ Result MMFilesCollection::deletePrimaryIndex(
|
|||
|
||||
/// @brief creates a new entry in the secondary indexes
|
||||
Result MMFilesCollection::insertSecondaryIndexes(
|
||||
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, Index::OperationMode mode) {
|
||||
arangodb::transaction::Methods& trx,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
// Coordinator doesn't know index internals
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
TRI_IF_FAILURE("InsertSecondaryIndexes") { return Result(TRI_ERROR_DEBUG); }
|
||||
|
@ -3324,8 +3350,11 @@ Result MMFilesCollection::insertSecondaryIndexes(
|
|||
|
||||
/// @brief deletes an entry from the secondary indexes
|
||||
Result MMFilesCollection::deleteSecondaryIndexes(
|
||||
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc, Index::OperationMode mode) {
|
||||
transaction::Methods& trx,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
// Coordintor doesn't know index internals
|
||||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
|
||||
|
@ -3362,7 +3391,7 @@ Result MMFilesCollection::deleteSecondaryIndexes(
|
|||
}
|
||||
|
||||
/// @brief enumerate all indexes of the collection, but don't fill them yet
|
||||
int MMFilesCollection::detectIndexes(transaction::Methods* trx) {
|
||||
int MMFilesCollection::detectIndexes(transaction::Methods& trx) {
|
||||
StorageEngine* engine = EngineSelectorFeature::ENGINE;
|
||||
VPackBuilder builder;
|
||||
|
||||
|
@ -3393,12 +3422,14 @@ int MMFilesCollection::detectIndexes(transaction::Methods* trx) {
|
|||
/// This function guarantees all or nothing,
|
||||
/// If it returns NO_ERROR all indexes are filled.
|
||||
/// If it returns an error no documents are inserted
|
||||
Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationOptions& options) {
|
||||
Result MMFilesCollection::insertIndexes(
|
||||
transaction::Methods& trx,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
OperationOptions& options
|
||||
) {
|
||||
// insert into primary index first
|
||||
Result res = insertPrimaryIndex(trx, documentId, doc, options);
|
||||
auto res = insertPrimaryIndex(&trx, documentId, doc, options);
|
||||
|
||||
if (res.fail()) {
|
||||
// insert has failed
|
||||
|
@ -3411,7 +3442,7 @@ Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
|
|||
if (res.fail()) {
|
||||
deleteSecondaryIndexes(trx, documentId, doc,
|
||||
Index::OperationMode::rollback);
|
||||
deletePrimaryIndex(trx, documentId, doc, options);
|
||||
deletePrimaryIndex(&trx, documentId, doc, options);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -3419,14 +3450,21 @@ Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
|
|||
/// @brief insert a document, low level worker
|
||||
/// the caller must make sure the write lock on the collection is held
|
||||
Result MMFilesCollection::insertDocument(
|
||||
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
|
||||
TRI_voc_rid_t revisionId, VPackSlice const& doc,
|
||||
MMFilesDocumentOperation& operation, MMFilesWalMarker const* marker,
|
||||
OperationOptions& options, bool& waitForSync) {
|
||||
arangodb::transaction::Methods& trx,
|
||||
LocalDocumentId const& documentId,
|
||||
TRI_voc_rid_t revisionId,
|
||||
velocypack::Slice const& doc,
|
||||
MMFilesDocumentOperation& operation,
|
||||
MMFilesWalMarker const* marker,
|
||||
OperationOptions& options,
|
||||
bool& waitForSync
|
||||
) {
|
||||
Result res = insertIndexes(trx, documentId, doc, options);
|
||||
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
}
|
||||
|
||||
operation.indexed();
|
||||
|
||||
TRI_IF_FAILURE("InsertDocumentNoOperation") { return Result(TRI_ERROR_DEBUG); }
|
||||
|
@ -3435,7 +3473,8 @@ Result MMFilesCollection::insertDocument(
|
|||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
return Result(static_cast<MMFilesTransactionState*>(trx->state())
|
||||
return Result(
|
||||
static_cast<MMFilesTransactionState*>(trx.state())
|
||||
->addOperation(documentId, revisionId, operation, marker, waitForSync));
|
||||
}
|
||||
|
||||
|
@ -3554,9 +3593,18 @@ Result MMFilesCollection::update(
|
|||
operation.setDocumentIds(MMFilesDocumentDescriptor(oldDocumentId, oldDoc.begin()),
|
||||
MMFilesDocumentDescriptor(documentId, newDoc.begin()));
|
||||
|
||||
res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
|
||||
newDoc, operation, marker, options,
|
||||
options.waitForSync);
|
||||
res = updateDocument(
|
||||
*trx,
|
||||
revisionId,
|
||||
oldDocumentId,
|
||||
oldDoc,
|
||||
documentId,
|
||||
newDoc,
|
||||
operation,
|
||||
marker,
|
||||
options,
|
||||
options.waitForSync
|
||||
);
|
||||
|
||||
if (res.ok() && callbackDuringLock != nullptr) {
|
||||
res = callbackDuringLock();
|
||||
|
@ -3693,9 +3741,18 @@ Result MMFilesCollection::replace(
|
|||
operation.setDocumentIds(MMFilesDocumentDescriptor(oldDocumentId, oldDoc.begin()),
|
||||
MMFilesDocumentDescriptor(documentId, newDoc.begin()));
|
||||
|
||||
res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
|
||||
newDoc, operation, marker, options,
|
||||
options.waitForSync);
|
||||
res = updateDocument(
|
||||
*trx,
|
||||
revisionId,
|
||||
oldDocumentId,
|
||||
oldDoc,
|
||||
documentId,
|
||||
newDoc,
|
||||
operation,
|
||||
marker,
|
||||
options,
|
||||
options.waitForSync
|
||||
);
|
||||
|
||||
if (res.ok() && callbackDuringLock != nullptr) {
|
||||
res = callbackDuringLock();
|
||||
|
@ -3725,17 +3782,25 @@ Result MMFilesCollection::replace(
|
|||
}
|
||||
|
||||
Result MMFilesCollection::remove(
|
||||
arangodb::transaction::Methods* trx, VPackSlice slice,
|
||||
arangodb::ManagedDocumentResult& previous, OperationOptions& options,
|
||||
TRI_voc_tick_t& resultMarkerTick, bool lock, TRI_voc_rid_t& prevRev,
|
||||
TRI_voc_rid_t& revisionId, KeyLockInfo* keyLockInfo,
|
||||
std::function<Result(void)> callbackDuringLock) {
|
||||
|
||||
transaction::Methods& trx,
|
||||
velocypack::Slice slice,
|
||||
ManagedDocumentResult& previous,
|
||||
OperationOptions& options,
|
||||
TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock,
|
||||
TRI_voc_rid_t& prevRev,
|
||||
TRI_voc_rid_t& revisionId,
|
||||
KeyLockInfo* keyLockInfo,
|
||||
std::function<Result(void)> callbackDuringLock
|
||||
) {
|
||||
prevRev = 0;
|
||||
LocalDocumentId const documentId = LocalDocumentId::create();
|
||||
|
||||
transaction::BuilderLeaser builder(trx);
|
||||
newObjectForRemove(trx, slice, *builder.get(), options.isRestore, revisionId);
|
||||
LocalDocumentId const documentId = LocalDocumentId::create();
|
||||
transaction::BuilderLeaser builder(&trx);
|
||||
|
||||
newObjectForRemove(
|
||||
&trx, slice, *builder.get(), options.isRestore, revisionId
|
||||
);
|
||||
|
||||
TRI_IF_FAILURE("RemoveDocumentNoMarker") {
|
||||
// test what happens when no marker can be created
|
||||
|
@ -3750,7 +3815,7 @@ Result MMFilesCollection::remove(
|
|||
// create marker
|
||||
MMFilesCrudMarker removeMarker(
|
||||
TRI_DF_MARKER_VPACK_REMOVE,
|
||||
static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(),
|
||||
static_cast<MMFilesTransactionState*>(trx.state())->idForMarker(),
|
||||
documentId,
|
||||
builder->slice());
|
||||
MMFilesWalMarker const* marker;
|
||||
|
@ -3785,12 +3850,15 @@ Result MMFilesCollection::remove(
|
|||
&_logicalCollection, TRI_VOC_DOCUMENT_OPERATION_REMOVE
|
||||
);
|
||||
bool const useDeadlockDetector =
|
||||
(lock && !trx->isSingleOperationTransaction() && !trx->state()->hasHint(transaction::Hints::Hint::NO_DLD));
|
||||
(lock
|
||||
&& !trx.isSingleOperationTransaction()
|
||||
&& !trx.state()->hasHint(transaction::Hints::Hint::NO_DLD)
|
||||
);
|
||||
arangodb::MMFilesCollectionWriteLocker collectionLocker(
|
||||
this, useDeadlockDetector, trx->state(), lock);
|
||||
this, useDeadlockDetector, trx.state(), lock);
|
||||
|
||||
// get the previous revision
|
||||
Result res = lookupDocument(trx, key, previous);
|
||||
Result res = lookupDocument(&trx, key, previous);
|
||||
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
|
@ -3804,7 +3872,8 @@ Result MMFilesCollection::remove(
|
|||
// Check old revision:
|
||||
if (!options.ignoreRevs && slice.isObject()) {
|
||||
TRI_voc_rid_t expectedRevisionId = TRI_ExtractRevisionId(slice);
|
||||
res = checkRevision(trx, expectedRevisionId, oldRevisionId);
|
||||
|
||||
res = checkRevision(&trx, expectedRevisionId, oldRevisionId);
|
||||
|
||||
if (res.fail()) {
|
||||
return res;
|
||||
|
@ -3826,7 +3895,7 @@ Result MMFilesCollection::remove(
|
|||
THROW_ARANGO_EXCEPTION(res);
|
||||
}
|
||||
|
||||
res = deletePrimaryIndex(trx, oldDocumentId, oldDoc, options);
|
||||
res = deletePrimaryIndex(&trx, oldDocumentId, oldDoc, options);
|
||||
|
||||
if (res.fail()) {
|
||||
insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
|
||||
|
@ -3849,9 +3918,9 @@ Result MMFilesCollection::remove(
|
|||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
res =
|
||||
static_cast<MMFilesTransactionState*>(trx->state())
|
||||
->addOperation(documentId, revisionId, operation, marker, options.waitForSync);
|
||||
res = static_cast<MMFilesTransactionState*>(trx.state())->addOperation(
|
||||
documentId, revisionId, operation, marker, options.waitForSync
|
||||
);
|
||||
|
||||
if (res.ok() && callbackDuringLock != nullptr) {
|
||||
res = callbackDuringLock();
|
||||
|
@ -3867,7 +3936,7 @@ Result MMFilesCollection::remove(
|
|||
}
|
||||
|
||||
if (res.fail()) {
|
||||
operation.revert(trx);
|
||||
operation.revert(&trx);
|
||||
} else {
|
||||
// store the tick that was used for removing the document
|
||||
resultMarkerTick = operation.tick();
|
||||
|
@ -3890,10 +3959,15 @@ void MMFilesCollection::deferDropCollection(
|
|||
|
||||
/// @brief rolls back a document operation
|
||||
Result MMFilesCollection::rollbackOperation(
|
||||
transaction::Methods* trx, TRI_voc_document_operation_e type,
|
||||
LocalDocumentId const& oldDocumentId, VPackSlice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId, VPackSlice const& newDoc) {
|
||||
transaction::Methods& trx,
|
||||
TRI_voc_document_operation_e type,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc
|
||||
) {
|
||||
OperationOptions options;
|
||||
|
||||
options.indexOperationMode= Index::OperationMode::rollback;
|
||||
|
||||
if (type == TRI_VOC_DOCUMENT_OPERATION_INSERT) {
|
||||
|
@ -3903,9 +3977,11 @@ Result MMFilesCollection::rollbackOperation(
|
|||
TRI_ASSERT(!newDoc.isNone());
|
||||
|
||||
// ignore any errors we're getting from this
|
||||
deletePrimaryIndex(trx, newDocumentId, newDoc, options);
|
||||
deleteSecondaryIndexes(trx, newDocumentId, newDoc,
|
||||
Index::OperationMode::rollback);
|
||||
deletePrimaryIndex(&trx, newDocumentId, newDoc, options);
|
||||
deleteSecondaryIndexes(
|
||||
trx, newDocumentId, newDoc, Index::OperationMode::rollback
|
||||
);
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
|
@ -3931,11 +4007,12 @@ Result MMFilesCollection::rollbackOperation(
|
|||
TRI_ASSERT(newDocumentId.empty());
|
||||
TRI_ASSERT(newDoc.isNone());
|
||||
|
||||
Result res = insertPrimaryIndex(trx, oldDocumentId, oldDoc, options);
|
||||
auto res = insertPrimaryIndex(&trx, oldDocumentId, oldDoc, options);
|
||||
|
||||
if (res.ok()) {
|
||||
res = insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
|
||||
Index::OperationMode::rollback);
|
||||
res = insertSecondaryIndexes(
|
||||
trx, oldDocumentId, oldDoc, Index::OperationMode::rollback
|
||||
);
|
||||
} else {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::ENGINES)
|
||||
<< "error rolling back remove operation";
|
||||
|
@ -3951,13 +4028,15 @@ Result MMFilesCollection::rollbackOperation(
|
|||
}
|
||||
|
||||
/// @brief removes a document or edge, fast path function for database documents
|
||||
Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
VPackSlice const oldDoc,
|
||||
OperationOptions& options,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const toRemove) {
|
||||
Result MMFilesCollection::removeFastPath(
|
||||
transaction::Methods& trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
velocypack::Slice const oldDoc,
|
||||
OperationOptions& options,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const toRemove
|
||||
) {
|
||||
TRI_IF_FAILURE("RemoveDocumentNoMarker") {
|
||||
// test what happens when no marker can be created
|
||||
return Result(TRI_ERROR_DEBUG);
|
||||
|
@ -3970,10 +4049,11 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
|
|||
|
||||
// create marker
|
||||
MMFilesCrudMarker removeMarker(
|
||||
TRI_DF_MARKER_VPACK_REMOVE,
|
||||
static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(),
|
||||
documentId,
|
||||
toRemove);
|
||||
TRI_DF_MARKER_VPACK_REMOVE,
|
||||
static_cast<MMFilesTransactionState*>(trx.state())->idForMarker(),
|
||||
documentId,
|
||||
toRemove
|
||||
);
|
||||
|
||||
MMFilesWalMarker const* marker = &removeMarker;
|
||||
|
||||
|
@ -4005,7 +4085,7 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
|
|||
THROW_ARANGO_EXCEPTION(res);
|
||||
}
|
||||
|
||||
res = deletePrimaryIndex(trx, oldDocumentId, oldDoc, options);
|
||||
res = deletePrimaryIndex(&trx, oldDocumentId, oldDoc, options);
|
||||
|
||||
if (res.fail()) {
|
||||
insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
|
||||
|
@ -4028,9 +4108,9 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
|
|||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
res =
|
||||
static_cast<MMFilesTransactionState*>(trx->state())
|
||||
->addOperation(documentId, revisionId, operation, marker, options.waitForSync);
|
||||
res = static_cast<MMFilesTransactionState*>(trx.state())->addOperation(
|
||||
documentId, revisionId, operation, marker, options.waitForSync
|
||||
);
|
||||
} catch (basics::Exception const& ex) {
|
||||
res = Result(ex.code());
|
||||
} catch (std::bad_alloc const&) {
|
||||
|
@ -4042,7 +4122,7 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
|
|||
}
|
||||
|
||||
if (res.fail()) {
|
||||
operation.revert(trx);
|
||||
operation.revert(&trx);
|
||||
}
|
||||
|
||||
return res;
|
||||
|
@ -4075,12 +4155,17 @@ Result MMFilesCollection::lookupDocument(transaction::Methods* trx,
|
|||
/// @brief updates an existing document, low level worker
|
||||
/// the caller must make sure the write lock on the collection is held
|
||||
Result MMFilesCollection::updateDocument(
|
||||
transaction::Methods* trx,TRI_voc_rid_t revisionId,
|
||||
transaction::Methods& trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
VPackSlice const& oldDoc, LocalDocumentId const& newDocumentId,
|
||||
VPackSlice const& newDoc, MMFilesDocumentOperation& operation,
|
||||
MMFilesWalMarker const* marker, OperationOptions& options,
|
||||
bool& waitForSync) {
|
||||
velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
MMFilesDocumentOperation& operation,
|
||||
MMFilesWalMarker const* marker,
|
||||
OperationOptions& options,
|
||||
bool& waitForSync
|
||||
) {
|
||||
// remove old document from secondary indexes
|
||||
// (it will stay in the primary index as the key won't change)
|
||||
Result res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc,
|
||||
|
@ -4110,8 +4195,8 @@ Result MMFilesCollection::updateDocument(
|
|||
// adjusted)
|
||||
// TODO: pass key into this function so it does not have to be looked up again
|
||||
VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(newDoc));
|
||||
MMFilesSimpleIndexElement* element =
|
||||
primaryIndex()->lookupKeyRef(trx, keySlice);
|
||||
auto* element = primaryIndex()->lookupKeyRef(&trx, keySlice);
|
||||
|
||||
if (element != nullptr && element->isSet()) {
|
||||
element->updateLocalDocumentId(
|
||||
newDocumentId,
|
||||
|
@ -4133,8 +4218,11 @@ Result MMFilesCollection::updateDocument(
|
|||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
return Result(static_cast<MMFilesTransactionState*>(trx->state())
|
||||
->addOperation(newDocumentId, revisionId, operation, marker, waitForSync));
|
||||
return Result(
|
||||
static_cast<MMFilesTransactionState*>(trx.state())->addOperation(
|
||||
newDocumentId, revisionId, operation, marker, waitForSync
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
void MMFilesCollection::lockKey(KeyLockInfo& keyLockInfo, VPackSlice const& key) {
|
||||
|
@ -4174,7 +4262,7 @@ void MMFilesCollection::lockKey(KeyLockInfo& keyLockInfo, VPackSlice const& key)
|
|||
// we can only get here on shutdown
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_SHUTTING_DOWN);
|
||||
}
|
||||
|
||||
|
||||
void MMFilesCollection::unlockKey(KeyLockInfo& keyLockInfo) noexcept {
|
||||
TRI_ASSERT(keyLockInfo.shouldLock);
|
||||
if (!keyLockInfo.key.empty()) {
|
||||
|
|
|
@ -243,7 +243,7 @@ class MMFilesCollection final : public PhysicalCollection {
|
|||
|
||||
void useSecondaryIndexes(bool value) { _useSecondaryIndexes = value; }
|
||||
|
||||
int fillAllIndexes(transaction::Methods*);
|
||||
int fillAllIndexes(transaction::Methods& trx);
|
||||
|
||||
void prepareIndexes(arangodb::velocypack::Slice indexesSlice) override;
|
||||
|
||||
|
@ -258,9 +258,13 @@ class MMFilesCollection final : public PhysicalCollection {
|
|||
|
||||
std::shared_ptr<Index> createIndex(arangodb::velocypack::Slice const& info,
|
||||
bool restore, bool& created) override;
|
||||
std::shared_ptr<Index> createIndex(transaction::Methods* trx,
|
||||
arangodb::velocypack::Slice const& info,
|
||||
bool restore, bool& created);
|
||||
|
||||
std::shared_ptr<Index> createIndex(
|
||||
transaction::Methods& trx,
|
||||
velocypack::Slice const& info,
|
||||
bool restore,
|
||||
bool& created
|
||||
);
|
||||
|
||||
/// @brief Drop an index with the given iid.
|
||||
bool dropIndex(TRI_idx_iid_t iid) override;
|
||||
|
@ -281,7 +285,10 @@ class MMFilesCollection final : public PhysicalCollection {
|
|||
// -- SECTION DML Operations --
|
||||
///////////////////////////////////
|
||||
|
||||
Result truncate(transaction::Methods* trx, OperationOptions&) override;
|
||||
Result truncate(
|
||||
transaction::Methods& trx,
|
||||
OperationOptions& options
|
||||
) override;
|
||||
|
||||
/// @brief Defer a callback to be executed when the collection
|
||||
/// can be dropped. The callback is supposed to drop
|
||||
|
@ -340,19 +347,27 @@ class MMFilesCollection final : public PhysicalCollection {
|
|||
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
|
||||
std::function<Result(void)> callbackDuringLock) override;
|
||||
|
||||
Result remove(arangodb::transaction::Methods* trx,
|
||||
arangodb::velocypack::Slice slice,
|
||||
arangodb::ManagedDocumentResult& previous,
|
||||
OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
|
||||
KeyLockInfo* keyLockInfo,
|
||||
std::function<Result(void)> callbackDuringLock) override;
|
||||
Result remove(
|
||||
transaction::Methods& trx,
|
||||
velocypack::Slice slice,
|
||||
ManagedDocumentResult& previous,
|
||||
OperationOptions& options,
|
||||
TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock,
|
||||
TRI_voc_rid_t& prevRev,
|
||||
TRI_voc_rid_t& revisionId,
|
||||
KeyLockInfo* keyLockInfo,
|
||||
std::function<Result(void)> callbackDuringLock
|
||||
) override;
-  Result rollbackOperation(transaction::Methods*, TRI_voc_document_operation_e,
-                           LocalDocumentId const& oldDocumentId, velocypack::Slice const& oldDoc,
-                           LocalDocumentId const& newDocumentId, velocypack::Slice const& newDoc);
+  Result rollbackOperation(transaction::Methods& trx, TRI_voc_document_operation_e type,
+                           LocalDocumentId const& oldDocumentId, velocypack::Slice const& oldDoc,
+                           LocalDocumentId const& newDocumentId, velocypack::Slice const& newDoc);

   MMFilesDocumentPosition insertLocalDocumentId(LocalDocumentId const& documentId,
                                                 uint8_t const* dataptr,

@ -378,27 +393,38 @@ class MMFilesCollection final : public PhysicalCollection {
 private:
   void sizeHint(transaction::Methods* trx, int64_t hint);

-  bool openIndex(VPackSlice const& description, transaction::Methods* trx);
+  bool openIndex(velocypack::Slice const& description, transaction::Methods& trx);

   /// @brief initializes an index with all existing documents
-  void fillIndex(std::shared_ptr<basics::LocalTaskQueue>, transaction::Methods*, Index*,
-                 std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>>, bool);
+  void fillIndex(std::shared_ptr<basics::LocalTaskQueue> queue, transaction::Methods& trx, Index* index,
+                 std::shared_ptr<std::vector<std::pair<LocalDocumentId, velocypack::Slice>>> docs,
+                 bool skipPersistent);

   /// @brief Fill indexes used in recovery
-  int fillIndexes(transaction::Methods*, std::vector<std::shared_ptr<Index>> const&,
-                  bool skipPersistent = true);
+  int fillIndexes(transaction::Methods& trx, std::vector<std::shared_ptr<Index>> const& indexes,
+                  bool skipPersistent = true);

   int openWorker(bool ignoreErrors);

-  Result removeFastPath(arangodb::transaction::Methods* trx, TRI_voc_rid_t revisionId,
-                        LocalDocumentId const& oldDocumentId, arangodb::velocypack::Slice const oldDoc,
-                        OperationOptions& options, LocalDocumentId const& documentId,
-                        arangodb::velocypack::Slice const toRemove);
+  Result removeFastPath(transaction::Methods& trx, TRI_voc_rid_t revisionId,
+                        LocalDocumentId const& oldDocumentId, velocypack::Slice const oldDoc,
+                        OperationOptions& options, LocalDocumentId const& documentId,
+                        velocypack::Slice const toRemove);

   static int OpenIteratorHandleDocumentMarker(MMFilesMarker const* marker,
                                               MMFilesDatafile* datafile,

@ -437,13 +463,16 @@ class MMFilesCollection final : public PhysicalCollection {
   MMFilesDocumentPosition lookupDocument(LocalDocumentId const& documentId) const;

-  Result insertDocument(arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-                        TRI_voc_rid_t revisionId, arangodb::velocypack::Slice const& doc,
-                        MMFilesDocumentOperation& operation, MMFilesWalMarker const* marker,
-                        OperationOptions& options, bool& waitForSync);
+  Result insertDocument(transaction::Methods& trx, LocalDocumentId const& documentId,
+                        TRI_voc_rid_t revisionId, velocypack::Slice const& doc,
+                        MMFilesDocumentOperation& operation, MMFilesWalMarker const* marker,
+                        OperationOptions& options, bool& waitForSync);

   uint8_t const* lookupDocumentVPack(LocalDocumentId const& documentId) const;
   uint8_t const* lookupDocumentVPackConditional(LocalDocumentId const& documentId,

@ -462,39 +491,51 @@ class MMFilesCollection final : public PhysicalCollection {
   // SECTION: Index storage

-  int saveIndex(transaction::Methods* trx, std::shared_ptr<arangodb::Index> idx);
+  int saveIndex(transaction::Methods& trx, std::shared_ptr<Index> idx);

   /// @brief Detect all indexes form file
-  int detectIndexes(transaction::Methods* trx);
+  int detectIndexes(transaction::Methods& trx);

-  Result insertIndexes(transaction::Methods* trx, LocalDocumentId const& documentId,
-                       velocypack::Slice const& doc, OperationOptions& options);
+  Result insertIndexes(transaction::Methods& trx, LocalDocumentId const& documentId,
+                       velocypack::Slice const& doc, OperationOptions& options);

   Result insertPrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options);

   Result deletePrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options);

-  Result insertSecondaryIndexes(transaction::Methods*, LocalDocumentId const& documentId,
-                                velocypack::Slice const&, Index::OperationMode mode);
+  Result insertSecondaryIndexes(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                velocypack::Slice const& doc, Index::OperationMode mode);

-  Result deleteSecondaryIndexes(transaction::Methods*, LocalDocumentId const& documentId,
-                                velocypack::Slice const&, Index::OperationMode mode);
+  Result deleteSecondaryIndexes(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                velocypack::Slice const& doc, Index::OperationMode mode);

   Result lookupDocument(transaction::Methods*, velocypack::Slice, ManagedDocumentResult& result);

-  Result updateDocument(transaction::Methods*, TRI_voc_rid_t revisionId,
-                        LocalDocumentId const& oldDocumentId, velocypack::Slice const& oldDoc,
-                        LocalDocumentId const& newDocumentId, velocypack::Slice const& newDoc,
-                        MMFilesDocumentOperation&, MMFilesWalMarker const*,
-                        OperationOptions& options, bool& waitForSync);
+  Result updateDocument(transaction::Methods& trx, TRI_voc_rid_t revisionId,
+                        LocalDocumentId const& oldDocumentId, velocypack::Slice const& oldDoc,
+                        LocalDocumentId const& newDocumentId, velocypack::Slice const& newDoc,
+                        MMFilesDocumentOperation& operation, MMFilesWalMarker const* marker,
+                        OperationOptions& options, bool& waitForSync);

   LocalDocumentId reuseOrCreateLocalDocumentId(OperationOptions const& options) const;

@ -570,4 +611,4 @@ class MMFilesCollection final : public PhysicalCollection {
 };
 }

 #endif
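The whole change follows one mechanical pattern, which can be reduced to a small standalone sketch (illustrative only; `Txn`, `Doc` and the class names below are stand-ins, not ArangoDB types): a virtual index operation that used to take a raw transaction pointer now takes a reference, so a null transaction can no longer reach the implementation.

#include <iostream>

// Stand-in types for illustration only; not ArangoDB classes.
struct Txn { int id = 42; };
struct Doc {};

// Before: the callee had to trust that the pointer was non-null.
struct OldIndex {
  virtual ~OldIndex() = default;
  virtual int insert(Txn* trx, Doc const& doc) = 0;  // could be called with nullptr
};

// After: the parameter is a reference, so a valid object must exist at the call site.
struct NewIndex {
  virtual ~NewIndex() = default;
  virtual int insert(Txn& trx, Doc const& doc) = 0;
};

struct MyIndex final : NewIndex {
  int insert(Txn& trx, Doc const&) override {
    // no null check needed here any more
    return trx.id;
  }
};

int main() {
  Txn trx;
  Doc doc;
  MyIndex idx;
  std::cout << idx.insert(trx, doc) << "\n";  // prints 42
}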
@ -126,7 +126,9 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
   if (status != StatusType::CREATED) {
     // remove document from indexes
     try {
-      physical->rollbackOperation(trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc);
+      physical->rollbackOperation(*trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc);
     } catch (...) {
     }
   }

@ -151,7 +153,9 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
   if (status != StatusType::CREATED) {
     try {
       // restore the old index state
-      physical->rollbackOperation(trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc);
+      physical->rollbackOperation(*trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc);
     } catch (...) {
     }
   }

@ -159,12 +163,15 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
   // let the primary index entry point to the correct document
   MMFilesSimpleIndexElement* element = physical->primaryIndex()->lookupKeyRef(
       trx, transaction::helpers::extractKeyFromDocument(newDoc));

   if (element != nullptr && element->isSet()) {
     VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(oldDoc));
     element->updateLocalDocumentId(oldDocumentId, static_cast<uint32_t>(keySlice.begin() - oldDoc.begin()));
   }

   physical->updateLocalDocumentId(oldDocumentId, oldDoc.begin(), 0, false);

   // remove now obsolete new document
   if (oldDocumentId != newDocumentId) {
     // we need to check for the same document id here

@ -176,16 +183,18 @@ void MMFilesDocumentOperation::revert(transaction::Methods* trx) {
   } else if (_type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) {
     TRI_ASSERT(!_oldRevision.empty());
     TRI_ASSERT(_newRevision.empty());

     try {
       physical->insertLocalDocumentId(_oldRevision._localDocumentId, _oldRevision._vpack, 0, true, true);
     } catch (...) {
     }

     if (status != StatusType::CREATED) {
       try {
         // remove from indexes again
-        physical->rollbackOperation(trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc);
+        physical->rollbackOperation(*trx, _type, oldDocumentId, oldDoc, newDocumentId, newDoc);
       } catch (...) {
       }
     }
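Call sites that still hold a raw `transaction::Methods*`, as `revert()` does above, dereference it exactly once at the boundary, which concentrates any null handling in a single place. A minimal sketch of that shape, with stand-in types (not the real ArangoDB classes):

#include <cassert>

struct Txn {};
struct Physical {
  void rollbackOperation(Txn& trx) { (void)trx; /* can rely on a valid transaction */ }
};

// revert() still receives a raw pointer from legacy callers; the dereference
// happens once, after the pointer has been validated.
void revert(Physical* physical, Txn* trx) {
  assert(trx != nullptr);             // the single place where nullness is handled
  physical->rollbackOperation(*trx);  // callee now gets a reference
}

int main() {
  Physical p;
  Txn t;
  revert(&p, &t);
}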
@ -274,16 +274,17 @@ void MMFilesEdgeIndex::toVelocyPackFigures(VPackBuilder& builder) const {
   builder.close();
 }

-Result MMFilesEdgeIndex::insert(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                VPackSlice const& doc, OperationMode mode) {
+Result MMFilesEdgeIndex::insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;
   MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
   MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));
   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, 1);
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, 1);

   _edgesFrom->insert(&context, fromElement, true, mode == OperationMode::rollback);

@ -306,16 +307,17 @@ Result MMFilesEdgeIndex::insert(transaction::Methods* trx,
   return res;
 }

-Result MMFilesEdgeIndex::remove(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                VPackSlice const& doc, OperationMode mode) {
+Result MMFilesEdgeIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;
   MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
   MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));
   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, 1);
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, 1);

   try {
     _edgesFrom->remove(&context, fromElement);

@ -332,7 +334,7 @@ Result MMFilesEdgeIndex::remove(transaction::Methods* trx,
 }

 void MMFilesEdgeIndex::batchInsert(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
   if (documents.empty()) {

@ -351,7 +353,7 @@ void MMFilesEdgeIndex::batchInsert(
   auto creator = [&trx, this]() -> void* {
     ManagedDocumentResult* result = new ManagedDocumentResult;
-    return new MMFilesIndexLookupContext(trx, &_collection, result, 1);
+    return new MMFilesIndexLookupContext(&trx, &_collection, result, 1);
   };
   auto destroyer = [](void* userData) {
     MMFilesIndexLookupContext* context = static_cast<MMFilesIndexLookupContext*>(userData);

@ -386,7 +388,7 @@ void MMFilesEdgeIndex::unload() {
 }

 /// @brief provides a size hint for the edge index
-int MMFilesEdgeIndex::sizeHint(transaction::Methods* trx, size_t size) {
+Result MMFilesEdgeIndex::sizeHint(transaction::Methods& trx, size_t size) {
   // we assume this is called when setting up the index and the index
   // is still empty
   TRI_ASSERT(_edgesFrom->size() == 0);

@ -394,7 +396,7 @@ int MMFilesEdgeIndex::sizeHint(transaction::Methods* trx, size_t size) {
   // set an initial size for the index for some new nodes to be created
   // without resizing
   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, 1);
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, 1);
   int err = _edgesFrom->resize(&context, size + 2049);

   if (err != TRI_ERROR_NO_ERROR) {
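Conversely, helpers that still store a raw pointer, such as `MMFilesIndexLookupContext` in the hunks above, are handed the address of the reference with `&trx`. A stand-in sketch of that call-site adaptation (the `LookupContext` shape below is assumed for illustration, not the real class):

// Stand-in types for illustration only.
struct Txn {};
struct Collection {};

// A helper that still keeps a raw pointer internally (assumed shape).
struct LookupContext {
  LookupContext(Txn* trx, Collection* coll) : _trx(trx), _coll(coll) {}
  Txn* _trx;
  Collection* _coll;
};

struct EdgeIndexLike {
  Collection _collection;
  // The index member function now receives a reference...
  void insert(Txn& trx) {
    // ...and takes its address only where the pointer-based helper requires it.
    LookupContext context(&trx, &_collection);
    (void)context;
  }
};

int main() {
  Txn trx;
  EdgeIndexLike idx;
  idx.insert(trx);
}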
@ -167,20 +167,30 @@ class MMFilesEdgeIndex final : public MMFilesIndex {
   void toVelocyPackFigures(VPackBuilder&) const override;

-  Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  void batchInsert(transaction::Methods& trx,
+                   std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& docs,
+                   std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;

-  Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

-  void batchInsert(transaction::Methods*,
-                   std::vector<std::pair<LocalDocumentId, VPackSlice>> const&,
-                   std::shared_ptr<arangodb::basics::LocalTaskQueue>) override;
+  Result remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

   void load() override {}
   void unload() override;

-  int sizeHint(transaction::Methods*, size_t) override;
+  Result sizeHint(transaction::Methods& trx, size_t size) override;

   bool hasBatchInsert() const override { return true; }

@ -230,4 +240,4 @@ class MMFilesEdgeIndex final : public MMFilesIndex {
 };
 }

 #endif
@ -214,9 +214,12 @@ bool MMFilesFulltextIndex::matchesDefinition(VPackSlice const& info) const {
   return true;
 }

-Result MMFilesFulltextIndex::insert(transaction::Methods*, LocalDocumentId const& documentId,
-                                    VPackSlice const& doc, OperationMode mode) {
+Result MMFilesFulltextIndex::insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                    velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;
   int r = TRI_ERROR_NO_ERROR;
   std::set<std::string> words = wordlist(doc);

@ -229,9 +232,12 @@ Result MMFilesFulltextIndex::insert(transaction::Methods*,
   return res;
 }

-Result MMFilesFulltextIndex::remove(transaction::Methods*, LocalDocumentId const& documentId,
-                                    VPackSlice const& doc, OperationMode mode) {
+Result MMFilesFulltextIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                    velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;
   int r = TRI_ERROR_NO_ERROR;
   std::set<std::string> words = wordlist(doc);
@ -66,13 +66,19 @@ class MMFilesFulltextIndex final : public MMFilesIndex {
   bool matchesDefinition(VPackSlice const&) const override;

-  Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

-  Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

   void load() override {}
   void unload() override;

@ -146,4 +152,4 @@ class MMFilesFulltextIndexIterator : public IndexIterator {
 };
 }

 #endif
@ -328,9 +328,12 @@ bool MMFilesGeoIndex::matchesDefinition(VPackSlice const& info) const {
   return true;
 }

-Result MMFilesGeoIndex::insert(transaction::Methods*, LocalDocumentId const& documentId,
-                               VPackSlice const& doc, OperationMode mode) {
+Result MMFilesGeoIndex::insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                               velocypack::Slice const& doc, arangodb::Index::OperationMode mode) {
   // covering and centroid of coordinate / polygon / ...
   size_t reserve = _variant == Variant::GEOJSON ? 8 : 1;
   std::vector<S2CellId> cells;

@ -357,9 +360,12 @@ Result MMFilesGeoIndex::insert(transaction::Methods*,
   return res;
 }

-Result MMFilesGeoIndex::remove(transaction::Methods*, LocalDocumentId const& documentId,
-                               VPackSlice const& doc, OperationMode mode) {
+Result MMFilesGeoIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                               velocypack::Slice const& doc, arangodb::Index::OperationMode mode) {
   // covering and centroid of coordinate / polygon / ...
   size_t reserve = _variant == Variant::GEOJSON ? 8 : 1;
   std::vector<S2CellId> cells(reserve);
@ -86,13 +86,19 @@ class MMFilesGeoIndex final : public MMFilesIndex, public geo_index::Index {
   bool matchesDefinition(velocypack::Slice const& info) const override;

-  Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, arangodb::Index::OperationMode mode) override;

-  Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, arangodb::Index::OperationMode mode) override;

   IndexIterator* iteratorForCondition(transaction::Methods*,
                                       ManagedDocumentResult*,

@ -111,4 +117,4 @@ class MMFilesGeoIndex final : public MMFilesIndex, public geo_index::Index {
 };
 }  // namespace arangodb

 #endif
@ -474,24 +474,27 @@ bool MMFilesHashIndex::matchesDefinition(VPackSlice const& info) const {
   return true;
 }

-Result MMFilesHashIndex::insert(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                VPackSlice const& doc, OperationMode mode) {
+Result MMFilesHashIndex::insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                velocypack::Slice const& doc, Index::OperationMode mode) {
   if (_unique) {
-    return insertUnique(trx, documentId, doc, mode);
+    return insertUnique(&trx, documentId, doc, mode);
   }

-  return insertMulti(trx, documentId, doc, mode);
+  return insertMulti(&trx, documentId, doc, mode);
 }

 /// @brief removes an entry from the hash array part of the hash index
-Result MMFilesHashIndex::remove(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                VPackSlice const& doc, OperationMode mode) {
+Result MMFilesHashIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;

   std::vector<MMFilesHashIndexElement*> elements;
   int r = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);

@ -505,9 +508,9 @@ Result MMFilesHashIndex::remove(transaction::Methods* trx,
   for (auto& hashElement : elements) {
     int result;
     if (_unique) {
-      result = removeUniqueElement(trx, hashElement, mode);
+      result = removeUniqueElement(&trx, hashElement, mode);
     } else {
-      result = removeMultiElement(trx, hashElement, mode);
+      result = removeMultiElement(&trx, hashElement, mode);
     }

     // we may be looping through this multiple times, and if an error

@ -522,14 +525,15 @@ Result MMFilesHashIndex::remove(transaction::Methods* trx,
 }

 void MMFilesHashIndex::batchInsert(
-    transaction::Methods* trx,
-    std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents,
-    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
+    transaction::Methods& trx,
+    std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& documents,
+    std::shared_ptr<basics::LocalTaskQueue> queue) {
   TRI_ASSERT(queue != nullptr);
   if (_unique) {
-    batchInsertUnique(trx, documents, queue);
+    batchInsertUnique(&trx, documents, queue);
   } else {
-    batchInsertMulti(trx, documents, queue);
+    batchInsertMulti(&trx, documents, queue);
   }
 }

@ -545,7 +549,7 @@ void MMFilesHashIndex::unload() {
 }

 /// @brief provides a size hint for the hash index
-int MMFilesHashIndex::sizeHint(transaction::Methods* trx, size_t size) {
+Result MMFilesHashIndex::sizeHint(transaction::Methods& trx, size_t size) {
   if (_sparse) {
     // for sparse indexes, we assume that we will have less index entries
     // than if the index would be fully populated

@ -553,7 +557,7 @@ int MMFilesHashIndex::sizeHint(transaction::Methods* trx, size_t size) {
   }

   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths());
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());

   if (_unique) {
     return _uniqueArray->_hashArray->resize(&context, size);
@ -264,22 +264,29 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
   bool matchesDefinition(VPackSlice const& info) const override;

-  Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
-
-  Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
-
   void batchInsert(
-      transaction::Methods*,
-      std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const&,
-      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;
+      transaction::Methods& trx,
+      std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& docs,
+      std::shared_ptr<basics::LocalTaskQueue> queue) override;
+
+  Result insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;
+
+  Result remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

   void unload() override;

-  int sizeHint(transaction::Methods*, size_t) override;
+  Result sizeHint(transaction::Methods& trx, size_t size) override;

   bool hasBatchInsert() const override { return true; }

@ -363,4 +370,4 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
 };
 }

 #endif
@ -310,14 +310,16 @@ size_t MMFilesPersistentIndex::memory() const {
 }

 /// @brief inserts a document into the index
-Result MMFilesPersistentIndex::insert(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                      VPackSlice const& doc, OperationMode mode) {
+Result MMFilesPersistentIndex::insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                      velocypack::Slice const& doc, Index::OperationMode mode) {
   std::vector<MMFilesSkiplistIndexElement*> elements;
   Result res;
   int r;

   try {
     r = fillElement(elements, documentId, doc);
   } catch (basics::Exception const& ex) {

@ -342,9 +344,9 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
   }

   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths());
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());
   VPackSlice const key = transaction::helpers::extractKeyFromDocument(doc);
-  auto prefix = buildPrefix(trx->vocbase().id(), _collection.id(), _iid);
+  auto prefix = buildPrefix(trx.vocbase().id(), _collection.id(), _iid);
   VPackBuilder builder;
   std::vector<std::string> values;

@ -412,7 +414,7 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
   }

   auto rocksTransaction =
-      static_cast<MMFilesTransactionState*>(trx->state())->rocksTransaction();
+      static_cast<MMFilesTransactionState*>(trx.state())->rocksTransaction();
   TRI_ASSERT(rocksTransaction != nullptr);

   auto comparator = MMFilesPersistentIndexFeature::instance()->comparator();

@ -492,14 +494,16 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
 }

 /// @brief removes a document from the index
-Result MMFilesPersistentIndex::remove(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                      VPackSlice const& doc, OperationMode mode) {
+Result MMFilesPersistentIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                      velocypack::Slice const& doc, Index::OperationMode mode) {
   std::vector<MMFilesSkiplistIndexElement*> elements;
   Result res;
   int r;

   try {
     r = fillElement(elements, documentId, doc);
   } catch (basics::Exception const& ex) {

@ -524,7 +528,7 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
   }

   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths());
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());
   VPackSlice const key = transaction::helpers::extractKeyFromDocument(doc);
   VPackBuilder builder;
   std::vector<std::string> values;

@ -544,13 +548,13 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
     std::string value;

     value.reserve(keyPrefixSize() + s.byteSize());
-    value.append(buildPrefix(trx->vocbase().id(), _collection.id(), _iid));
+    value.append(buildPrefix(trx.vocbase().id(), _collection.id(), _iid));
     value.append(s.startAs<char const>(), s.byteSize());
     values.emplace_back(std::move(value));
   }

   auto rocksTransaction =
-      static_cast<MMFilesTransactionState*>(trx->state())->rocksTransaction();
+      static_cast<MMFilesTransactionState*>(trx.state())->rocksTransaction();
   TRI_ASSERT(rocksTransaction != nullptr);

   size_t const count = elements.size();

@ -571,7 +575,7 @@ Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
 }

 /// @brief called when the index is dropped
-int MMFilesPersistentIndex::drop() {
+Result MMFilesPersistentIndex::drop() {
   return MMFilesPersistentIndexFeature::instance()->dropIndex(
       _collection.vocbase().id(), _collection.id(), _iid
   );
@ -161,17 +161,23 @@ class MMFilesPersistentIndex final : public MMFilesPathBasedIndex {
     return value;
   }

-  Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

-  Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

   void unload() override {}

-  int drop() override;
+  Result drop() override;

   /// @brief attempts to locate an entry in the index
   ///

@ -199,6 +205,7 @@ class MMFilesPersistentIndex final : public MMFilesPathBasedIndex {
   arangodb::aql::AstNode* specializeCondition(
       arangodb::aql::AstNode*, arangodb::aql::Variable const*) const override;
 };

 }

 #endif
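The `drop()` change above is the companion migration from plain `int` error codes to `Result` return values; callers that still need a bare code unwrap it with `errorNumber()`. A small sketch with a stand-in `Result` (the real class lives in the ArangoDB basics library; the shape below is an assumption for illustration only):

#include <iostream>
#include <string>
#include <utility>

constexpr int TRI_ERROR_NO_ERROR = 0;

// Stand-in for arangodb::Result (illustrative shape only).
class Result {
 public:
  Result() = default;
  explicit Result(int num, std::string msg = "") : _num(num), _msg(std::move(msg)) {}
  bool ok() const { return _num == TRI_ERROR_NO_ERROR; }
  int errorNumber() const { return _num; }
  std::string const& errorMessage() const { return _msg; }
 private:
  int _num = TRI_ERROR_NO_ERROR;
  std::string _msg;
};

// Before: int drop();  After: the richer type can carry an error message as well.
Result drop() { return Result(); }

int main() {
  // Callers that still compare against plain error codes unwrap the Result.
  int dropRes = drop().errorNumber();
  std::cout << (dropRes == TRI_ERROR_NO_ERROR ? "dropped" : "failed") << "\n";
}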
@ -295,9 +295,12 @@ void MMFilesPrimaryIndex::toVelocyPackFigures(VPackBuilder& builder) const {
   _primaryIndex->appendToVelocyPack(builder);
 }

-Result MMFilesPrimaryIndex::insert(transaction::Methods*, LocalDocumentId const&,
-                                   VPackSlice const&, OperationMode) {
+Result MMFilesPrimaryIndex::insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                   velocypack::Slice const&, Index::OperationMode mode) {
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
   LOG_TOPIC(WARN, arangodb::Logger::ENGINES)
       << "insert() called for primary index";

@ -306,9 +309,12 @@ Result MMFilesPrimaryIndex::insert(transaction::Methods*,
                                  "insert() called for primary index");
 }

-Result MMFilesPrimaryIndex::remove(transaction::Methods*, LocalDocumentId const&,
-                                   VPackSlice const&, OperationMode) {
+Result MMFilesPrimaryIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                   velocypack::Slice const&, Index::OperationMode mode) {
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
   LOG_TOPIC(WARN, arangodb::Logger::ENGINES)
       << "remove() called for primary index";
@ -219,13 +219,19 @@ class MMFilesPrimaryIndex final : public MMFilesIndex {
                     std::underlying_type<Index::Serialize>::type) const override;
   void toVelocyPackFigures(VPackBuilder&) const override;

-  Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

-  Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                arangodb::velocypack::Slice const& doc, Index::OperationMode mode) override;

   void load() override {}
   void unload() override;

@ -322,4 +328,4 @@ class MMFilesPrimaryIndex final : public MMFilesIndex {
 };
 }

 #endif
@ -752,13 +752,17 @@ void MMFilesSkiplistIndex::toVelocyPackFigures(VPackBuilder& builder) const {
 }

 /// @brief inserts a document into a skiplist index
-Result MMFilesSkiplistIndex::insert(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                    VPackSlice const& doc, OperationMode mode) {
+Result MMFilesSkiplistIndex::insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                    velocypack::Slice const& doc, Index::OperationMode mode) {
   std::vector<MMFilesSkiplistIndexElement*> elements;
   Result res;
   int r;

   try {
     r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
   } catch (basics::Exception const& ex) {

@ -778,7 +782,7 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
   }

   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths());
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());

   // insert into the index. the memory for the element will be owned or freed
   // by the index

@ -841,7 +845,10 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
   LocalDocumentId rev(found->document()->localDocumentId());
   std::string existingId;

-  _collection.getPhysical()->readDocumentWithCallback(trx, rev, [&existingId](LocalDocumentId const&, VPackSlice doc) {
-    existingId = doc.get(StaticStrings::KeyString).copyString();
-  });
+  _collection.getPhysical()->readDocumentWithCallback(
+      &trx, rev, [&existingId](LocalDocumentId const&, velocypack::Slice doc) -> void {
+        existingId = doc.get(StaticStrings::KeyString).copyString();
+      });

@ -856,13 +863,16 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
 }

 /// @brief removes a document from a skiplist index
-Result MMFilesSkiplistIndex::remove(transaction::Methods* trx, LocalDocumentId const& documentId,
-                                    VPackSlice const& doc, OperationMode mode) {
+Result MMFilesSkiplistIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                                    velocypack::Slice const& doc, Index::OperationMode mode) {
   std::vector<MMFilesSkiplistIndexElement*> elements;
   Result res;
   int r;

   try {
     r = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
   } catch (basics::Exception const& ex) {

@ -882,7 +892,7 @@ Result MMFilesSkiplistIndex::remove(transaction::Methods* trx,
   }

   ManagedDocumentResult result;
-  MMFilesIndexLookupContext context(trx, &_collection, &result, numPaths());
+  MMFilesIndexLookupContext context(&trx, &_collection, &result, numPaths());

   // attempt the removal for skiplist indexes
   // ownership for the index element is transferred to the index
@ -289,13 +289,19 @@ class MMFilesSkiplistIndex final : public MMFilesPathBasedIndex {
   void toVelocyPackFigures(VPackBuilder&) const override;

-  Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insert(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

-  Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-                arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result remove(transaction::Methods& trx, LocalDocumentId const& documentId,
+                velocypack::Slice const& doc, Index::OperationMode mode) override;

   void unload() override;

@ -352,4 +358,4 @@ class MMFilesSkiplistIndex final : public MMFilesPathBasedIndex {
 };
 }

 #endif
@ -1558,7 +1558,7 @@ int MMFilesWalRecoverState::fillIndexes() {
   arangodb::SingleCollectionTransaction trx(
       ctx, *collection, AccessMode::Type::WRITE
   );
-  int res = physical->fillAllIndexes(&trx);
+  int res = physical->fillAllIndexes(trx);

   if (res != TRI_ERROR_NO_ERROR) {
     return res;
@ -380,19 +380,22 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(
     return other;
   }

-  res = fillIndexes(&trx, idx);
+  res = fillIndexes(trx, idx);

   if (!res.ok()) {
     THROW_ARANGO_EXCEPTION(res);
   }

   // we need to sync the selectivity estimates
   res = engine->settingsManager()->sync(false);

   if (res.fail()) {
     LOG_TOPIC(WARN, Logger::ENGINES) << "could not sync settings: " << res.errorMessage();
   }

   rocksdb::Status s = engine->db()->GetRootDB()->FlushWAL(true);

   if (!s.ok()) {
     LOG_TOPIC(WARN, Logger::ENGINES) << "could not flush wal: " << s.ToString();

@ -467,7 +470,7 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
   TRI_ASSERT(cindex != nullptr);

   if (iid == cindex->id()) {
-    int rv = cindex->drop();
+    auto rv = cindex->drop().errorNumber();

     if (rv == TRI_ERROR_NO_ERROR) {
       // trigger compaction before deleting the object

@ -540,10 +543,12 @@ void RocksDBCollection::invokeOnAllElements(
 // -- SECTION DML Operations --
 ///////////////////////////////////

-Result RocksDBCollection::truncate(transaction::Methods* trx, OperationOptions& options) {
+Result RocksDBCollection::truncate(transaction::Methods& trx, OperationOptions& options) {
   TRI_ASSERT(_objectId != 0);
-  auto state = RocksDBTransactionState::toState(trx);
+  auto state = RocksDBTransactionState::toState(&trx);
   RocksDBMethods* mthds = state->rocksdbMethods();

   if (state->isOnlyExclusiveTransaction() &&

@ -597,22 +602,30 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
   // add the log entry so we can recover the correct count
-  auto log = RocksDBLogValue::CollectionTruncate(trx->vocbase().id(),
-                                                 _logicalCollection.id(), _objectId);
+  auto log = RocksDBLogValue::CollectionTruncate(
+      trx.vocbase().id(), _logicalCollection.id(), _objectId
+  );

   s = batch.PutLogData(log.slice());
   if (!s.ok()) {
     return rocksutils::convertStatus(s);
   }

   rocksdb::WriteOptions wo;
   s = db->Write(wo, &batch);
   if (!s.ok()) {
     return rocksutils::convertStatus(s);
   }

   seq = db->GetLatestSequenceNumber() - 1;  // post commit sequence
   uint64_t numDocs = _numberDocuments.exchange(0);
   _meta.adjustNumberDocuments(seq, /*revision*/ newRevisionId(), -static_cast<int64_t>(numDocs));

   {
     READ_LOCKER(guard, _indexesLock);
     for (std::shared_ptr<Index> const& idx : _indexes) {

@ -665,14 +678,14 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
     TRI_ASSERT(key.isString());
     TRI_ASSERT(rid != 0);

-    RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE);
+    RocksDBSavePoint guard(&trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE);

     state->prepareOperation(_logicalCollection.id(),
                             rid,  // actual revision ID!!
                             TRI_VOC_DOCUMENT_OPERATION_REMOVE);

     LocalDocumentId const docId = RocksDBKey::documentId(iter->key());
-    auto res = removeDocument(trx, docId, doc, options);
+    auto res = removeDocument(&trx, docId, doc, options);

     if (res.fail()) {  // Failed to remove document in truncate.
       return res;

@ -688,7 +701,7 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
     guard.finish(hasPerformedIntermediateCommit);

-    trackWaitForSync(trx, options);
+    trackWaitForSync(&trx, options);
     iter->Next();
   }

@ -1099,11 +1112,17 @@ Result RocksDBCollection::replace(
 }

 Result RocksDBCollection::remove(
-    arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
-    arangodb::ManagedDocumentResult& previous, OperationOptions& options,
-    TRI_voc_tick_t& resultMarkerTick, bool, TRI_voc_rid_t& prevRev,
-    TRI_voc_rid_t& revisionId, KeyLockInfo* /*keyLockInfo*/,
-    std::function<Result(void)> callbackDuringLock) {
+    transaction::Methods& trx, velocypack::Slice slice,
+    ManagedDocumentResult& previous, OperationOptions& options,
+    TRI_voc_tick_t& resultMarkerTick, bool /*lock*/, TRI_voc_rid_t& prevRev,
+    TRI_voc_rid_t& revisionId, KeyLockInfo* /*keyLockInfo*/,
+    std::function<Result(void)> callbackDuringLock) {
   // store the tick that was used for writing the document
   // note that we don't need it for this engine
   resultMarkerTick = 0;

@ -1119,7 +1138,8 @@ Result RocksDBCollection::remove(
   TRI_ASSERT(!key.isNone());

   // get the previous revision
-  Result res = this->read(trx, key, previous, /*lock*/false);
+  auto res = this->read(&trx, key, previous, /*lock*/false);

   if (res.fail()) {
     return res;
   }

@ -1135,24 +1155,24 @@ Result RocksDBCollection::remove(
   // Check old revision:
   if (!options.ignoreRevs && slice.isObject()) {
     TRI_voc_rid_t expectedRevisionId = TRI_ExtractRevisionId(slice);
-    int res = checkRevision(trx, expectedRevisionId, oldRevisionId);
+    auto res = checkRevision(&trx, expectedRevisionId, oldRevisionId);

     if (res != TRI_ERROR_NO_ERROR) {
       return Result(res);
     }
   }

-  auto state = RocksDBTransactionState::toState(trx);
-  RocksDBSavePoint guard(trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE);
+  auto state = RocksDBTransactionState::toState(&trx);
+  RocksDBSavePoint guard(&trx, TRI_VOC_DOCUMENT_OPERATION_REMOVE);

   // add possible log statement under guard
   state->prepareOperation(
       _logicalCollection.id(), oldRevisionId, TRI_VOC_DOCUMENT_OPERATION_REMOVE
   );
-  res = removeDocument(trx, oldDocumentId, oldDoc, options);
+  res = removeDocument(&trx, oldDocumentId, oldDoc, options);

   if (res.ok()) {
-    trackWaitForSync(trx, options);
+    trackWaitForSync(&trx, options);

     bool hasPerformedIntermediateCommit = false;

@ -1238,13 +1258,15 @@ void RocksDBCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
 }

 template<typename WriteBatchType, typename MethodsType>
-static arangodb::Result fillIndex(transaction::Methods* trx, RocksDBIndex* ridx,
-                                  std::unique_ptr<IndexIterator> it,
-                                  WriteBatchType& batch, RocksDBCollection* rcol) {
-  auto state = RocksDBTransactionState::toState(trx);
+static arangodb::Result fillIndex(transaction::Methods& trx, RocksDBIndex* ridx,
+                                  std::unique_ptr<IndexIterator> it,
+                                  WriteBatchType& batch, RocksDBCollection* rcol) {
+  auto state = RocksDBTransactionState::toState(&trx);

   // fillindex can be non transactional, we just need to clean up
   rocksdb::DB* db = rocksutils::globalRocksDB()->GetRootDB();
   TRI_ASSERT(db != nullptr);

@ -1258,6 +1280,7 @@ static arangodb::Result fillIndex(transaction::Methods* trx,
     if (res.ok()) {
       res = ridx->insertInternal(trx, &batched, documentId, slice,
                                  Index::OperationMode::normal);

       if (res.ok()) {
         numDocsWritten++;
       }

@ -1306,13 +1329,15 @@ static arangodb::Result fillIndex(transaction::Methods* trx,
 /// non-transactional: fill index with existing documents
 /// from this collection
 arangodb::Result RocksDBCollection::fillIndexes(
-    transaction::Methods* trx, std::shared_ptr<arangodb::Index> added) {
-  TRI_ASSERT(trx->state()->collection(
+    transaction::Methods& trx, std::shared_ptr<arangodb::Index> added) {
+  TRI_ASSERT(trx.state()->collection(
       _logicalCollection.id(), AccessMode::Type::EXCLUSIVE
   ));

   std::unique_ptr<IndexIterator> it(new RocksDBAllIndexIterator(
-      &_logicalCollection, trx, primaryIndex()
+      &_logicalCollection, &trx, primaryIndex()
   ));

   RocksDBIndex* ridx = static_cast<RocksDBIndex*>(added.get());

@ -1360,13 +1385,16 @@ Result RocksDBCollection::insertDocument(
   READ_LOCKER(guard, _indexesLock);
   for (std::shared_ptr<Index> const& idx : _indexes) {
     RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get());
-    Result tmpres = rIdx->insertInternal(trx, mthds, documentId, doc,
-                                         options.indexOperationMode);
+    auto tmpres = rIdx->insertInternal(*trx, mthds, documentId, doc,
+                                       options.indexOperationMode);

     if (tmpres.fail()) {
       if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
         // in case of OOM return immediately
         return tmpres;
       }

       if (tmpres.is(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) || res.ok()) {
         // "prefer" unique constraint violated over other errors
         res.reset(tmpres);

@ -1409,12 +1437,15 @@ Result RocksDBCollection::removeDocument(
   Result resInner;
   READ_LOCKER(guard, _indexesLock);
   for (std::shared_ptr<Index> const& idx : _indexes) {
-    Result tmpres = idx->remove(trx, documentId, doc, options.indexOperationMode);
+    auto tmpres = idx->remove(*trx, documentId, doc, options.indexOperationMode);

     if (tmpres.fail()) {
       if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
         // in case of OOM return immediately
         return tmpres;
       }

       // for other errors, set result
       res.reset(tmpres);
     }

@ -1464,13 +1495,22 @@ Result RocksDBCollection::updateDocument(
   READ_LOCKER(guard, _indexesLock);
   for (std::shared_ptr<Index> const& idx : _indexes) {
     RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get());
-    Result tmpres = rIdx->updateInternal(trx, mthd, oldDocumentId, oldDoc, newDocumentId,
-                                         newDoc, options.indexOperationMode);
+    auto tmpres = rIdx->updateInternal(*trx, mthd, oldDocumentId, oldDoc,
+                                       newDocumentId, newDoc, options.indexOperationMode);

     if (tmpres.fail()) {
       if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
         // in case of OOM return immediately
         return tmpres;
       }

       res.reset(tmpres);
     }
   }
@ -114,7 +114,10 @@ class RocksDBCollection final : public PhysicalCollection {
   // -- SECTION DML Operations --
   ///////////////////////////////////

-  Result truncate(transaction::Methods* trx, OperationOptions&) override;
+  Result truncate(transaction::Methods& trx, OperationOptions& options) override;

   void deferDropCollection(
       std::function<bool(LogicalCollection&)> const& callback

@ -169,13 +172,18 @@ class RocksDBCollection final : public PhysicalCollection {
                  TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
                  std::function<Result(void)> callbackDuringLock) override;

-  Result remove(arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
-                arangodb::ManagedDocumentResult& previous, OperationOptions& options,
-                TRI_voc_tick_t& resultMarkerTick, bool lock, TRI_voc_rid_t& prevRev,
-                TRI_voc_rid_t& revisionId, KeyLockInfo* /*keyLockInfo*/,
-                std::function<Result(void)> callbackDuringLock) override;
+  Result remove(transaction::Methods& trx, velocypack::Slice slice,
+                ManagedDocumentResult& previous, OperationOptions& options,
+                TRI_voc_tick_t& resultMarkerTick, bool lock, TRI_voc_rid_t& prevRev,
+                TRI_voc_rid_t& revisionId, KeyLockInfo* keyLockInfo,
+                std::function<Result(void)> callbackDuringLock) override;

   /// adjust the current number of docs
   void adjustNumberDocuments(TRI_voc_rid_t revisionId, int64_t adjustment);

@ -208,8 +216,10 @@ class RocksDBCollection final : public PhysicalCollection {
   void figuresSpecific(std::shared_ptr<velocypack::Builder>&) override;
   void addIndex(std::shared_ptr<arangodb::Index> idx);

-  arangodb::Result fillIndexes(transaction::Methods*, std::shared_ptr<arangodb::Index>);
+  arangodb::Result fillIndexes(transaction::Methods& trx, std::shared_ptr<arangodb::Index> indexes);

   // @brief return the primary index
   // WARNING: Make sure that this instance

@ -285,4 +295,4 @@ inline RocksDBCollection* toRocksDBCollection(LogicalCollection& logical) {

 }  // namespace arangodb

 #endif
@ -572,18 +572,21 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder,
   builder.close();
 }

-Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx, RocksDBMethods* mthd,
-                                        LocalDocumentId const& documentId,
-                                        VPackSlice const& doc, OperationMode mode) {
+Result RocksDBEdgeIndex::insertInternal(transaction::Methods& trx, RocksDBMethods* mthd,
+                                        LocalDocumentId const& documentId,
+                                        velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;

   VPackSlice fromTo = doc.get(_directionAttr);
   TRI_ASSERT(fromTo.isString());
   auto fromToRef = StringRef(fromTo);
-  RocksDBKeyLeaser key(trx);
+  RocksDBKeyLeaser key(&trx);

   key->constructEdgeIndexValue(_objectId, fromToRef, documentId);

   VPackSlice toFrom = _isFromIndex
                           ? transaction::helpers::extractToFromDocument(doc)
                           : transaction::helpers::extractFromFromDocument(doc);

@ -595,31 +598,35 @@ Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
   // acquire rocksdb transaction
   rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());

   if (s.ok()) {
     std::hash<StringRef> hasher;
     uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
-    RocksDBTransactionState::toState(trx)->trackIndexInsert(
+    RocksDBTransactionState::toState(&trx)->trackIndexInsert(
         _collection.id(), id(), hash
     );
   } else {
     res.reset(rocksutils::convertStatus(s));
     addErrorMsg(res);
   }

   return res;
 }

-Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx, RocksDBMethods* mthd,
-                                        LocalDocumentId const& documentId,
-                                        VPackSlice const& doc, OperationMode mode) {
+Result RocksDBEdgeIndex::removeInternal(transaction::Methods& trx, RocksDBMethods* mthd,
+                                        LocalDocumentId const& documentId,
+                                        velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;

   // VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
   VPackSlice fromTo = doc.get(_directionAttr);
   auto fromToRef = StringRef(fromTo);
   TRI_ASSERT(fromTo.isString());
-  RocksDBKeyLeaser key(trx);
+  RocksDBKeyLeaser key(&trx);
   key->constructEdgeIndexValue(_objectId, fromToRef, documentId);
   VPackSlice toFrom = _isFromIndex
                           ? transaction::helpers::extractToFromDocument(doc)

@ -634,7 +641,7 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
   if (s.ok()) {
     std::hash<StringRef> hasher;
     uint64_t hash = static_cast<uint64_t>(hasher(fromToRef));
-    RocksDBTransactionState::toState(trx)->trackIndexRemove(
+    RocksDBTransactionState::toState(&trx)->trackIndexRemove(
         _collection.id(), id(), hash
     );
   } else {

@ -646,15 +653,16 @@ Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
 }

 void RocksDBEdgeIndex::batchInsert(
-    transaction::Methods* trx,
+    transaction::Methods& trx,
     std::vector<std::pair<LocalDocumentId, VPackSlice>> const& documents,
     std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
-  auto* mthds = RocksDBTransactionState::toMethods(trx);
+  auto* mthds = RocksDBTransactionState::toMethods(&trx);

   for (auto const& doc : documents) {
     VPackSlice fromTo = doc.second.get(_directionAttr);
     TRI_ASSERT(fromTo.isString());
     auto fromToRef = StringRef(fromTo);
-    RocksDBKeyLeaser key(trx);
+    RocksDBKeyLeaser key(&trx);
    key->constructEdgeIndexValue(_objectId, fromToRef, doc.first);

     blackListKey(fromToRef);
@ -148,9 +148,10 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
                     std::underlying_type<Index::Serialize>::type) const override;

   void batchInsert(
-      transaction::Methods*,
-      std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const&,
-      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;
+      transaction::Methods& trx,
+      std::vector<std::pair<LocalDocumentId, velocypack::Slice>> const& docs,
+      std::shared_ptr<basics::LocalTaskQueue> queue) override;

   bool hasBatchInsert() const override { return false; }

@ -174,15 +175,21 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
   void afterTruncate(TRI_voc_tick_t tick) override;

-  Result insertInternal(transaction::Methods*, RocksDBMethods*,
-                        LocalDocumentId const& documentId,
-                        arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insertInternal(transaction::Methods& trx, RocksDBMethods* methods,
+                        LocalDocumentId const& documentId,
+                        velocypack::Slice const& doc, Index::OperationMode mode) override;

-  Result removeInternal(transaction::Methods*, RocksDBMethods*,
-                        LocalDocumentId const& documentId,
-                        arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result removeInternal(transaction::Methods& trx, RocksDBMethods* methods,
+                        LocalDocumentId const& documentId,
+                        velocypack::Slice const& doc, Index::OperationMode mode) override;

 private:
   /// @brief create the iterator
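Because the derived declarations above are marked `override`, a subclass that was missed during the pointer-to-reference migration fails to compile instead of silently introducing a new overload that the engine never calls. A stand-in sketch (not ArangoDB classes):

struct Txn {};

struct IndexBase {
  virtual ~IndexBase() = default;
  // The base class switched from Txn* to Txn&.
  virtual int insertInternal(Txn& trx) = 0;
};

struct DerivedIndex final : IndexBase {
  // Leaving the old pointer signature here would be a compile error
  // rather than a never-called overload:
  //   int insertInternal(Txn* trx) override;   // error: does not override
  int insertInternal(Txn& trx) override { (void)trx; return 0; }
};

int main() {
  Txn trx;
  DerivedIndex idx;
  return idx.insertInternal(trx);
}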
@ -1289,6 +1289,7 @@ arangodb::Result RocksDBEngine::dropCollection(
   // delete indexes, RocksDBIndex::drop() has its own check
   std::vector<std::shared_ptr<Index>> vecShardIndex = coll->getIndexes();
   TRI_ASSERT(!vecShardIndex.empty());

   for (auto& index : vecShardIndex) {
     RocksDBIndex* ridx = static_cast<RocksDBIndex*>(index.get());
     res = RocksDBCollectionMeta::deleteIndexEstimate(db, ridx->objectId());

@ -1296,8 +1297,9 @@ arangodb::Result RocksDBEngine::dropCollection(
       LOG_TOPIC(WARN, Logger::ENGINES) << "could not delete index estimate: "
                                        << res.errorMessage();
     }

-    int dropRes = index->drop();
+    auto dropRes = index->drop().errorNumber();

     if (dropRes != TRI_ERROR_NO_ERROR) {
       // We try to remove all indexed values.
       // If it does not work they cannot be accessed any more and leaked.
@ -185,13 +185,16 @@ bool RocksDBFulltextIndex::matchesDefinition(VPackSlice const& info) const {
   return true;
 }

-Result RocksDBFulltextIndex::insertInternal(transaction::Methods* trx, RocksDBMethods* mthd,
-                                            LocalDocumentId const& documentId,
-                                            VPackSlice const& doc, OperationMode mode) {
+Result RocksDBFulltextIndex::insertInternal(transaction::Methods& trx, RocksDBMethods* mthd,
+                                            LocalDocumentId const& documentId,
+                                            velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;
   std::set<std::string> words = wordlist(doc);

   if (words.empty()) {
     return res;
   }

@ -202,27 +205,32 @@ Result RocksDBFulltextIndex::insertInternal(transaction::Methods* trx,
   // size_t const count = words.size();
   for (std::string const& word : words) {
-    RocksDBKeyLeaser key(trx);
+    RocksDBKeyLeaser key(&trx);

     key->constructFulltextIndexValue(_objectId, StringRef(word), documentId);

     rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());

     if (!s.ok()) {
       res.reset(rocksutils::convertStatus(s, rocksutils::index));
       addErrorMsg(res);
       break;
     }
   }

   return res;
 }

-Result RocksDBFulltextIndex::removeInternal(transaction::Methods* trx, RocksDBMethods* mthd,
-                                            LocalDocumentId const& documentId,
-                                            VPackSlice const& doc, OperationMode mode) {
+Result RocksDBFulltextIndex::removeInternal(transaction::Methods& trx, RocksDBMethods* mthd,
+                                            LocalDocumentId const& documentId,
+                                            velocypack::Slice const& doc, Index::OperationMode mode) {
   Result res;

   std::set<std::string> words = wordlist(doc);

   if (words.empty()) {
     return res;
   }

@ -230,16 +238,19 @@ Result RocksDBFulltextIndex::removeInternal(transaction::Methods* trx,
   // now we are going to construct the value to insert into rocksdb
   // unique indexes have a different key structure
   for (std::string const& word : words) {
-    RocksDBKeyLeaser key(trx);
+    RocksDBKeyLeaser key(&trx);

     key->constructFulltextIndexValue(_objectId, StringRef(word), documentId);

     rocksdb::Status s = mthd->Delete(_cf, key.ref());

     if (!s.ok()) {
       res.reset(rocksutils::convertStatus(s, rocksutils::index));
       addErrorMsg(res);
       break;
     }
   }

   return res;
 }
@ -111,16 +111,22 @@ class RocksDBFulltextIndex final : public RocksDBIndex {

 protected:
   /// insert index elements into the specified write batch.
-  Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
-                        LocalDocumentId const& documentId,
-                        arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result insertInternal(transaction::Methods& trx, RocksDBMethods* methods,
+                        LocalDocumentId const& documentId,
+                        velocypack::Slice const& doc, Index::OperationMode mode) override;

   /// remove index elements and put it in the specified write batch.
-  Result removeInternal(transaction::Methods*, RocksDBMethods*,
-                        LocalDocumentId const& documentId,
-                        arangodb::velocypack::Slice const&, OperationMode mode) override;
+  Result removeInternal(transaction::Methods& trx, RocksDBMethods* methods,
+                        LocalDocumentId const& documentId,
+                        velocypack::Slice const& doc, Index::OperationMode mode) override;

 private:
   std::set<std::string> wordlist(arangodb::velocypack::Slice const&);
@ -399,33 +399,43 @@ IndexIterator* RocksDBGeoIndex::iteratorForCondition(
 }

 /// internal insert function, set batch or trx before calling
-Result RocksDBGeoIndex::insertInternal(transaction::Methods* trx, RocksDBMethods* mthd,
-                                       LocalDocumentId const& documentId,
-                                       velocypack::Slice const& doc, OperationMode mode) {
+Result RocksDBGeoIndex::insertInternal(transaction::Methods& trx, RocksDBMethods* mthd,
+                                       LocalDocumentId const& documentId,
+                                       velocypack::Slice const& doc,
+                                       arangodb::Index::OperationMode mode) {
   Result res;

   // covering and centroid of coordinate / polygon / ...
   size_t reserve = _variant == Variant::GEOJSON ? 8 : 1;
   std::vector<S2CellId> cells;
   cells.reserve(reserve);

   S2Point centroid;
   res = geo_index::Index::indexCells(doc, cells, centroid);

   if (res.fail()) {
     if (res.is(TRI_ERROR_BAD_PARAMETER)) {
       res.reset();  // Invalid, no insert. Index is sparse
     }
     return res;
   }

   TRI_ASSERT(!cells.empty());
   TRI_ASSERT(S2::IsUnitLength(centroid));

   RocksDBValue val = RocksDBValue::S2Value(centroid);
-  RocksDBKeyLeaser key(trx);
+  RocksDBKeyLeaser key(&trx);

   for (S2CellId cell : cells) {
     key->constructGeoIndexValue(_objectId, cell.id(), documentId);
     rocksdb::Status s = mthd->Put(RocksDBColumnFamily::geo(), key.ref(), val.string());

     if (!s.ok()) {
       res.reset(rocksutils::convertStatus(s, rocksutils::index));
       addErrorMsg(res);

@ -437,26 +447,33 @@ Result RocksDBGeoIndex::insertInternal(transaction::Methods* trx,
 }

 /// internal remove function, set batch or trx before calling
-Result RocksDBGeoIndex::removeInternal(transaction::Methods* trx, RocksDBMethods* mthd,
-                                       LocalDocumentId const& documentId,
-                                       VPackSlice const& doc, OperationMode mode) {
+Result RocksDBGeoIndex::removeInternal(transaction::Methods& trx, RocksDBMethods* mthd,
+                                       LocalDocumentId const& documentId,
+                                       velocypack::Slice const& doc,
+                                       arangodb::Index::OperationMode mode) {
   Result res;

   // covering and centroid of coordinate / polygon / ...
   std::vector<S2CellId> cells;
   S2Point centroid;

   res = geo_index::Index::indexCells(doc, cells, centroid);

   if (res.fail()) {  // might occur if insert is rolled back
     if (res.is(TRI_ERROR_BAD_PARAMETER)) {
       res.reset();  // Invalid, no insert. Index is sparse
     }
     return res;
   }

   TRI_ASSERT(!cells.empty());

-  RocksDBKeyLeaser key(trx);
+  RocksDBKeyLeaser key(&trx);

   // FIXME: can we rely on the region coverer to return
   // the same cells everytime for the same parameters ?
   for (S2CellId cell : cells) {
@ -82,20 +82,26 @@ class RocksDBGeoIndex final : public RocksDBIndex, public geo_index::Index {
|
|||
bool matchesDefinition(velocypack::Slice const& info) const override;
|
||||
|
||||
/// insert index elements into the specified write batch.
|
||||
Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&,
|
||||
OperationMode mode) override;
|
||||
Result insertInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
arangodb::Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
/// remove index elements and put it in the specified write batch.
|
||||
Result removeInternal(transaction::Methods*, RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&,
|
||||
OperationMode mode) override;
|
||||
Result removeInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& docs,
|
||||
arangodb::Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
private:
|
||||
std::string const _typeName;
|
||||
};
|
||||
} // namespace arangodb
|
||||
|
||||
#endif
|
||||
#endif
|
|
@ -112,8 +112,19 @@ Result removeKeysOutsideRange(VPackSlice chunkSlice,
|
|||
builder.clear();
|
||||
builder.add(velocypack::ValuePair(docKey.data(), docKey.size(),
|
||||
velocypack::ValueType::String));
|
||||
Result r = physical->remove(&trx, builder.slice(), mdr, options, tick,
|
||||
false, prevRev, revisionId, nullptr, nullptr);
|
||||
auto r = physical->remove(
|
||||
trx,
|
||||
builder.slice(),
|
||||
mdr,
|
||||
options,
|
||||
tick,
|
||||
false,
|
||||
prevRev,
|
||||
revisionId,
|
||||
nullptr,
|
||||
nullptr
|
||||
);
|
||||
|
||||
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
|
||||
// ignore not found, we remove conflicting docs ahead of time
|
||||
THROW_ARANGO_EXCEPTION(r);
|
||||
|
@ -147,8 +158,19 @@ Result removeKeysOutsideRange(VPackSlice chunkSlice,
|
|||
builder.clear();
|
||||
builder.add(velocypack::ValuePair(docKey.data(), docKey.size(),
|
||||
velocypack::ValueType::String));
|
||||
Result r = physical->remove(&trx, builder.slice(), mdr, options, tick,
|
||||
false, prevRev, revisionId, nullptr, nullptr);
|
||||
auto r = physical->remove(
|
||||
trx,
|
||||
builder.slice(),
|
||||
mdr,
|
||||
options,
|
||||
tick,
|
||||
false,
|
||||
prevRev,
|
||||
revisionId,
|
||||
nullptr,
|
||||
nullptr
|
||||
);
|
||||
|
||||
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
|
||||
// ignore not found, we remove conflicting docs ahead of time
|
||||
THROW_ARANGO_EXCEPTION(r);
|
||||
|
@ -301,14 +323,24 @@ Result syncChunkRocksDB(
|
|||
keyBuilder->clear();
|
||||
keyBuilder->add(VPackValue(localKey));
|
||||
|
||||
Result r =
|
||||
physical->remove(trx, keyBuilder->slice(), mdr, options, resultTick,
|
||||
false, prevRev, revisionId, nullptr, nullptr);
|
||||
auto r = physical->remove(
|
||||
*trx,
|
||||
keyBuilder->slice(),
|
||||
mdr,
|
||||
options,
|
||||
resultTick,
|
||||
false,
|
||||
prevRev,
|
||||
revisionId,
|
||||
nullptr,
|
||||
nullptr
|
||||
);
|
||||
|
||||
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
|
||||
// ignore not found, we remove conflicting docs ahead of time
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
if (r.ok()) {
|
||||
++stats.numDocsRemoved;
|
||||
}
|
||||
|
@ -355,9 +387,19 @@ Result syncChunkRocksDB(
|
|||
keyBuilder->clear();
|
||||
keyBuilder->add(VPackValue(localKey));
|
||||
|
||||
Result r =
|
||||
physical->remove(trx, keyBuilder->slice(), mdr, options, resultTick,
|
||||
false, prevRev, revisionId, nullptr, nullptr);
|
||||
auto r = physical->remove(
|
||||
*trx,
|
||||
keyBuilder->slice(),
|
||||
mdr,
|
||||
options,
|
||||
resultTick,
|
||||
false,
|
||||
prevRev,
|
||||
revisionId,
|
||||
nullptr,
|
||||
nullptr
|
||||
);
|
||||
|
||||
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
|
||||
// ignore not found, we remove conflicting docs ahead of time
|
||||
return r;
|
||||
|
@ -481,16 +523,28 @@ Result syncChunkRocksDB(
|
|||
keyBuilder->clear();
|
||||
keyBuilder->add(VPackValue(conflictingKey));
|
||||
|
||||
Result res =
|
||||
physical->remove(trx, keyBuilder->slice(), mdr, options, resultTick,
|
||||
false, prevRev, revisionId, nullptr, nullptr);
|
||||
auto res = physical->remove(
|
||||
*trx,
|
||||
keyBuilder->slice(),
|
||||
mdr,
|
||||
options,
|
||||
resultTick,
|
||||
false,
|
||||
prevRev,
|
||||
revisionId,
|
||||
nullptr,
|
||||
nullptr
|
||||
);
|
||||
|
||||
if (res.ok()) {
|
||||
++stats.numDocsRemoved;
|
||||
}
|
||||
|
||||
return res;
|
||||
};
|
||||
|
||||
LocalDocumentId const documentId = physical->lookupKey(trx, keySlice);
|
||||
|
||||
if (!documentId.isSet()) {
|
||||
// INSERT
|
||||
TRI_ASSERT(options.indexOperationMode == Index::OperationMode::internal);
|
||||
|
@ -743,9 +797,19 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
|
|||
ManagedDocumentResult previous;
|
||||
TRI_voc_rid_t resultMarkerTick;
|
||||
TRI_voc_rid_t prevRev, revisionId;
|
||||
Result r = physical->remove(&trx, tempBuilder.slice(), previous,
|
||||
options, resultMarkerTick, false,
|
||||
prevRev, revisionId, nullptr, nullptr);
|
||||
auto r = physical->remove(
|
||||
trx,
|
||||
tempBuilder.slice(),
|
||||
previous,
|
||||
options,
|
||||
resultMarkerTick,
|
||||
false,
|
||||
prevRev,
|
||||
revisionId,
|
||||
nullptr,
|
||||
nullptr
|
||||
);
|
||||
|
||||
if (r.fail() && r.isNot(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND)) {
|
||||
// ignore not found, we remove conflicting docs ahead of time
|
||||
THROW_ARANGO_EXCEPTION(r);
|
||||
|
@ -754,9 +818,10 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
|
|||
if (r.ok()) {
|
||||
++stats.numDocsRemoved;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
bool rangeUnequal = false;
|
||||
bool nextChunk = false;
|
||||
int cmp2 = docKey.compare(highKey);
|
||||
|
|
|
@@ -202,7 +202,7 @@ void RocksDBIndex::destroyCache() {
_cachePresent = false;
}

int RocksDBIndex::drop() {
Result RocksDBIndex::drop() {
auto* coll = toRocksDBCollection(_collection);
// edge index needs to be dropped with prefixSameAsStart = false
// otherwise full index scan will not work

@@ -235,7 +235,7 @@ int RocksDBIndex::drop() {
}
#endif

return r.errorNumber();
return r;
}

void RocksDBIndex::afterTruncate(TRI_voc_tick_t) {

@@ -247,12 +247,15 @@ void RocksDBIndex::afterTruncate(TRI_voc_tick_t) {
}
}

Result RocksDBIndex::updateInternal(transaction::Methods* trx, RocksDBMethods* mthd,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
arangodb::velocypack::Slice const& newDoc,
OperationMode mode) {
Result RocksDBIndex::updateInternal(
transaction::Methods& trx,
RocksDBMethods* mthd,
LocalDocumentId const& oldDocumentId,
velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc,
Index::OperationMode mode
) {
// It is illegal to call this method on the primary index
// RocksDBPrimaryIndex must override this method accordingly
TRI_ASSERT(type() != TRI_IDX_TYPE_PRIMARY_INDEX);

@@ -65,7 +65,28 @@ class RocksDBIndex : public Index {

bool isPersistent() const override final { return true; }

int drop() override;
Result drop() override;

Result insert(
transaction::Methods& trx,
LocalDocumentId const& documentId,
velocypack::Slice const& doc,
Index::OperationMode mode
) override {
auto mthds = RocksDBTransactionState::toMethods(&trx);
return insertInternal(trx, mthds, documentId, doc, mode);
}

Result remove(
transaction::Methods& trx,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
Index::OperationMode mode
) override {
auto mthds = RocksDBTransactionState::toMethods(&trx);
return removeInternal(trx, mthds, documentId, doc, mode);
}

virtual void afterTruncate(TRI_voc_tick_t tick) override;

void load() override;

@@ -76,22 +97,11 @@ class RocksDBIndex : public Index {
void cleanup();

/// @brief provides a size hint for the index
int sizeHint(transaction::Methods* /*trx*/, size_t /*size*/) override final {
// nothing to do here
return TRI_ERROR_NO_ERROR;
}

Result insert(transaction::Methods* trx, LocalDocumentId const& documentId,
velocypack::Slice const& doc, OperationMode mode) override {
auto mthds = RocksDBTransactionState::toMethods(trx);
return insertInternal(trx, mthds, documentId, doc, mode);
}

Result remove(transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
OperationMode mode) override {
auto mthds = RocksDBTransactionState::toMethods(trx);
return removeInternal(trx, mthds, documentId, doc, mode);
Result sizeHint(
transaction::Methods& /*trx*/,
size_t /*size*/
) override final {
return Result(); // nothing to do here
}

void setCacheEnabled(bool enable) {

@@ -102,23 +112,32 @@ class RocksDBIndex : public Index {
void destroyCache();

/// insert index elements into the specified write batch.
virtual Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) = 0;

virtual Result updateInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc,
OperationMode mode);
virtual Result insertInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
Index::OperationMode mode
) = 0;

/// remove index elements and put it in the specified write batch.
virtual Result removeInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) = 0;
virtual Result removeInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc,
Index::OperationMode mode
) = 0;

virtual Result updateInternal(
transaction::Methods& trx,
RocksDBMethods* methods,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc,
Index::OperationMode mode
);

rocksdb::ColumnFamilyHandle* columnFamily() const { return _cf; }

@@ -170,4 +189,4 @@ class RocksDBIndex : public Index {
};
} // namespace arangodb

#endif
#endif
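The RocksDBIndex.h hunks above are the heart of the change: the public insert/remove wrappers and the pure-virtual *Internal hooks now take transaction::Methods& instead of transaction::Methods*. As a minimal, self-contained sketch of why a reference parameter removes the class of null-pointer bugs this commit targets, the following uses hypothetical stand-in types only; it is not ArangoDB code.

// --- illustrative sketch, not part of the commit ---
#include <iostream>
#include <stdexcept>

// Hypothetical stand-ins for the real transaction and result types.
struct Transaction { int id = 42; };
struct Result { bool ok = true; };

// Before: raw pointer; every callee must guard against nullptr (or forget to).
Result insertOld(Transaction* trx) {
  if (trx == nullptr) {                 // easy to omit; omission means undefined behaviour
    throw std::invalid_argument("trx is null");
  }
  std::cout << "old-style insert in trx " << trx->id << "\n";
  return {};
}

// After: reference; a valid object is guaranteed by the type system, no runtime check needed.
Result insertNew(Transaction& trx) {
  std::cout << "new-style insert in trx " << trx.id << "\n";
  return {};
}

int main() {
  Transaction trx;
  insertOld(&trx);   // old call site passes an address
  insertNew(trx);    // new call site passes the object itself
  // insertNew(nullptr);  // would not compile: nullness is ruled out at compile time
}
// --- end of sketch ---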
@ -334,26 +334,32 @@ bool RocksDBPrimaryIndex::lookupRevision(transaction::Methods* trx,
|
|||
return true;
|
||||
}
|
||||
|
||||
Result RocksDBPrimaryIndex::insertInternal(transaction::Methods* trx,
|
||||
RocksDBMethods* mthd,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& slice,
|
||||
OperationMode mode) {
|
||||
Result RocksDBPrimaryIndex::insertInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* mthd,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& slice,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
Result res;
|
||||
|
||||
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice);
|
||||
TRI_ASSERT(keySlice.isString());
|
||||
RocksDBKeyLeaser key(trx);
|
||||
RocksDBKeyLeaser key(&trx);
|
||||
|
||||
key->constructPrimaryIndexValue(_objectId, StringRef(keySlice));
|
||||
|
||||
rocksdb::PinnableSlice val;
|
||||
rocksdb::Status s = mthd->Get(_cf, key->string(), &val);
|
||||
|
||||
if (s.ok()) { // detected conflicting primary key
|
||||
std::string existingId = keySlice.copyString();
|
||||
|
||||
if (mode == OperationMode::internal) {
|
||||
return res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED, std::move(existingId));
|
||||
}
|
||||
|
||||
res.reset(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED);
|
||||
|
||||
return addErrorMsg(res, existingId);
|
||||
}
|
||||
val.Reset(); // clear used memory
|
||||
|
@ -371,25 +377,28 @@ Result RocksDBPrimaryIndex::insertInternal(transaction::Methods* trx,
|
|||
return res;
|
||||
}
|
||||
|
||||
Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
|
||||
RocksDBMethods* mthd,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
arangodb::velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
OperationMode mode) {
|
||||
Result RocksDBPrimaryIndex::updateInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* mthd,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
Result res;
|
||||
|
||||
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(oldDoc);
|
||||
TRI_ASSERT(keySlice == oldDoc.get(StaticStrings::KeyString));
|
||||
RocksDBKeyLeaser key(trx);
|
||||
RocksDBKeyLeaser key(&trx);
|
||||
|
||||
key->constructPrimaryIndexValue(_objectId, StringRef(keySlice));
|
||||
|
||||
TRI_voc_rid_t revision = transaction::helpers::extractRevFromDocument(newDoc);
|
||||
auto value = RocksDBValue::PrimaryIndexValue(newDocumentId, revision);
|
||||
|
||||
blackListKey(key->string().data(),
|
||||
static_cast<uint32_t>(key->string().size()));
|
||||
|
||||
|
||||
rocksdb::Status s = mthd->Put(_cf, key.ref(), value.string());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
|
@ -398,24 +407,26 @@ Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
|
|||
return res;
|
||||
}
|
||||
|
||||
Result RocksDBPrimaryIndex::removeInternal(transaction::Methods* trx,
|
||||
RocksDBMethods* mthd,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& slice,
|
||||
OperationMode mode) {
|
||||
Result RocksDBPrimaryIndex::removeInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* mthd,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& slice,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
Result res;
|
||||
|
||||
// TODO: deal with matching revisions?
|
||||
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice);
|
||||
TRI_ASSERT(keySlice.isString());
|
||||
RocksDBKeyLeaser key(trx);
|
||||
RocksDBKeyLeaser key(&trx);
|
||||
key->constructPrimaryIndexValue(
|
||||
_objectId, StringRef(keySlice));
|
||||
|
||||
blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));
|
||||
|
||||
// acquire rocksdb transaction
|
||||
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(trx);
|
||||
auto* mthds = RocksDBTransactionState::toMethods(&trx);
|
||||
rocksdb::Status s = mthds->Delete(_cf, key.ref());
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
|
|
|
@ -177,23 +177,32 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
|
|||
std::function<bool(LocalDocumentId const&)> callback) const;
|
||||
|
||||
/// insert index elements into the specified write batch.
|
||||
Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&,
|
||||
OperationMode mode) override;
|
||||
|
||||
Result updateInternal(transaction::Methods* trx, RocksDBMethods*,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
arangodb::velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
OperationMode mode) override;
|
||||
Result insertInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
/// remove index elements and put it in the specified write batch.
|
||||
Result removeInternal(transaction::Methods*, RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&,
|
||||
OperationMode mode) override;
|
||||
Result removeInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
Result updateInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
private:
|
||||
/// @brief create the iterator, for a single attribute, IN operator
|
||||
|
|
|
@ -633,29 +633,34 @@ void RocksDBVPackIndex::fillPaths(std::vector<std::vector<std::string>>& paths,
|
|||
}
|
||||
|
||||
/// @brief inserts a document into the index
|
||||
Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
|
||||
RocksDBMethods* mthds,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result RocksDBVPackIndex::insertInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* mthds,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
Result res;
|
||||
rocksdb::Status s;
|
||||
|
||||
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
|
||||
SmallVector<RocksDBKey> elements{elementsArena};
|
||||
SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
|
||||
SmallVector<uint64_t> hashes{hashesArena};
|
||||
|
||||
{
|
||||
// rethrow all types of exceptions from here...
|
||||
transaction::BuilderLeaser leased(trx);
|
||||
transaction::BuilderLeaser leased(&trx);
|
||||
int r = fillElement(*(leased.get()), documentId, doc, elements, hashes);
|
||||
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
}
|
||||
|
||||
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL));
|
||||
|
||||
|
||||
IndexingDisabler guard(
|
||||
mthds, !_unique && trx.hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL)
|
||||
);
|
||||
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
RocksDBValue value = _unique ? RocksDBValue::UniqueVPackIndexValue(documentId)
|
||||
|
@ -681,7 +686,8 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
|
|||
}
|
||||
|
||||
if (res.ok() && !_unique) {
|
||||
auto state = RocksDBTransactionState::toState(trx);
|
||||
auto state = RocksDBTransactionState::toState(&trx);
|
||||
|
||||
for (auto& it : hashes) {
|
||||
// The estimator is only useful if we are in a non-unique indexes
|
||||
TRI_ASSERT(!_unique);
|
||||
|
@ -691,7 +697,9 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
|
|||
// find conflicting document
|
||||
LocalDocumentId docId = RocksDBValue::documentId(existing);
|
||||
std::string existingKey;
|
||||
bool success = _collection.getPhysical()->readDocumentWithCallback(trx, docId,
|
||||
auto success = _collection.getPhysical()->readDocumentWithCallback(
|
||||
&trx,
|
||||
docId,
|
||||
[&](LocalDocumentId const&, VPackSlice doc) {
|
||||
existingKey = transaction::helpers::extractKeyFromDocument(doc).copyString();
|
||||
});
|
||||
|
@ -710,11 +718,14 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
|
|||
}
|
||||
|
||||
Result RocksDBVPackIndex::updateInternal(
|
||||
transaction::Methods* trx, RocksDBMethods* mthds,
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* mthds,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
arangodb::velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId, velocypack::Slice const& newDoc,
|
||||
OperationMode mode) {
|
||||
velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
if (!_unique || _useExpansion) {
|
||||
// only unique index supports in-place updates
|
||||
// lets also not handle the complex case of expanded arrays
|
||||
|
@ -723,12 +734,13 @@ Result RocksDBVPackIndex::updateInternal(
|
|||
} else {
|
||||
Result res;
|
||||
rocksdb::Status s;
|
||||
|
||||
bool equal = true;
|
||||
|
||||
for (size_t i = 0; i < _paths.size(); ++i) {
|
||||
TRI_ASSERT(!_paths[i].empty());
|
||||
VPackSlice oldSlice = oldDoc.get(_paths[i]);
|
||||
VPackSlice newSlice = newDoc.get(_paths[i]);
|
||||
|
||||
if ((oldSlice.isNone() || oldSlice.isNull()) &&
|
||||
(newSlice.isNone() || newSlice.isNull())) {
|
||||
// attribute not found
|
||||
|
@ -755,8 +767,9 @@ Result RocksDBVPackIndex::updateInternal(
|
|||
SmallVector<uint64_t> hashes{hashesArena};
|
||||
{
|
||||
// rethrow all types of exceptions from here...
|
||||
transaction::BuilderLeaser leased(trx);
|
||||
transaction::BuilderLeaser leased(&trx);
|
||||
int r = fillElement(*(leased.get()), newDocumentId, newDoc, elements, hashes);
|
||||
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
|
@ -778,14 +791,15 @@ Result RocksDBVPackIndex::updateInternal(
|
|||
}
|
||||
|
||||
/// @brief removes a document from the index
|
||||
Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
|
||||
RocksDBMethods* mthds,
|
||||
LocalDocumentId const& documentId,
|
||||
VPackSlice const& doc,
|
||||
OperationMode mode) {
|
||||
Result RocksDBVPackIndex::removeInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* mthds,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) {
|
||||
Result res;
|
||||
rocksdb::Status s;
|
||||
|
||||
SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
|
||||
SmallVector<RocksDBKey> elements{elementsArena};
|
||||
SmallVector<uint64_t>::allocator_type::arena_type hashesArena;
|
||||
|
@ -793,19 +807,24 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
|
|||
|
||||
{
|
||||
// rethrow all types of exceptions from here...
|
||||
transaction::BuilderLeaser leased(trx);
|
||||
transaction::BuilderLeaser leased(&trx);
|
||||
int r = fillElement(*(leased.get()), documentId, doc, elements, hashes);
|
||||
|
||||
if (r != TRI_ERROR_NO_ERROR) {
|
||||
return addErrorMsg(res, r);
|
||||
}
|
||||
}
|
||||
|
||||
IndexingDisabler guard(mthds, !_unique && trx->hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL));
|
||||
|
||||
IndexingDisabler guard(
|
||||
mthds, !_unique && trx.hasHint(transaction::Hints::Hint::FROM_TOPLEVEL_AQL)
|
||||
);
|
||||
|
||||
size_t const count = elements.size();
|
||||
|
||||
if (_unique) {
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
s = mthds->Delete(_cf, elements[i]);
|
||||
|
||||
if (!s.ok()) {
|
||||
res.reset(rocksutils::convertStatus(s, rocksutils::index));
|
||||
}
|
||||
|
@ -822,7 +841,8 @@ Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
|
|||
}
|
||||
|
||||
if (res.ok() && !_unique) {
|
||||
auto state = RocksDBTransactionState::toState(trx);
|
||||
auto state = RocksDBTransactionState::toState(&trx);
|
||||
|
||||
for (auto& it : hashes) {
|
||||
// The estimator is only useful if we are in a non-unique indexes
|
||||
TRI_ASSERT(!_unique);
|
||||
|
|
|
@ -209,22 +209,31 @@ class RocksDBVPackIndex : public RocksDBIndex {
|
|||
void afterTruncate(TRI_voc_tick_t tick) override;
|
||||
|
||||
protected:
|
||||
Result insertInternal(transaction::Methods*, RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&,
|
||||
OperationMode mode) override;
|
||||
Result insertInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
Result updateInternal(transaction::Methods* trx, RocksDBMethods*,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
arangodb::velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
OperationMode mode) override;
|
||||
Result removeInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& documentId,
|
||||
velocypack::Slice const& doc,
|
||||
Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
Result removeInternal(transaction::Methods*, RocksDBMethods*,
|
||||
LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const&,
|
||||
OperationMode mode) override;
|
||||
Result updateInternal(
|
||||
transaction::Methods& trx,
|
||||
RocksDBMethods* methods,
|
||||
LocalDocumentId const& oldDocumentId,
|
||||
velocypack::Slice const& oldDoc,
|
||||
LocalDocumentId const& newDocumentId,
|
||||
velocypack::Slice const& newDoc,
|
||||
Index::OperationMode mode
|
||||
) override;
|
||||
|
||||
private:
|
||||
/// @brief return the number of paths
|
||||
|
@ -282,4 +291,4 @@ class RocksDBVPackIndex : public RocksDBIndex {
|
|||
};
|
||||
} // namespace arangodb
|
||||
|
||||
#endif
|
||||
#endif
|
|
@@ -133,8 +133,10 @@ class PhysicalCollection {
// -- SECTION DML Operations --
///////////////////////////////////

virtual Result truncate(transaction::Methods* trx,
OperationOptions& options) = 0;
virtual Result truncate(
transaction::Methods& trx,
OperationOptions& options
) = 0;

/// @brief Defer a callback to be executed when the collection
/// can be dropped. The callback is supposed to drop

@@ -204,14 +206,18 @@ class PhysicalCollection {
ManagedDocumentResult& previous,
std::function<Result(void)> callbackDuringLock) = 0;

virtual Result remove(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice slice,
arangodb::ManagedDocumentResult& previous,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick, bool lock,
TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock) = 0;
virtual Result remove(
transaction::Methods& trx,
velocypack::Slice slice,
ManagedDocumentResult& previous,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick,
bool lock,
TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId,
KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock
) = 0;

protected:
PhysicalCollection(

@@ -270,4 +276,4 @@ class PhysicalCollection {

} // namespace arangodb

#endif
#endif
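Because truncate and remove are pure virtuals on PhysicalCollection, every storage-engine implementation has to adopt the reference-taking signatures in the same commit. A minimal sketch of how the override specifier turns a missed migration into a compile error; the types below are hypothetical stand-ins, not ArangoDB code.

// --- illustrative sketch, not part of the commit ---
#include <iostream>

// Stand-ins for the touched interfaces.
struct Methods {};
struct Options {};

struct PhysicalBase {
  // New-style pure virtual taking references.
  virtual int truncate(Methods& trx, Options& options) = 0;
  virtual ~PhysicalBase() = default;
};

struct PhysicalImpl final : PhysicalBase {
  // 'override' makes the compiler reject a stale pointer-based signature:
  // int truncate(Methods* trx, Options& options) override;  // would not compile
  int truncate(Methods& trx, Options& options) override {
    (void)trx; (void)options;
    std::cout << "truncate called\n";
    return 0;
  }
};

int main() {
  Methods trx;
  Options options;
  PhysicalImpl impl;
  impl.truncate(trx, options);
}
// --- end of sketch ---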
@@ -2410,9 +2410,17 @@ OperationResult transaction::Methods::removeLocal(

TRI_ASSERT(needsLock == !isLocked(collection, AccessMode::Type::WRITE));

Result res =
collection->remove(this, value, options, resultMarkerTick, needsLock,
actualRevision, previous, &keyLockInfo, updateFollowers);
auto res = collection->remove(
*this,
value,
options,
resultMarkerTick,
needsLock,
actualRevision,
previous,
&keyLockInfo,
updateFollowers
);

if (resultMarkerTick > 0 && resultMarkerTick > maxTick) {
maxTick = resultMarkerTick;

@@ -2622,11 +2630,13 @@ OperationResult transaction::Methods::truncateLocal(

TRI_ASSERT(isLocked(collection, AccessMode::Type::WRITE));

Result res = collection->truncate(this, options);
auto res = collection->truncate(*this, options);

if (res.fail()) {
if (lockResult.is(TRI_ERROR_LOCKED)) {
unlockRecursive(cid, AccessMode::Type::WRITE);
}

return OperationResult(res);
}

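Call sites such as removeLocal and truncateLocal that previously passed this now dereference it before handing the transaction on. A small, self-contained sketch of that call-site pattern follows; all types are stand-ins, not ArangoDB code.

// --- illustrative sketch, not part of the commit ---
#include <iostream>

// Stand-ins for the real types.
struct Options {};
struct Result { bool fail() const { return false; } };

struct Methods;  // forward declaration of the "transaction" type

struct Collection {
  Result truncate(Methods& trx, Options& options);  // new reference-taking API
};

struct Methods {
  Collection* collection;
  Result truncateLocal(Options& options) {
    // Call sites that used to pass 'this' now dereference it.
    return collection->truncate(*this, options);
  }
};

Result Collection::truncate(Methods& trx, Options& options) {
  (void)trx; (void)options;
  std::cout << "truncated\n";
  return {};
}

int main() {
  Collection c;
  Methods m{&c};
  Options opts;
  m.truncateLocal(opts);
}
// --- end of sketch ---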
@ -907,11 +907,14 @@ Result LogicalCollection::read(transaction::Methods* trx, arangodb::velocypack::
|
|||
/// the read-cache
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
Result LogicalCollection::truncate(transaction::Methods* trx,
|
||||
OperationOptions& options) {
|
||||
Result LogicalCollection::truncate(
|
||||
transaction::Methods& trx,
|
||||
OperationOptions& options
|
||||
) {
|
||||
TRI_IF_FAILURE("LogicalCollection::truncate") {
|
||||
return Result(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
return getPhysical()->truncate(trx, options);
|
||||
}
|
||||
|
||||
|
@ -983,11 +986,16 @@ Result LogicalCollection::replace(
|
|||
|
||||
/// @brief removes a document or edge
|
||||
Result LogicalCollection::remove(
|
||||
transaction::Methods* trx, VPackSlice const slice,
|
||||
OperationOptions& options, TRI_voc_tick_t& resultMarkerTick, bool lock,
|
||||
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
|
||||
transaction::Methods& trx,
|
||||
velocypack::Slice const slice,
|
||||
OperationOptions& options,
|
||||
TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock,
|
||||
TRI_voc_rid_t& prevRev,
|
||||
ManagedDocumentResult& previous,
|
||||
KeyLockInfo* keyLockInfo,
|
||||
std::function<Result(void)> callbackDuringLock) {
|
||||
std::function<Result(void)> callbackDuringLock
|
||||
) {
|
||||
TRI_IF_FAILURE("LogicalCollection::remove") {
|
||||
return Result(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
|
|
@@ -276,7 +276,7 @@ class LogicalCollection : public LogicalDataSource {
ManagedDocumentResult& result, bool);

/// @brief processes a truncate operation
Result truncate(transaction::Methods* trx, OperationOptions&);
Result truncate(transaction::Methods& trx, OperationOptions& options);

// convenience function for downwards-compatibility
Result insert(transaction::Methods* trx, velocypack::Slice const slice,

@@ -311,11 +311,17 @@ class LogicalCollection : public LogicalDataSource {
ManagedDocumentResult& previous,
std::function<Result(void)> callbackDuringLock);

Result remove(transaction::Methods*, velocypack::Slice,
OperationOptions&, TRI_voc_tick_t&, bool lock,
TRI_voc_rid_t& prevRev, ManagedDocumentResult& previous,
KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock);
Result remove(
transaction::Methods& trx,
velocypack::Slice slice,
OperationOptions& options,
TRI_voc_tick_t& resultMarkerTick,
bool lock,
TRI_voc_rid_t& prevRev,
ManagedDocumentResult& previous,
KeyLockInfo* keyLockInfo,
std::function<Result(void)> callbackDuringLock
);

bool readDocument(transaction::Methods* trx,
LocalDocumentId const& token,

@@ -418,4 +424,4 @@ class LogicalCollection : public LogicalDataSource {

} // namespace arangodb

#endif
#endif

@ -547,6 +547,7 @@ SECTION("test_text_features") {
|
|||
}
|
||||
|
||||
SECTION("test_persistence") {
|
||||
static std::vector<std::string> const EMPTY;
|
||||
auto* database = arangodb::application_features::ApplicationServer::lookupFeature<
|
||||
arangodb::SystemDatabaseFeature
|
||||
>();
|
||||
|
@ -701,7 +702,14 @@ SECTION("test_persistence") {
|
|||
arangodb::OperationOptions options;
|
||||
arangodb::ManagedDocumentResult result;
|
||||
auto collection = vocbase->lookupCollection("_iresearch_analyzers");
|
||||
collection->truncate(nullptr, options);
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(*vocbase),
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
EMPTY,
|
||||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((collection->truncate(trx, options).ok()));
|
||||
}
|
||||
|
||||
{
|
||||
|
|
|
@ -498,7 +498,7 @@ SECTION("test_drop") {
|
|||
}
|
||||
|
||||
CHECK((true == (*dynamic_cast<arangodb::iresearch::IResearchLink*>(link0.get()) == *logicalView)));
|
||||
CHECK((TRI_ERROR_NO_ERROR == link0->drop()));
|
||||
CHECK((link0->drop().ok()));
|
||||
CHECK((true == (*dynamic_cast<arangodb::iresearch::IResearchLink*>(link0.get()) == *logicalView)));
|
||||
|
||||
// collection not in view after
|
||||
|
@ -649,7 +649,6 @@ SECTION("test_write") {
|
|||
REQUIRE((false == !link && created));
|
||||
auto reader = irs::directory_reader::open(directory);
|
||||
CHECK((0 == reader.reopen().live_docs_count()));
|
||||
CHECK((TRI_ERROR_BAD_PARAMETER == link->insert(nullptr, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).errorNumber()));
|
||||
{
|
||||
arangodb::transaction::Methods trx(
|
||||
arangodb::transaction::StandaloneContext::Create(vocbase),
|
||||
|
@ -659,7 +658,7 @@ SECTION("test_write") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
@ -676,7 +675,7 @@ SECTION("test_write") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
@ -693,7 +692,7 @@ SECTION("test_write") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->remove(&trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->remove(trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
|
|
@ -461,7 +461,7 @@ SECTION("test_cleanup") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -479,7 +479,7 @@ SECTION("test_cleanup") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->remove(&trx, arangodb::LocalDocumentId(0), arangodb::velocypack::Slice::emptyObjectSlice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->remove(trx, arangodb::LocalDocumentId(0), arangodb::velocypack::Slice::emptyObjectSlice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -668,7 +668,7 @@ SECTION("test_drop_cid") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -743,7 +743,7 @@ SECTION("test_drop_cid") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -818,7 +818,7 @@ SECTION("test_drop_cid") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -910,7 +910,7 @@ SECTION("test_drop_cid") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -997,7 +997,7 @@ SECTION("test_drop_cid") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -1158,7 +1158,7 @@ SECTION("test_truncate_cid") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -1233,7 +1233,7 @@ SECTION("test_truncate_cid") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -1619,10 +1619,10 @@ SECTION("test_insert") {
|
|||
|
||||
linkMeta._includeAllFields = true;
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -1675,8 +1675,8 @@ SECTION("test_insert") {
|
|||
|
||||
linkMeta._includeAllFields = true;
|
||||
CHECK((trx.begin().ok()));
|
||||
link->batchInsert(&trx, batch, taskQueuePtr);
|
||||
link->batchInsert(&trx, batch, taskQueuePtr); // 2nd time
|
||||
link->batchInsert(trx, batch, taskQueuePtr);
|
||||
link->batchInsert(trx, batch, taskQueuePtr); // 2nd time
|
||||
CHECK((TRI_ERROR_NO_ERROR == taskQueue.status()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
|
@ -1722,10 +1722,10 @@ SECTION("test_insert") {
|
|||
|
||||
linkMeta._includeAllFields = true;
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
}
|
||||
|
@ -1772,10 +1772,10 @@ SECTION("test_insert") {
|
|||
|
||||
linkMeta._includeAllFields = true;
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), docJson->slice(), arangodb::Index::OperationMode::normal).ok())); // 2nd time
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
@ -1822,7 +1822,7 @@ SECTION("test_insert") {
|
|||
|
||||
linkMeta._includeAllFields = true;
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), docJson->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
@ -1872,8 +1872,8 @@ SECTION("test_insert") {
|
|||
|
||||
linkMeta._includeAllFields = true;
|
||||
CHECK((trx.begin().ok()));
|
||||
link->batchInsert(&trx, batch, taskQueuePtr);
|
||||
link->batchInsert(&trx, batch, taskQueuePtr); // 2nd time
|
||||
link->batchInsert(trx, batch, taskQueuePtr);
|
||||
link->batchInsert(trx, batch, taskQueuePtr); // 2nd time
|
||||
CHECK((TRI_ERROR_NO_ERROR == taskQueue.status()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK(view->commit().ok());
|
||||
|
@ -1926,8 +1926,8 @@ SECTION("test_insert") {
|
|||
|
||||
linkMeta._includeAllFields = true;
|
||||
CHECK((trx.begin().ok()));
|
||||
link->batchInsert(&trx, batch, taskQueuePtr);
|
||||
link->batchInsert(&trx, batch, taskQueuePtr); // 2nd time
|
||||
link->batchInsert(trx, batch, taskQueuePtr);
|
||||
link->batchInsert(trx, batch, taskQueuePtr); // 2nd time
|
||||
CHECK((TRI_ERROR_NO_ERROR == taskQueue.status()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
@ -2023,7 +2023,7 @@ SECTION("test_query") {
|
|||
CHECK((trx.begin().ok()));
|
||||
|
||||
for (size_t i = 0; i < 12; ++i) {
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
}
|
||||
|
||||
CHECK((trx.commit().ok()));
|
||||
|
@ -2489,7 +2489,7 @@ SECTION("test_unregister_link") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK((view->commit().ok()));
|
||||
}
|
||||
|
@ -2594,7 +2594,7 @@ SECTION("test_unregister_link") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
CHECK((view->commit().ok()));
|
||||
}
|
||||
|
@ -2873,7 +2873,7 @@ SECTION("test_tracked_cids") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
feature->executeCallbacks(); // commit to persisted store
|
||||
}
|
||||
|
@ -3271,7 +3271,7 @@ SECTION("test_transaction_snapshot") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
@ -3351,7 +3351,7 @@ SECTION("test_transaction_snapshot") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(1), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
@ -6349,4 +6349,4 @@ SECTION("test_update_partial") {
|
|||
|
||||
// -----------------------------------------------------------------------------
|
||||
// --SECTION-- END-OF-FILE
|
||||
// -----------------------------------------------------------------------------
|
||||
// -----------------------------------------------------------------------------
|
|
@ -573,7 +573,7 @@ SECTION("test_query") {
|
|||
CHECK((trx.begin().ok()));
|
||||
|
||||
for (size_t i = 0; i < 12; ++i) {
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
}
|
||||
|
||||
CHECK((trx.commit().ok()));
|
||||
|
@ -971,7 +971,7 @@ SECTION("test_transaction_snapshot") {
|
|||
arangodb::transaction::Options()
|
||||
);
|
||||
CHECK((trx.begin().ok()));
|
||||
CHECK((link->insert(&trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((link->insert(trx, arangodb::LocalDocumentId(0), doc->slice(), arangodb::Index::OperationMode::normal).ok()));
|
||||
CHECK((trx.commit().ok()));
|
||||
}
|
||||
|
||||
|
|
|
@ -208,7 +208,7 @@ class EdgeIndexMock final : public arangodb::Index {
|
|||
}
|
||||
|
||||
arangodb::Result insert(
|
||||
arangodb::transaction::Methods*,
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const& documentId,
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
OperationMode
|
||||
|
@ -236,7 +236,7 @@ class EdgeIndexMock final : public arangodb::Index {
|
|||
}
|
||||
|
||||
arangodb::Result remove(
|
||||
arangodb::transaction::Methods*,
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::LocalDocumentId const&,
|
||||
arangodb::velocypack::Slice const& doc,
|
||||
OperationMode
|
||||
|
@ -593,8 +593,8 @@ std::shared_ptr<arangodb::Index> PhysicalCollectionMock::createIndex(arangodb::v
|
|||
);
|
||||
auto res = trx.begin();
|
||||
TRI_ASSERT(res.ok());
|
||||
|
||||
index->batchInsert(&trx, docs, taskQueuePtr);
|
||||
|
||||
index->batchInsert(trx, docs, taskQueuePtr);
|
||||
|
||||
if (TRI_ERROR_NO_ERROR != taskQueue.status()) {
|
||||
return nullptr;
|
||||
|
@ -602,7 +602,7 @@ std::shared_ptr<arangodb::Index> PhysicalCollectionMock::createIndex(arangodb::v
|
|||
|
||||
_indexes.emplace_back(std::move(index));
|
||||
created = true;
|
||||
|
||||
|
||||
res = trx.commit();
|
||||
TRI_ASSERT(res.ok());
|
||||
|
||||
|
@ -622,7 +622,7 @@ bool PhysicalCollectionMock::dropIndex(TRI_idx_iid_t iid) {
|
|||
|
||||
for (auto itr = _indexes.begin(), end = _indexes.end(); itr != end; ++itr) {
|
||||
if ((*itr)->id() == iid) {
|
||||
if (TRI_ERROR_NO_ERROR == (*itr)->drop()) {
|
||||
if ((*itr)->drop().ok()) {
|
||||
_indexes.erase(itr); return true;
|
||||
}
|
||||
}
|
||||
|
@ -685,7 +685,7 @@ arangodb::Result PhysicalCollectionMock::insert(
|
|||
result.setUnmanaged(documents.back().first.data(), docId);
|
||||
|
||||
for (auto& index : _indexes) {
|
||||
if (!index->insert(trx, docId, newSlice, arangodb::Index::OperationMode::normal).ok()) {
|
||||
if (!index->insert(*trx, docId, newSlice, arangodb::Index::OperationMode::normal).ok()) {
|
||||
return arangodb::Result(TRI_ERROR_BAD_PARAMETER);
|
||||
}
|
||||
}
|
||||
|
@ -859,12 +859,17 @@ bool PhysicalCollectionMock::readDocumentWithCallback(arangodb::transaction::Met
|
|||
}
|
||||
|
||||
arangodb::Result PhysicalCollectionMock::remove(
|
||||
arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::velocypack::Slice slice,
|
||||
arangodb::ManagedDocumentResult& previous,
|
||||
arangodb::OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
|
||||
arangodb::OperationOptions& options,
|
||||
TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock,
|
||||
TRI_voc_rid_t& prevRev,
|
||||
TRI_voc_rid_t& revisionId,
|
||||
arangodb::KeyLockInfo* /*keyLockInfo*/,
|
||||
std::function<arangodb::Result(void)> callbackDuringLock) {
|
||||
std::function<arangodb::Result(void)> callbackDuringLock
|
||||
) {
|
||||
TRI_ASSERT(callbackDuringLock == nullptr); // not implemented
|
||||
before();
|
||||
|
||||
|
@ -920,7 +925,10 @@ void PhysicalCollectionMock::setPath(std::string const& value) {
|
|||
physicalPath = value;
|
||||
}
|
||||
|
||||
arangodb::Result PhysicalCollectionMock::truncate(arangodb::transaction::Methods*, arangodb::OperationOptions&) {
|
||||
arangodb::Result PhysicalCollectionMock::truncate(
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::OperationOptions& options
|
||||
) {
|
||||
before();
|
||||
documents.clear();
|
||||
return arangodb::Result();
|
||||
|
|
|
@ -90,12 +90,17 @@ class PhysicalCollectionMock: public arangodb::PhysicalCollection {
|
|||
virtual bool readDocument(arangodb::transaction::Methods* trx, arangodb::LocalDocumentId const& token, arangodb::ManagedDocumentResult& result) const override;
|
||||
virtual bool readDocumentWithCallback(arangodb::transaction::Methods* trx, arangodb::LocalDocumentId const& token, arangodb::IndexIterator::DocumentCallback const& cb) const override;
|
||||
virtual arangodb::Result remove(
|
||||
arangodb::transaction::Methods* trx, arangodb::velocypack::Slice slice,
|
||||
arangodb::ManagedDocumentResult& previous,
|
||||
arangodb::OperationOptions& options, TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock, TRI_voc_rid_t& prevRev, TRI_voc_rid_t& revisionId,
|
||||
arangodb::KeyLockInfo* /*keyLockInfo*/,
|
||||
std::function<arangodb::Result(void)> callbackDuringLock) override;
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::velocypack::Slice slice,
|
||||
arangodb::ManagedDocumentResult& previous,
|
||||
arangodb::OperationOptions& options,
|
||||
TRI_voc_tick_t& resultMarkerTick,
|
||||
bool lock,
|
||||
TRI_voc_rid_t& prevRev,
|
||||
TRI_voc_rid_t& revisionId,
|
||||
arangodb::KeyLockInfo* /*keyLockInfo*/,
|
||||
std::function<arangodb::Result(void)> callbackDuringLock
|
||||
) override;
|
||||
virtual arangodb::Result replace(
|
||||
arangodb::transaction::Methods* trx,
|
||||
arangodb::velocypack::Slice const newSlice,
|
||||
|
@ -106,7 +111,10 @@ class PhysicalCollectionMock: public arangodb::PhysicalCollection {
|
|||
std::function<arangodb::Result(void)> callbackDuringLock) override;
|
||||
virtual TRI_voc_rid_t revision(arangodb::transaction::Methods* trx) const override;
|
||||
virtual void setPath(std::string const&) override;
|
||||
virtual arangodb::Result truncate(arangodb::transaction::Methods* trx, arangodb::OperationOptions&) override;
|
||||
virtual arangodb::Result truncate(
|
||||
arangodb::transaction::Methods& trx,
|
||||
arangodb::OperationOptions& options
|
||||
) override;
|
||||
virtual arangodb::Result update(
|
||||
arangodb::transaction::Methods* trx,
|
||||
arangodb::velocypack::Slice const newSlice,
|
||||
|
|