mirror of https://gitee.com/bigwinds/arangodb

Return offending key for unique constraint violations (#3624)

parent 733f27e997
commit 68bd31ac99
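
This commit replaces the bool isRollback flag on the index insert/remove paths with an Index::OperationMode (normal, internal, rollback) and, when a unique constraint is violated, attaches the _key of the already-indexed document to the returned IndexResult. The standalone sketch below (simplified stand-in types, not ArangoDB's actual classes; the numeric error code is an assumption) illustrates how the conflicting key ends up in the error message:

// Minimal sketch -- simplified stand-ins for ArangoDB's Result/IndexResult.
#include <iostream>
#include <string>

// Mirrors the enum added to Indexes/Index.h by this commit.
enum OperationMode { normal, internal, rollback };

struct Result {
  int errorNumber;
  std::string errorMessage;
};

// Mirrors the new IndexResult(errorNumber, index, key) constructor below:
// for a unique-constraint violation the offending key is appended.
Result makeIndexError(int errorNumber, std::string baseMessage, std::string const& key) {
  Result r{errorNumber, std::move(baseMessage)};
  if (key.length() > 0) {
    r.errorMessage.append("; conflicting key: ");
    r.errorMessage.append(key);
  }
  return r;
}

int main() {
  // 1210 is assumed to be TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.
  Result r = makeIndexError(1210, "unique constraint violated", "12345");
  std::cout << r.errorMessage << std::endl;
  // prints: unique constraint violated; conflicting key: 12345
}

With OperationMode::internal the indexes return the bare conflicting key instead of the decorated message, so internal callers such as the replication syncer can consume it directly.
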
@@ -527,7 +527,7 @@ void Index::batchInsert(
 std::vector<std::pair<LocalDocumentId, arangodb::velocypack::Slice>> const& documents,
 std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
 for (auto const& it : documents) {
-Result status = insert(trx, it.first, it.second, false);
+Result status = insert(trx, it.first, it.second, OperationMode::normal);
 if (status.errorNumber() != TRI_ERROR_NO_ERROR) {
 queue->setStatus(status.errorNumber());
 break;

@@ -88,6 +88,13 @@ class Index {
 TRI_IDX_TYPE_NO_ACCESS_INDEX
 };

+// mode to signal how operation should behave
+enum OperationMode {
+normal,
+internal,
+rollback
+};
+
 public:
 /// @brief return the index id
 inline TRI_idx_iid_t id() const { return _iid; }
@@ -244,10 +251,14 @@ class Index {
 virtual void toVelocyPackFigures(arangodb::velocypack::Builder&) const;
 std::shared_ptr<arangodb::velocypack::Builder> toVelocyPackFigures() const;

-virtual Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) = 0;
-virtual Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) = 0;
+virtual Result insert(transaction::Methods*,
+LocalDocumentId const& documentId,
+arangodb::velocypack::Slice const&,
+OperationMode mode) = 0;
+virtual Result remove(transaction::Methods*,
+LocalDocumentId const& documentId,
+arangodb::velocypack::Slice const&,
+OperationMode mode) = 0;

 virtual void batchInsert(
 transaction::Methods*,

@@ -59,6 +59,15 @@ class IndexResult : public Result {
 }
 }
 }
+
+IndexResult(int errorNumber, Index const* index, std::string key) :
+IndexResult(errorNumber, index) {
+// provide conflicting key
+if (key.length() > 0) {
+_errorMessage.append("; conflicting key: ");
+_errorMessage.append(key);
+}
+}
 };
 } // namespace arangodb

@@ -259,7 +259,8 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(
 // insert into primary index
 Result res = state->_primaryIndex->insertKey(trx, localDocumentId,
 VPackSlice(vpack),
-state->_mmdr);
+state->_mmdr,
+Index::OperationMode::normal);

 if (res.errorNumber() != TRI_ERROR_NO_ERROR) {
 physical->removeLocalDocumentId(localDocumentId, false);
@@ -394,7 +395,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(
 state->_dfi->numberDeletions++;

 state->_primaryIndex->removeKey(trx, oldLocalDocumentId, VPackSlice(vpack),
-state->_mmdr);
+state->_mmdr, Index::OperationMode::normal);

 physical->removeLocalDocumentId(oldLocalDocumentId, true);
 }
@@ -2907,8 +2908,8 @@ Result MMFilesCollection::insert(transaction::Methods* trx,

 try {
 // insert into indexes
-res = insertDocument(trx, documentId, revisionId, doc, operation, marker,
-options.waitForSync);
+res = insertDocument(trx, documentId, revisionId, doc, operation,
+marker, options, options.waitForSync);
 } catch (basics::Exception const& ex) {
 res = Result(ex.code());
 } catch (std::bad_alloc const&) {
@@ -3056,26 +3057,27 @@ void MMFilesCollection::removeLocalDocumentId(LocalDocumentId const& documentId,
 /// @brief creates a new entry in the primary index
 Result MMFilesCollection::insertPrimaryIndex(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc) {
+VPackSlice const& doc,
+OperationOptions& options) {
 TRI_IF_FAILURE("InsertPrimaryIndex") { return Result(TRI_ERROR_DEBUG); }

 // insert into primary index
-return primaryIndex()->insertKey(trx, documentId, doc);
+return primaryIndex()->insertKey(trx, documentId, doc, options.indexOpMode);
 }

 /// @brief deletes an entry from the primary index
 Result MMFilesCollection::deletePrimaryIndex(
 arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-VPackSlice const& doc) {
+VPackSlice const& doc, OperationOptions& options) {
 TRI_IF_FAILURE("DeletePrimaryIndex") { return Result(TRI_ERROR_DEBUG); }

-return primaryIndex()->removeKey(trx, documentId, doc);
+return primaryIndex()->removeKey(trx, documentId, doc, options.indexOpMode);
 }

 /// @brief creates a new entry in the secondary indexes
 Result MMFilesCollection::insertSecondaryIndexes(
 arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc, Index::OperationMode mode) {
 // Coordinator doesn't know index internals
 TRI_ASSERT(!ServerState::instance()->isCoordinator());
 TRI_IF_FAILURE("InsertSecondaryIndexes") { return Result(TRI_ERROR_DEBUG); }
@@ -3098,7 +3100,7 @@ Result MMFilesCollection::insertSecondaryIndexes(
 continue;
 }

-Result res = idx->insert(trx, documentId, doc, isRollback);
+Result res = idx->insert(trx, documentId, doc, mode);

 // in case of no-memory, return immediately
 if (res.errorNumber() == TRI_ERROR_OUT_OF_MEMORY) {
@@ -3119,7 +3121,7 @@ Result MMFilesCollection::insertSecondaryIndexes(
 /// @brief deletes an entry from the secondary indexes
 Result MMFilesCollection::deleteSecondaryIndexes(
 arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc, Index::OperationMode mode) {
 // Coordintor doesn't know index internals
 TRI_ASSERT(!ServerState::instance()->isCoordinator());

@@ -3144,7 +3146,7 @@ Result MMFilesCollection::deleteSecondaryIndexes(
 continue;
 }

-Result res = idx->remove(trx, documentId, doc, isRollback);
+Result res = idx->remove(trx, documentId, doc, mode);

 if (res.fail()) {
 // an error occurred
@@ -3184,9 +3186,10 @@ int MMFilesCollection::detectIndexes(transaction::Methods* trx) {
 /// If it returns an error no documents are inserted
 Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc) {
+VPackSlice const& doc,
+OperationOptions& options) {
 // insert into primary index first
-Result res = insertPrimaryIndex(trx, documentId, doc);
+Result res = insertPrimaryIndex(trx, documentId, doc, options);

 if (res.fail()) {
 // insert has failed
@@ -3194,11 +3197,12 @@ Result MMFilesCollection::insertIndexes(arangodb::transaction::Methods* trx,
 }

 // insert into secondary indexes
-res = insertSecondaryIndexes(trx, documentId, doc, false);
+res = insertSecondaryIndexes(trx, documentId, doc, options.indexOpMode);

 if (res.fail()) {
-deleteSecondaryIndexes(trx, documentId, doc, true);
-deletePrimaryIndex(trx, documentId, doc);
+deleteSecondaryIndexes(trx, documentId, doc,
+Index::OperationMode::rollback);
+deletePrimaryIndex(trx, documentId, doc, options);
 }
 return res;
 }
@@ -3211,8 +3215,9 @@ Result MMFilesCollection::insertDocument(arangodb::transaction::Methods* trx,
 VPackSlice const& doc,
 MMFilesDocumentOperation& operation,
 MMFilesWalMarker const* marker,
+OperationOptions& options,
 bool& waitForSync) {
-Result res = insertIndexes(trx, documentId, doc);
+Result res = insertIndexes(trx, documentId, doc, options);
 if (res.fail()) {
 return res;
 }
@@ -3338,8 +3343,9 @@ Result MMFilesCollection::update(
 result.reset();
 }

-res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId, newDoc,
-operation, marker, options.waitForSync);
+res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
+newDoc, operation, marker, options,
+options.waitForSync);
 } catch (basics::Exception const& ex) {
 res = Result(ex.code());
 } catch (std::bad_alloc const&) {
@@ -3472,8 +3478,9 @@ Result MMFilesCollection::replace(
 result.reset();
 }

-res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId, newDoc,
-operation, marker, options.waitForSync);
+res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
+newDoc, operation, marker, options,
+options.waitForSync);
 } catch (basics::Exception const& ex) {
 res = Result(ex.code());
 } catch (std::bad_alloc const&) {
@@ -3591,17 +3598,20 @@ Result MMFilesCollection::remove(arangodb::transaction::Methods* trx,
 MMFilesDocumentDescriptor());

 // delete from indexes
-res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc, false);
+res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc,
+options.indexOpMode);

 if (res.fail()) {
-insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 THROW_ARANGO_EXCEPTION(res);
 }

-res = deletePrimaryIndex(trx, oldDocumentId, oldDoc);
+res = deletePrimaryIndex(trx, oldDocumentId, oldDoc, options);

 if (res.fail()) {
-insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 THROW_ARANGO_EXCEPTION(res);
 }

@@ -3654,12 +3664,13 @@ void MMFilesCollection::deferDropCollection(
 }

 /// @brief rolls back a document operation
-Result MMFilesCollection::rollbackOperation(transaction::Methods* trx,
-TRI_voc_document_operation_e type,
-LocalDocumentId const& oldDocumentId,
-VPackSlice const& oldDoc,
-LocalDocumentId const& newDocumentId,
-VPackSlice const& newDoc) {
+Result MMFilesCollection::rollbackOperation(
+transaction::Methods* trx, TRI_voc_document_operation_e type,
+LocalDocumentId const& oldDocumentId, VPackSlice const& oldDoc,
+LocalDocumentId const& newDocumentId, VPackSlice const& newDoc) {
+OperationOptions options;
+options.indexOpMode= Index::OperationMode::rollback;
+
 if (type == TRI_VOC_DOCUMENT_OPERATION_INSERT) {
 TRI_ASSERT(oldDocumentId.empty());
 TRI_ASSERT(oldDoc.isNone());
@@ -3667,8 +3678,9 @@ Result MMFilesCollection::rollbackOperation(transaction::Methods* trx,
 TRI_ASSERT(!newDoc.isNone());

 // ignore any errors we're getting from this
-deletePrimaryIndex(trx, newDocumentId, newDoc);
-deleteSecondaryIndexes(trx, newDocumentId, newDoc, true);
+deletePrimaryIndex(trx, newDocumentId, newDoc, options);
+deleteSecondaryIndexes(trx, newDocumentId, newDoc,
+Index::OperationMode::rollback);
 return TRI_ERROR_NO_ERROR;
 }

@@ -3680,9 +3692,11 @@ Result MMFilesCollection::rollbackOperation(transaction::Methods* trx,
 TRI_ASSERT(!newDoc.isNone());

 // remove the current values from the indexes
-deleteSecondaryIndexes(trx, newDocumentId, newDoc, true);
+deleteSecondaryIndexes(trx, newDocumentId, newDoc,
+Index::OperationMode::rollback);
 // re-insert old state
-return insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+return insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 }

 if (type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) {
@@ -3692,10 +3706,11 @@ Result MMFilesCollection::rollbackOperation(transaction::Methods* trx,
 TRI_ASSERT(newDocumentId.empty());
 TRI_ASSERT(newDoc.isNone());

-Result res = insertPrimaryIndex(trx, oldDocumentId, oldDoc);
+Result res = insertPrimaryIndex(trx, oldDocumentId, oldDoc, options);

 if (res.ok()) {
-res = insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+res = insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 } else {
 LOG_TOPIC(ERR, arangodb::Logger::FIXME)
 << "error rolling back remove operation";
@@ -3754,17 +3769,20 @@ Result MMFilesCollection::removeFastPath(arangodb::transaction::Methods* trx,
 // delete from indexes
 Result res;
 try {
-res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc, false);
+res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc,
+options.indexOpMode);

 if (res.fail()) {
-insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 THROW_ARANGO_EXCEPTION(res.errorNumber());
 }

-res = deletePrimaryIndex(trx, oldDocumentId, oldDoc);
+res = deletePrimaryIndex(trx, oldDocumentId, oldDoc, options);

 if (res.fail()) {
-insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 THROW_ARANGO_EXCEPTION(res.errorNumber());
 }

@@ -3834,24 +3852,30 @@ Result MMFilesCollection::updateDocument(
 LocalDocumentId const& oldDocumentId,
 VPackSlice const& oldDoc, LocalDocumentId const& newDocumentId,
 VPackSlice const& newDoc, MMFilesDocumentOperation& operation,
-MMFilesWalMarker const* marker, bool& waitForSync) {
+MMFilesWalMarker const* marker, OperationOptions& options,
+bool& waitForSync) {
 // remove old document from secondary indexes
 // (it will stay in the primary index as the key won't change)
-Result res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc, false);
+Result res = deleteSecondaryIndexes(trx, oldDocumentId, oldDoc,
+options.indexOpMode);

 if (res.fail()) {
 // re-enter the document in case of failure, ignore errors during rollback
-insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 return res;
 }

 // insert new document into secondary indexes
-res = insertSecondaryIndexes(trx, newDocumentId, newDoc, false);
+res = insertSecondaryIndexes(trx, newDocumentId, newDoc,
+options.indexOpMode);

 if (res.fail()) {
 // rollback
-deleteSecondaryIndexes(trx, newDocumentId, newDoc, true);
-insertSecondaryIndexes(trx, oldDocumentId, oldDoc, true);
+deleteSecondaryIndexes(trx, newDocumentId, newDoc,
+Index::OperationMode::rollback);
+insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
+Index::OperationMode::rollback);
 return res;
 }

@@ -481,7 +481,8 @@ class MMFilesCollection final : public PhysicalCollection {
 TRI_voc_rid_t revisionId,
 arangodb::velocypack::Slice const& doc,
 MMFilesDocumentOperation& operation,
-MMFilesWalMarker const* marker, bool& waitForSync);
+MMFilesWalMarker const* marker,
+OperationOptions& options, bool& waitForSync);

 private:
 uint8_t const* lookupDocumentVPack(LocalDocumentId const& documentId) const;
@@ -508,20 +509,21 @@ class MMFilesCollection final : public PhysicalCollection {
 /// @brief Detect all indexes form file
 int detectIndexes(transaction::Methods* trx);

-Result insertIndexes(transaction::Methods* trx, LocalDocumentId const& documentId,
-velocypack::Slice const& doc);
+Result insertIndexes(transaction::Methods* trx, LocalDocumentId const& documentId, velocypack::Slice const& doc, OperationOptions& options);

-Result insertPrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId,
-velocypack::Slice const&);
+Result insertPrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options);

-Result deletePrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId,
-velocypack::Slice const&);
+Result deletePrimaryIndex(transaction::Methods*, LocalDocumentId const& documentId, velocypack::Slice const&, OperationOptions& options);

-Result insertSecondaryIndexes(transaction::Methods*, LocalDocumentId const& documentId,
-velocypack::Slice const&, bool isRollback);
+Result insertSecondaryIndexes(transaction::Methods*,
+LocalDocumentId const& documentId,
+velocypack::Slice const&,
+Index::OperationMode mode);

-Result deleteSecondaryIndexes(transaction::Methods*, LocalDocumentId const& documentId,
-velocypack::Slice const&, bool isRollback);
+Result deleteSecondaryIndexes(transaction::Methods*,
+LocalDocumentId const& documentId,
+velocypack::Slice const&,
+Index::OperationMode mode);

 Result lookupDocument(transaction::Methods*, velocypack::Slice,
 ManagedDocumentResult& result);
@@ -532,7 +534,8 @@ class MMFilesCollection final : public PhysicalCollection {
 LocalDocumentId const& newDocumentId,
 velocypack::Slice const& newDoc,
 MMFilesDocumentOperation&,
-MMFilesWalMarker const*, bool& waitForSync);
+MMFilesWalMarker const*, OperationOptions& options,
+bool& waitForSync);

 private:
 mutable arangodb::MMFilesDitches _ditches;

@@ -218,17 +218,20 @@ void MMFilesEdgeIndex::toVelocyPackFigures(VPackBuilder& builder) const {
 }

 Result MMFilesEdgeIndex::insert(transaction::Methods* trx,
-LocalDocumentId const& documentId, VPackSlice const& doc,
-bool isRollback) {
+LocalDocumentId const& documentId,
+VPackSlice const& doc,
+OperationMode mode) {
 MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
 MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));

 ManagedDocumentResult result;
 IndexLookupContext context(trx, _collection, &result, 1);
-_edgesFrom->insert(&context, fromElement, true, isRollback);
+_edgesFrom->insert(&context, fromElement, true,
+mode == OperationMode::rollback);

 try {
-_edgesTo->insert(&context, toElement, true, isRollback);
+_edgesTo->insert(&context, toElement, true,
+mode == OperationMode::rollback);
 } catch (std::bad_alloc const&) {
 // roll back partial insert
 _edgesFrom->remove(&context, fromElement);
@@ -243,8 +246,9 @@ Result MMFilesEdgeIndex::insert(transaction::Methods* trx,
 }

 Result MMFilesEdgeIndex::remove(transaction::Methods* trx,
-LocalDocumentId const& documentId, VPackSlice const& doc,
-bool isRollback) {
+LocalDocumentId const& documentId,
+VPackSlice const& doc,
+OperationMode mode) {
 MMFilesSimpleIndexElement fromElement(buildFromElement(documentId, doc));
 MMFilesSimpleIndexElement toElement(buildToElement(documentId, doc));

@@ -256,7 +260,7 @@ Result MMFilesEdgeIndex::remove(transaction::Methods* trx,
 _edgesTo->remove(&context, toElement);
 return Result(TRI_ERROR_NO_ERROR);
 } catch (...) {
-if (isRollback) {
+if (mode == OperationMode::rollback) {
 return Result(TRI_ERROR_NO_ERROR);
 }
 return IndexResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND, this);

@@ -165,10 +165,10 @@ class MMFilesEdgeIndex final : public MMFilesIndex {
 void toVelocyPackFigures(VPackBuilder&) const override;

 Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&, OperationMode mode) override;

 Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&, OperationMode mode) override;

 void batchInsert(transaction::Methods*,
 std::vector<std::pair<LocalDocumentId, VPackSlice>> const&,

@@ -200,7 +200,7 @@ bool MMFilesFulltextIndex::matchesDefinition(VPackSlice const& info) const {

 Result MMFilesFulltextIndex::insert(transaction::Methods*,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc, OperationMode mode) {
 int res = TRI_ERROR_NO_ERROR;
 std::set<std::string> words = wordlist(doc);

@@ -212,7 +212,7 @@ Result MMFilesFulltextIndex::insert(transaction::Methods*,

 Result MMFilesFulltextIndex::remove(transaction::Methods*,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc, OperationMode mode) {
 int res = TRI_ERROR_NO_ERROR;
 std::set<std::string> words = wordlist(doc);

@@ -66,10 +66,12 @@ class MMFilesFulltextIndex final : public MMFilesIndex {
 bool matchesDefinition(VPackSlice const&) const override;

 Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 void load() override {}
 void unload() override;

@@ -376,8 +376,9 @@ bool MMFilesGeoIndex::matchesDefinition(VPackSlice const& info) const {
 return true;
 }

-Result MMFilesGeoIndex::insert(transaction::Methods*, LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+Result MMFilesGeoIndex::insert(transaction::Methods*,
+LocalDocumentId const& documentId,
+VPackSlice const& doc, OperationMode mode) {
 double latitude;
 double longitude;

@@ -445,8 +446,9 @@ Result MMFilesGeoIndex::insert(transaction::Methods*, LocalDocumentId const& doc
 return IndexResult();
 }

-Result MMFilesGeoIndex::remove(transaction::Methods*, LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+Result MMFilesGeoIndex::remove(transaction::Methods*,
+LocalDocumentId const& documentId,
+VPackSlice const& doc, OperationMode mode) {
 double latitude = 0.0;
 double longitude = 0.0;
 bool ok = true;

@@ -134,10 +134,11 @@ class MMFilesGeoIndex final : public MMFilesIndex {
 bool matchesDefinition(VPackSlice const& info) const override;

 Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&, OperationMode mode) override;

 Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 void load() override {}
 void unload() override;

@@ -28,6 +28,7 @@
 #include "Basics/Exceptions.h"
 #include "Basics/FixedSizeAllocator.h"
 #include "Basics/LocalTaskQueue.h"
+#include "Basics/StaticStrings.h"
 #include "Basics/VelocyPackHelper.h"
 #include "Indexes/IndexLookupContext.h"
 #include "Indexes/IndexResult.h"
@@ -472,19 +473,21 @@ bool MMFilesHashIndex::matchesDefinition(VPackSlice const& info) const {
 }

 Result MMFilesHashIndex::insert(transaction::Methods* trx,
-LocalDocumentId const& documentId, VPackSlice const& doc,
-bool isRollback) {
+LocalDocumentId const& documentId,
+VPackSlice const& doc,
+OperationMode mode) {
 if (_unique) {
-return IndexResult(insertUnique(trx, documentId, doc, isRollback), this);
+return insertUnique(trx, documentId, doc, mode);
 }

-return IndexResult(insertMulti(trx, documentId, doc, isRollback), this);
+return IndexResult(insertMulti(trx, documentId, doc, mode), this);
 }

 /// @brief removes an entry from the hash array part of the hash index
 Result MMFilesHashIndex::remove(transaction::Methods* trx,
-LocalDocumentId const& documentId, VPackSlice const& doc,
-bool isRollback) {
+LocalDocumentId const& documentId,
+VPackSlice const& doc,
+OperationMode mode) {
 std::vector<MMFilesHashIndexElement*> elements;
 int res = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);

@@ -498,9 +501,9 @@ Result MMFilesHashIndex::remove(transaction::Methods* trx,
 for (auto& hashElement : elements) {
 int result;
 if (_unique) {
-result = removeUniqueElement(trx, hashElement, isRollback);
+result = removeUniqueElement(trx, hashElement, mode);
 } else {
-result = removeMultiElement(trx, hashElement, isRollback);
+result = removeMultiElement(trx, hashElement, mode);
 }

 // we may be looping through this multiple times, and if an error
@@ -589,9 +592,10 @@ int MMFilesHashIndex::lookup(
 return TRI_ERROR_NO_ERROR;
 }

-int MMFilesHashIndex::insertUnique(transaction::Methods* trx,
+Result MMFilesHashIndex::insertUnique(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc,
+OperationMode mode) {
 std::vector<MMFilesHashIndexElement*> elements;
 int res = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);

@@ -601,13 +605,14 @@ int MMFilesHashIndex::insertUnique(transaction::Methods* trx,
 _allocator->deallocate(it);
 }

-return res;
+return IndexResult(res, this);
 }

 ManagedDocumentResult result;
 IndexLookupContext context(trx, _collection, &result, numPaths());

-auto work = [this, &context](MMFilesHashIndexElement* element, bool) -> int {
+auto work = [this, &context](MMFilesHashIndexElement* element,
+OperationMode) -> int {
 TRI_IF_FAILURE("InsertHashIndex") { return TRI_ERROR_DEBUG; }
 return _uniqueArray->_hashArray->insert(&context, element);
 };
@@ -616,19 +621,33 @@ int MMFilesHashIndex::insertUnique(transaction::Methods* trx,

 for (size_t i = 0; i < n; ++i) {
 auto hashElement = elements[i];
-res = work(hashElement, isRollback);
+res = work(hashElement, mode);

 if (res != TRI_ERROR_NO_ERROR) {
+IndexResult error(res, this);
+if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
+LocalDocumentId rev(_uniqueArray->_hashArray->find(&context, hashElement)->localDocumentId());
+ManagedDocumentResult mmdr;
+_collection->getPhysical()->readDocument(trx, rev, mmdr);
+std::string existingId(
+VPackSlice(mmdr.vpack()).get(StaticStrings::KeyString).copyString());
+if (mode == OperationMode::internal) {
+error = IndexResult(res, existingId);
+} else {
+error = IndexResult(res, this, existingId);
+}
+}
+
 for (size_t j = i; j < n; ++j) {
 // Free all elements that are not yet in the index
 _allocator->deallocate(elements[j]);
 }
 // Already indexed elements will be removed by the rollback
-break;
+return error;
 }
 }

-return res;
+return IndexResult(res, this);
 }

 void MMFilesHashIndex::batchInsertUnique(
@@ -691,7 +710,7 @@ void MMFilesHashIndex::batchInsertUnique(

 int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc, OperationMode mode) {
 std::vector<MMFilesHashIndexElement*> elements;
 int res = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);

@@ -705,7 +724,8 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
 ManagedDocumentResult result;
 IndexLookupContext context(trx, _collection, &result, numPaths());

-auto work = [this, &context](MMFilesHashIndexElement*& element, bool) {
+auto work = [this, &context](MMFilesHashIndexElement*& element,
+OperationMode) {
 TRI_IF_FAILURE("InsertHashIndex") {
 THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
 }
@@ -725,7 +745,7 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
 auto hashElement = elements[i];

 try {
-work(hashElement, isRollback);
+work(hashElement, mode);
 } catch (arangodb::basics::Exception const& ex) {
 res = ex.code();
 } catch (std::bad_alloc const&) {
@@ -742,7 +762,7 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
 for (size_t j = 0; j < i; ++j) {
 // Remove all already indexed elements and free them
 if (elements[j] != nullptr) {
-removeMultiElement(trx, elements[j], isRollback);
+removeMultiElement(trx, elements[j], mode);
 }
 }

@@ -813,7 +833,7 @@ void MMFilesHashIndex::batchInsertMulti(

 int MMFilesHashIndex::removeUniqueElement(transaction::Methods* trx,
 MMFilesHashIndexElement* element,
-bool isRollback) {
+OperationMode mode) {
 TRI_IF_FAILURE("RemoveHashIndex") { return TRI_ERROR_DEBUG; }
 ManagedDocumentResult result;
 IndexLookupContext context(trx, _collection, &result, numPaths());
@@ -822,7 +842,8 @@ int MMFilesHashIndex::removeUniqueElement(transaction::Methods* trx,

 if (old == nullptr) {
 // not found
-if (isRollback) { // ignore in this case, because it can happen
+if (mode == OperationMode::rollback) { // ignore in this case, because it
+// can happen
 return TRI_ERROR_NO_ERROR;
 }
 return TRI_ERROR_INTERNAL;
@@ -834,7 +855,7 @@ int MMFilesHashIndex::removeUniqueElement(transaction::Methods* trx,

 int MMFilesHashIndex::removeMultiElement(transaction::Methods* trx,
 MMFilesHashIndexElement* element,
-bool isRollback) {
+OperationMode mode) {
 TRI_IF_FAILURE("RemoveHashIndex") { return TRI_ERROR_DEBUG; }
 ManagedDocumentResult result;
 IndexLookupContext context(trx, _collection, &result, numPaths());
@@ -843,7 +864,8 @@ int MMFilesHashIndex::removeMultiElement(transaction::Methods* trx,

 if (old == nullptr) {
 // not found
-if (isRollback) { // ignore in this case, because it can happen
+if (mode == OperationMode::rollback) { // ignore in this case, because it
+// can happen
 return TRI_ERROR_NO_ERROR;
 }
 return TRI_ERROR_INTERNAL;

@@ -287,10 +287,12 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
 bool matchesDefinition(VPackSlice const& info) const override;

 Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 void batchInsert(
 transaction::Methods*,
@@ -321,8 +323,8 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
 int lookup(transaction::Methods*, arangodb::velocypack::Slice,
 std::vector<MMFilesHashIndexElement*>&) const;

-int insertUnique(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback);
+Result insertUnique(transaction::Methods*, LocalDocumentId const& documentId,
+arangodb::velocypack::Slice const&, OperationMode mode);

 void batchInsertUnique(
 transaction::Methods*,
@@ -330,7 +332,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
 std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);

 int insertMulti(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback);
+arangodb::velocypack::Slice const&, OperationMode mode);

 void batchInsertMulti(
 transaction::Methods*,
@@ -338,9 +340,10 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
 std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);

 int removeUniqueElement(transaction::Methods*, MMFilesHashIndexElement*,
-bool);
+OperationMode mode);

-int removeMultiElement(transaction::Methods*, MMFilesHashIndexElement*, bool);
+int removeMultiElement(transaction::Methods*, MMFilesHashIndexElement*,
+OperationMode mode);

 bool accessFitsIndex(arangodb::aql::AstNode const* access,
 arangodb::aql::AstNode const* other,

@@ -275,6 +275,7 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,
 options.silent = true;
 options.ignoreRevs = true;
 options.isRestore = true;
+options.indexOpMode = Index::OperationMode::internal;
 if (!syncer._leaderId.empty()) {
 options.isSynchronousReplicationFrom = syncer._leaderId;
 }
@@ -660,23 +661,56 @@ Result handleSyncKeysMMFiles(arangodb::DatabaseInitialSyncer& syncer,

 MMFilesSimpleIndexElement element = idx->lookupKey(&trx, keySlice);

+auto removeConflict = [&](std::string conflictingKey) -> OperationResult {
+VPackBuilder conflict;
+conflict.add(VPackValue(conflictingKey));
+LocalDocumentId conflictId = physical->lookupKey(&trx, conflict.slice());
+if (conflictId.isSet()) {
+ManagedDocumentResult mmdr;
+bool success = physical->readDocument(&trx, conflictId, mmdr);
+if (success) {
+VPackSlice conflictingKey(mmdr.vpack());
+return trx.remove(collectionName, conflictingKey, options);
+}
+}
+return OperationResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
+};
+
 if (!element) {
 // INSERT
 OperationResult opRes = trx.insert(collectionName, it, options);
 if (opRes.code != TRI_ERROR_NO_ERROR) {
-if (opRes.errorMessage.empty()) {
-return Result(opRes.code);
+if (opRes.code == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && opRes.errorMessage > keySlice.copyString()) {
+// remove conflict and retry
+auto inner = removeConflict(opRes.errorMessage);
+if (inner.code != TRI_ERROR_NO_ERROR) {
+return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
+}
+opRes = trx.insert(collectionName, it, options);
+if (opRes.code != TRI_ERROR_NO_ERROR) {
+return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
+}
+} else {
+return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
 }
-return Result(opRes.code, opRes.errorMessage);
 }
 } else {
 // UPDATE
 OperationResult opRes = trx.replace(collectionName, it, options);
 if (opRes.code != TRI_ERROR_NO_ERROR) {
-if (opRes.errorMessage.empty()) {
-return Result(opRes.code);
+if (opRes.code == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && opRes.errorMessage > keySlice.copyString()) {
+// remove conflict and retry
+auto inner = removeConflict(opRes.errorMessage);
+if (inner.code != TRI_ERROR_NO_ERROR) {
+return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
+}
+opRes = trx.update(collectionName, it, options);
+if (opRes.code != TRI_ERROR_NO_ERROR) {
+return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
+}
+} else {
+return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
 }
-return Result(opRes.code, opRes.errorMessage);
 }
 }
 }

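A note on the replication hunk above: because the syncer sets options.indexOpMode = Index::OperationMode::internal, a unique-constraint violation reports the bare conflicting _key in opRes.errorMessage, and the syncer removes that document and retries only when the conflicting key sorts after the key currently being synced. A standalone sketch of that decision (hypothetical names, assumed error-code value; not ArangoDB's API):

#include <iostream>
#include <string>

constexpr int kUniqueConstraintViolated = 1210;  // assumed numeric value

// Retry only for unique-constraint errors whose reported conflicting key
// sorts after the key currently being synced (plain string comparison,
// as in the opRes.errorMessage > keySlice.copyString() check above).
bool shouldRemoveConflictAndRetry(int code, std::string const& conflictingKey,
                                  std::string const& currentKey) {
  return code == kUniqueConstraintViolated && conflictingKey > currentKey;
}

int main() {
  std::cout << std::boolalpha
            << shouldRemoveConflictAndRetry(kUniqueConstraintViolated, "9999", "1234") << "\n"   // true
            << shouldRemoveConflictAndRetry(kUniqueConstraintViolated, "0001", "1234") << "\n";  // false
}
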
@@ -219,7 +219,8 @@ size_t MMFilesPersistentIndex::memory() const {
 /// @brief inserts a document into the index
 Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc,
+OperationMode mode) {
 std::vector<MMFilesSkiplistIndexElement*> elements;

 int res;
@@ -322,6 +323,7 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
 rocksdb::ReadOptions readOptions;

 size_t const count = elements.size();
+std::string existingId;
 for (size_t i = 0; i < count; ++i) {
 if (_unique) {
 bool uniqueConstraintViolated = false;
@@ -338,6 +340,10 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,

 if (res <= 0) {
 uniqueConstraintViolated = true;
+VPackSlice slice(comparator->extractKeySlice(iterator->key()));
+uint64_t length = slice.length();
+TRI_ASSERT(length > 0);
+existingId = slice.at(length - 1).copyString();
 }
 }

@@ -378,13 +384,21 @@ Result MMFilesPersistentIndex::insert(transaction::Methods* trx,
 }
 }

+if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
+if (mode == OperationMode::internal) {
+return IndexResult(res, existingId);
+}
+return IndexResult(res, this, existingId);
+}
+
 return IndexResult(res, this);
 }

 /// @brief removes a document from the index
 Result MMFilesPersistentIndex::remove(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc,
+OperationMode mode) {
 std::vector<MMFilesSkiplistIndexElement*> elements;

 int res;

@@ -162,10 +162,12 @@ class MMFilesPersistentIndex final : public MMFilesPathBasedIndex {
 }

 Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 void unload() override {}

@@ -245,8 +245,9 @@ void MMFilesPrimaryIndex::toVelocyPackFigures(VPackBuilder& builder) const {
 _primaryIndex->appendToVelocyPack(builder);
 }

-Result MMFilesPrimaryIndex::insert(transaction::Methods*, LocalDocumentId const&,
-VPackSlice const&, bool) {
+Result MMFilesPrimaryIndex::insert(transaction::Methods*,
+LocalDocumentId const&,
+VPackSlice const&, OperationMode) {
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
 LOG_TOPIC(WARN, arangodb::Logger::FIXME)
 << "insert() called for primary index";
@@ -255,8 +256,9 @@ Result MMFilesPrimaryIndex::insert(transaction::Methods*, LocalDocumentId const&
 "insert() called for primary index");
 }

-Result MMFilesPrimaryIndex::remove(transaction::Methods*, LocalDocumentId const&,
-VPackSlice const&, bool) {
+Result MMFilesPrimaryIndex::remove(transaction::Methods*,
+LocalDocumentId const&,
+VPackSlice const&, OperationMode) {
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
 LOG_TOPIC(WARN, arangodb::Logger::FIXME)
 << "remove() called for primary index";
@@ -366,28 +368,51 @@ MMFilesSimpleIndexElement MMFilesPrimaryIndex::lookupSequentialReverse(
 /// returns a status code, and *found will contain a found element (if any)
 Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc) {
+VPackSlice const& doc,
+OperationMode mode) {
 ManagedDocumentResult result;
 IndexLookupContext context(trx, _collection, &result, 1);
 MMFilesSimpleIndexElement element(buildKeyElement(documentId, doc));

-return IndexResult(_primaryIndex->insert(&context, element), this);
+int res = _primaryIndex->insert(&context, element);
+
+if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
+std::string existingId(doc.get(StaticStrings::KeyString).copyString());
+if (mode == OperationMode::internal) {
+return IndexResult(res, existingId);
+}
+return IndexResult(res, this, existingId);
+}
+
+return IndexResult(res, this);
 }

 Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
 LocalDocumentId const& documentId,
 VPackSlice const& doc,
-ManagedDocumentResult& mmdr) {
+ManagedDocumentResult& mmdr,
+OperationMode mode) {
 IndexLookupContext context(trx, _collection, &mmdr, 1);
 MMFilesSimpleIndexElement element(buildKeyElement(documentId, doc));

-return IndexResult(_primaryIndex->insert(&context, element), this);
+int res = _primaryIndex->insert(&context, element);
+
+if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
+std::string existingId(doc.get(StaticStrings::KeyString).copyString());
+if (mode == OperationMode::internal) {
+return IndexResult(res, existingId);
+}
+return IndexResult(res, this, existingId);
+}
+
+return IndexResult(res, this);
 }

 /// @brief removes an key/element from the index
 Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
 LocalDocumentId const&,
-VPackSlice const& doc) {
+VPackSlice const& doc,
+OperationMode mode) {
 ManagedDocumentResult result;
 IndexLookupContext context(trx, _collection, &result, 1);

@@ -405,7 +430,8 @@ Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
 Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
 LocalDocumentId const&,
 VPackSlice const& doc,
-ManagedDocumentResult& mmdr) {
+ManagedDocumentResult& mmdr,
+OperationMode mode) {
 IndexLookupContext context(trx, _collection, &mmdr, 1);

 VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(doc));

@@ -201,10 +201,12 @@ class MMFilesPrimaryIndex final : public MMFilesIndex {
 void toVelocyPackFigures(VPackBuilder&) const override;

 Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 void load() override {}
 void unload() override;
@@ -248,14 +250,16 @@ class MMFilesPrimaryIndex final : public MMFilesIndex {
 transaction::Methods*, arangodb::basics::BucketPosition& position);

 Result insertKey(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&);
+arangodb::velocypack::Slice const&, OperationMode mode);
 Result insertKey(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, ManagedDocumentResult&);
+arangodb::velocypack::Slice const&, ManagedDocumentResult&,
+OperationMode mode);

 Result removeKey(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&);
+arangodb::velocypack::Slice const&, OperationMode mode);
 Result removeKey(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, ManagedDocumentResult&);
+arangodb::velocypack::Slice const&, ManagedDocumentResult&,
+OperationMode mode);

 int resize(transaction::Methods*, size_t);

@@ -32,6 +32,7 @@
 #include "Indexes/IndexLookupContext.h"
 #include "Indexes/IndexResult.h"
 #include "Indexes/SimpleAttributeEqualityMatcher.h"
+#include "StorageEngine/PhysicalCollection.h"
 #include "Transaction/Helpers.h"
 #include "Transaction/Methods.h"
 #include "VocBase/LogicalCollection.h"
@@ -710,7 +711,7 @@ void MMFilesSkiplistIndex::toVelocyPackFigures(VPackBuilder& builder) const {
 /// @brief inserts a document into a skiplist index
 Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc, OperationMode mode) {
 std::vector<MMFilesSkiplistIndexElement*> elements;

 int res;
@@ -739,10 +740,13 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
 // by the index
 size_t const count = elements.size();

+int badIndex = 0;
 for (size_t i = 0; i < count; ++i) {
 res = _skiplistIndex->insert(&context, elements[i]);

 if (res != TRI_ERROR_NO_ERROR) {
+badIndex = i;
+
 // Note: this element is freed already
 for (size_t j = i; j < count; ++j) {
 _allocator->deallocate(elements[j]);
@@ -756,17 +760,60 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
 // We ignore unique_constraint violated if we are not unique
 res = TRI_ERROR_NO_ERROR;
 }

 break;
 }
 }
+
+if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
+elements.clear();
+
+// need to rebuild elements, find conflicting key to return error,
+// and then free elements again
+int innerRes = TRI_ERROR_NO_ERROR;
+try {
+innerRes = fillElement<MMFilesSkiplistIndexElement>(elements, documentId, doc);
+} catch (basics::Exception const& ex) {
+innerRes = ex.code();
+} catch (std::bad_alloc const&) {
+innerRes = TRI_ERROR_OUT_OF_MEMORY;
+} catch (...) {
+innerRes = TRI_ERROR_INTERNAL;
+}
+
+auto cleanup = [this, &elements] {
+for (auto& element : elements) {
+// free all elements to prevent leak
+_allocator->deallocate(element);
+}
+};
+TRI_DEFER(cleanup());
+
+if (innerRes != TRI_ERROR_NO_ERROR) {
+return IndexResult(innerRes, this);
+}
+
+auto found = _skiplistIndex->rightLookup(&context, elements[badIndex]);
+TRI_ASSERT(found);
+LocalDocumentId rev(found->document()->localDocumentId());
+ManagedDocumentResult mmdr;
+_collection->getPhysical()->readDocument(trx, rev, mmdr);
+std::string existingId(VPackSlice(mmdr.vpack())
+.get(StaticStrings::KeyString)
+.copyString());
+if (mode == OperationMode::internal) {
+return IndexResult(res, existingId);
+}
+return IndexResult(res, this, existingId);
+}
+
 return IndexResult(res, this);
 }

 /// @brief removes a document from a skiplist index
 Result MMFilesSkiplistIndex::remove(transaction::Methods* trx,
 LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isRollback) {
+VPackSlice const& doc, OperationMode mode) {
 std::vector<MMFilesSkiplistIndexElement*> elements;

 int res;

@@ -285,10 +285,12 @@ class MMFilesSkiplistIndex final : public MMFilesPathBasedIndex {
 void toVelocyPackFigures(VPackBuilder&) const override;

 Result insert(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 Result remove(transaction::Methods*, LocalDocumentId const& documentId,
-arangodb::velocypack::Slice const&, bool isRollback) override;
+arangodb::velocypack::Slice const&,
+OperationMode mode) override;

 void unload() override;

@@ -873,7 +873,7 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
 state->prepareOperation(_logicalCollection->cid(), revisionId, StringRef(),
 TRI_VOC_DOCUMENT_OPERATION_INSERT);

-res = insertDocument(trx, documentId, newSlice, options.waitForSync);
+res = insertDocument(trx, documentId, newSlice, options, options.waitForSync);
 if (res.ok()) {
 Result lookupResult = lookupDocumentVPack(documentId, trx, mdr, false);

@@ -978,7 +978,7 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
 state->prepareOperation(_logicalCollection->cid(), revisionId, StringRef(),
 TRI_VOC_DOCUMENT_OPERATION_UPDATE);
 res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc,
-options.waitForSync);
+options, options.waitForSync);

 if (res.ok()) {
 mdr.setManaged(newDoc.begin(), documentId);
@@ -1075,7 +1075,8 @@ Result RocksDBCollection::replace(
 TRI_VOC_DOCUMENT_OPERATION_REPLACE);

 RocksDBOperationResult opResult = updateDocument(
-trx, oldDocumentId, oldDoc, documentId, newDoc, options.waitForSync);
+trx, oldDocumentId, oldDoc, documentId, newDoc, options,
+options.waitForSync);
 if (opResult.ok()) {
 mdr.setManaged(newDoc.begin(), documentId);
 TRI_ASSERT(!mdr.empty());
@@ -1153,9 +1154,10 @@ Result RocksDBCollection::remove(arangodb::transaction::Methods* trx,
 [&state]() { state->resetLogState(); });

 // add possible log statement under guard
-state->prepareOperation(_logicalCollection->cid(), documentId.id(), StringRef(key),
-TRI_VOC_DOCUMENT_OPERATION_REMOVE);
-res = removeDocument(trx, oldDocumentId, oldDoc, false, options.waitForSync);
+state->prepareOperation(_logicalCollection->cid(), documentId.id(),
+StringRef(key),TRI_VOC_DOCUMENT_OPERATION_REMOVE);
+res = removeDocument(trx, oldDocumentId, oldDoc, options, false,
+options.waitForSync);
 if (res.ok()) {
 // report key size
 res = state->addOperation(_logicalCollection->cid(), documentId.id(),
@@ -1315,7 +1317,8 @@ arangodb::Result RocksDBCollection::fillIndexes(
 arangodb::Result res;
 auto cb = [&](LocalDocumentId const& documentId, VPackSlice slice) {
 if (res.ok()) {
-res = ridx->insertInternal(trx, &batched, documentId, slice);
+res = ridx->insertInternal(trx, &batched, documentId, slice,
+Index::OperationMode::normal);
 if (res.ok()) {
 numDocsWritten++;
 }
@@ -1352,7 +1355,8 @@ arangodb::Result RocksDBCollection::fillIndexes(
 this->readDocument(trx, token, mmdr)) {
 // we need to remove already inserted documents up to numDocsWritten
 res2 = ridx->removeInternal(trx, &batched, mmdr.localDocumentId(),
-VPackSlice(mmdr.vpack()));
+VPackSlice(mmdr.vpack()),
+Index::OperationMode::rollback);
 if (res2.ok()) {
 numDocsWritten--;
 }
@@ -1375,7 +1379,7 @@ arangodb::Result RocksDBCollection::fillIndexes(

 RocksDBOperationResult RocksDBCollection::insertDocument(
 arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-VPackSlice const& doc, bool& waitForSync) const {
+VPackSlice const& doc, OperationOptions& options, bool& waitForSync) const {
 RocksDBOperationResult res;
 // Coordinator doesn't know index internals
 TRI_ASSERT(!ServerState::instance()->isCoordinator());
@@ -1399,7 +1403,8 @@ RocksDBOperationResult RocksDBCollection::insertDocument(
 READ_LOCKER(guard, _indexesLock);
 for (std::shared_ptr<Index> const& idx : _indexes) {
 RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get());
-Result tmpres = rIdx->insertInternal(trx, mthd, documentId, doc);
+Result tmpres = rIdx->insertInternal(trx, mthd, documentId, doc,
+options.indexOpMode);
 if (!tmpres.ok()) {
 if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
 // in case of OOM return immediately
@@ -1428,7 +1433,8 @@ RocksDBOperationResult RocksDBCollection::insertDocument(

 RocksDBOperationResult RocksDBCollection::removeDocument(
 arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
-VPackSlice const& doc, bool isUpdate, bool& waitForSync) const {
+VPackSlice const& doc, OperationOptions& options, bool isUpdate,
+bool& waitForSync) const {
 // Coordinator doesn't know index internals
 TRI_ASSERT(!ServerState::instance()->isCoordinator());
 TRI_ASSERT(trx->state()->isRunning());
@@ -1460,7 +1466,7 @@ RocksDBOperationResult RocksDBCollection::removeDocument(
 RocksDBOperationResult resInner;
 READ_LOCKER(guard, _indexesLock);
 for (std::shared_ptr<Index> const& idx : _indexes) {
-Result tmpres = idx->remove(trx, documentId, doc, false);
+Result tmpres = idx->remove(trx, documentId, doc, options.indexOpMode);
 if (!tmpres.ok()) {
 if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
 // in case of OOM return immediately
@@ -1505,7 +1511,8 @@ RocksDBOperationResult RocksDBCollection::lookupDocument(
 RocksDBOperationResult RocksDBCollection::updateDocument(
 transaction::Methods* trx, LocalDocumentId const& oldDocumentId,
 VPackSlice const& oldDoc, LocalDocumentId const& newDocumentId,
-VPackSlice const& newDoc, bool& waitForSync) const {
+VPackSlice const& newDoc, OperationOptions& options,
+bool& waitForSync) const {
 // keysize in return value is set by insertDocument

 // Coordinator doesn't know index internals
@@ -1546,7 +1553,8 @@ RocksDBOperationResult RocksDBCollection::updateDocument(
 for (std::shared_ptr<Index> const& idx : _indexes) {
 RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get());
 Result tmpres = rIdx->updateInternal(trx, mthd, oldDocumentId, oldDoc,
-newDocumentId, newDoc);
+newDocumentId, newDoc,
+options.indexOpMode);
 if (!tmpres.ok()) {
 if (tmpres.is(TRI_ERROR_OUT_OF_MEMORY)) {
 // in case of OOM return immediately

@ -229,12 +229,13 @@ class RocksDBCollection final : public PhysicalCollection {
arangodb::RocksDBOperationResult insertDocument(
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc, bool& waitForSync) const;
arangodb::velocypack::Slice const& doc, OperationOptions& options,
bool& waitForSync) const;

arangodb::RocksDBOperationResult removeDocument(
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc, bool isUpdate,
bool& waitForSync) const;
arangodb::velocypack::Slice const& doc, OperationOptions& options,
bool isUpdate, bool& waitForSync) const;

arangodb::RocksDBOperationResult lookupDocument(
transaction::Methods* trx, arangodb::velocypack::Slice const& key,

@ -242,8 +243,10 @@ class RocksDBCollection final : public PhysicalCollection {
arangodb::RocksDBOperationResult updateDocument(
transaction::Methods* trx, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc, LocalDocumentId const& newDocumentId,
arangodb::velocypack::Slice const& newDoc, bool& waitForSync) const;
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
arangodb::velocypack::Slice const& newDoc, OperationOptions& options,
bool& waitForSync) const;

arangodb::Result lookupDocumentVPack(LocalDocumentId const& documentId,
transaction::Methods*,
@ -447,7 +447,8 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
VPackSlice const& doc) {
VPackSlice const& doc,
OperationMode mode) {
VPackSlice fromTo = doc.get(_directionAttr);
TRI_ASSERT(fromTo.isString());
auto fromToRef = StringRef(fromTo);

@ -478,7 +479,8 @@ Result RocksDBEdgeIndex::insertInternal(transaction::Methods* trx,
Result RocksDBEdgeIndex::removeInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
VPackSlice const& doc) {
VPackSlice const& doc,
OperationMode mode) {
// VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.get(_directionAttr);
auto fromToRef = StringRef(fromTo);
@ -165,11 +165,13 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
Result insertInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

Result removeInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
@ -171,7 +171,8 @@ bool RocksDBFulltextIndex::matchesDefinition(VPackSlice const& info) const {
Result RocksDBFulltextIndex::insertInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
VPackSlice const& doc) {
VPackSlice const& doc,
OperationMode mode) {
std::set<std::string> words = wordlist(doc);
if (words.empty()) {
return TRI_ERROR_NO_ERROR;

@ -199,7 +200,8 @@ Result RocksDBFulltextIndex::insertInternal(transaction::Methods* trx,
Result RocksDBFulltextIndex::removeInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
VPackSlice const& doc) {
VPackSlice const& doc,
OperationMode mode) {
std::set<std::string> words = wordlist(doc);
if (words.empty()) {
return IndexResult();
@ -107,11 +107,14 @@ class RocksDBFulltextIndex final : public RocksDBIndex {
/// insert index elements into the specified write batch.
Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

/// remove index elements and put it in the specified write batch.
Result removeInternal(transaction::Methods*, RocksDBMethods*, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
Result removeInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&,
OperationMode mode) override;

private:
std::set<std::string> wordlist(arangodb::velocypack::Slice const&);
@ -413,7 +413,8 @@ bool RocksDBGeoIndex::matchesDefinition(VPackSlice const& info) const {
Result RocksDBGeoIndex::insertInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
velocypack::Slice const& doc) {
velocypack::Slice const& doc,
OperationMode mode) {
// GeoIndex is always exclusively write-locked with rocksdb
double latitude;
double longitude;

@ -483,7 +484,8 @@ Result RocksDBGeoIndex::insertInternal(transaction::Methods* trx,
Result RocksDBGeoIndex::removeInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc) {
arangodb::velocypack::Slice const& doc,
OperationMode mode) {
// GeoIndex is always exclusively write-locked with rocksdb
double latitude = 0.0;
double longitude = 0.0;
@ -163,12 +163,14 @@ class RocksDBGeoIndex final : public RocksDBIndex {
/// insert index elements into the specified write batch.
Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

/// remove index elements and put it in the specified write batch.
Result removeInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

private:
/// internal insert function, set batch or trx before calling
@ -55,6 +55,7 @@ Result syncChunkRocksDB(DatabaseInitialSyncer& syncer,
options.silent = true;
options.ignoreRevs = true;
options.isRestore = true;
options.indexOpMode = Index::OperationMode::internal;
if (!syncer._leaderId.empty()) {
options.isSynchronousReplicationFrom = syncer._leaderId;
}

@ -282,23 +283,56 @@ Result syncChunkRocksDB(DatabaseInitialSyncer& syncer,
LocalDocumentId const documentId = physical->lookupKey(trx, keySlice);

auto removeConflict = [&](std::string conflictingKey) -> OperationResult {
VPackBuilder conflict;
conflict.add(VPackValue(conflictingKey));
LocalDocumentId conflictId = physical->lookupKey(trx, conflict.slice());
if (conflictId.isSet()) {
ManagedDocumentResult mmdr;
bool success = physical->readDocument(trx, conflictId, mmdr);
if (success) {
VPackSlice conflictingKey(mmdr.vpack());
return trx->remove(collectionName, conflictingKey, options);
}
}
return OperationResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND);
};

if (!documentId.isSet()) {
// INSERT
OperationResult opRes = trx->insert(collectionName, it, options);
if (opRes.code != TRI_ERROR_NO_ERROR) {
if (opRes.errorMessage.empty()) {
return Result(opRes.code);
if (opRes.code == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && opRes.errorMessage > keySlice.copyString()) {
// remove conflict and retry
auto inner = removeConflict(opRes.errorMessage);
if (inner.code != TRI_ERROR_NO_ERROR) {
return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
}
opRes = trx->insert(collectionName, it, options);
if (opRes.code != TRI_ERROR_NO_ERROR) {
return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
}
} else {
return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
}
return Result(opRes.code, opRes.errorMessage);
}
} else {
// UPDATE
OperationResult opRes = trx->update(collectionName, it, options);
if (opRes.code != TRI_ERROR_NO_ERROR) {
if (opRes.errorMessage.empty()) {
return Result(opRes.code);
if (opRes.code == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && opRes.errorMessage > keySlice.copyString()) {
// remove conflict and retry
auto inner = removeConflict(opRes.errorMessage);
if (inner.code != TRI_ERROR_NO_ERROR) {
return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
}
opRes = trx->update(collectionName, it, options);
if (opRes.code != TRI_ERROR_NO_ERROR) {
return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
}
} else {
return opRes.errorMessage.empty() ? Result(opRes.code) : Result(opRes.code, opRes.errorMessage);
}
return Result(opRes.code, opRes.errorMessage);
}
}
}
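The syncer hunk above is the consumer of the new behaviour: with indexOpMode set to internal, a unique-constraint violation surfaces the offending key in opRes.errorMessage, so the incremental syncer can delete that conflicting document and retry the write once. Below is a self-contained sketch of the same remove-conflict-and-retry idea; it uses an invented in-memory store instead of the real transaction API.

// Hypothetical sketch: a write that hits a unique-constraint violation learns
// the offending key from the error, deletes that document, and retries once.
#include <iostream>
#include <map>
#include <optional>
#include <string>

struct Store {
  std::map<std::string, int> docs;          // _key -> value
  std::map<int, std::string> uniqueIndex;   // unique value -> _key

  // returns the conflicting key on a unique-constraint violation
  std::optional<std::string> insert(std::string const& key, int value) {
    auto it = uniqueIndex.find(value);
    if (it != uniqueIndex.end() && it->second != key) {
      return it->second;                    // offending key
    }
    docs[key] = value;
    uniqueIndex[value] = key;
    return std::nullopt;
  }

  void remove(std::string const& key) {
    auto it = docs.find(key);
    if (it == docs.end()) return;
    uniqueIndex.erase(it->second);
    docs.erase(it);
  }
};

bool syncDocument(Store& store, std::string const& key, int value) {
  auto conflict = store.insert(key, value);
  if (!conflict) return true;
  // remove the conflicting document and retry once
  store.remove(*conflict);
  return !store.insert(key, value).has_value();
}

int main() {
  Store store;
  store.insert("b", 7);                   // existing doc holds unique value 7
  bool ok = syncDocument(store, "a", 7);  // conflicts with "b", resolved by retry
  std::cout << std::boolalpha << ok << " docs=" << store.docs.size() << "\n";
}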
@ -240,12 +240,13 @@ Result RocksDBIndex::updateInternal(transaction::Methods* trx, RocksDBMethods* m
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
arangodb::velocypack::Slice const& newDoc) {
Result res = removeInternal(trx, mthd, oldDocumentId, oldDoc);
arangodb::velocypack::Slice const& newDoc,
OperationMode mode) {
Result res = removeInternal(trx, mthd, oldDocumentId, oldDoc, mode);
if (!res.ok()) {
return res;
}
return insertInternal(trx, mthd, newDocumentId, newDoc);
return insertInternal(trx, mthd, newDocumentId, newDoc, mode);
}

void RocksDBIndex::truncate(transaction::Methods* trx) {
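RocksDBIndex::updateInternal above keeps its generic remove-then-insert fallback but now forwards the operation mode to both halves, so a conflict detected during the insert half still reports the offending key in the requested style. A small illustrative sketch of that fallback follows; the names are hypothetical, not the real class.

// Minimal sketch: update implemented as remove + insert, with the mode
// forwarded to both calls.
#include <iostream>
#include <string>

enum class OpMode { normal, internal, rollback };

struct ResultSketch {
  int code = 0;
  std::string message;
  bool ok() const { return code == 0; }
};

struct IndexSketch {
  virtual ~IndexSketch() = default;
  virtual ResultSketch insertEntry(int docId, OpMode mode) = 0;
  virtual ResultSketch removeEntry(int docId, OpMode mode) = 0;

  // generic fallback: drop the old entry, then add the new one
  ResultSketch updateEntry(int oldId, int newId, OpMode mode) {
    ResultSketch res = removeEntry(oldId, mode);
    if (!res.ok()) {
      return res;
    }
    return insertEntry(newId, mode);  // mode reaches the insert half too
  }
};

struct NoopIndex : IndexSketch {
  ResultSketch insertEntry(int, OpMode) override { return {}; }
  ResultSketch removeEntry(int, OpMode) override { return {}; }
};

int main() {
  NoopIndex idx;
  std::cout << idx.updateEntry(1, 2, OpMode::internal).ok() << "\n";  // prints 1
}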
@ -94,15 +94,16 @@ class RocksDBIndex : public Index {
}

Result insert(transaction::Methods* trx, LocalDocumentId const& documentId,
velocypack::Slice const& doc, bool) override {
velocypack::Slice const& doc, OperationMode mode) override {
auto mthds = RocksDBTransactionState::toMethods(trx);
return insertInternal(trx, mthds, documentId, doc);
return insertInternal(trx, mthds, documentId, doc, mode);
}

Result remove(transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc, bool) override {
arangodb::velocypack::Slice const& doc,
OperationMode mode) override {
auto mthds = RocksDBTransactionState::toMethods(trx);
return removeInternal(trx, mthds, documentId, doc);
return removeInternal(trx, mthds, documentId, doc, mode);
}

void setCacheEnabled(bool enable) {

@ -121,18 +122,21 @@ class RocksDBIndex : public Index {
/// insert index elements into the specified write batch.
virtual Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) = 0;
arangodb::velocypack::Slice const&,
OperationMode mode) = 0;

virtual Result updateInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc);
velocypack::Slice const& newDoc,
OperationMode mode);

/// remove index elements and put it in the specified write batch.
virtual Result removeInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) = 0;
arangodb::velocypack::Slice const&,
OperationMode mode) = 0;

rocksdb::ColumnFamilyHandle* columnFamily() const { return _cf; }
@ -219,14 +219,22 @@ LocalDocumentId RocksDBPrimaryIndex::lookupKey(transaction::Methods* trx,
Result RocksDBPrimaryIndex::insertInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
VPackSlice const& slice) {
VPackSlice const& slice,
OperationMode mode) {
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(slice);
RocksDBKeyLeaser key(trx);
key->constructPrimaryIndexValue(_objectId, StringRef(keySlice));
auto value = RocksDBValue::PrimaryIndexValue(documentId.id());

if (mthd->Exists(_cf, key.ref())) {
return IndexResult(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED, this);
std::string existingId(slice.get(StaticStrings::KeyString).copyString());

if (mode == OperationMode::internal) {
return IndexResult(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED,
existingId);
}
return IndexResult(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED, this,
existingId);
}

blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));

@ -240,7 +248,8 @@ Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc) {
velocypack::Slice const& newDoc,
OperationMode mode) {
VPackSlice keySlice = transaction::helpers::extractKeyFromDocument(oldDoc);
TRI_ASSERT(keySlice == oldDoc.get(StaticStrings::KeyString));
RocksDBKeyLeaser key(trx);

@ -257,7 +266,8 @@ Result RocksDBPrimaryIndex::updateInternal(transaction::Methods* trx,
Result RocksDBPrimaryIndex::removeInternal(transaction::Methods* trx,
RocksDBMethods* mthd,
LocalDocumentId const& documentId,
VPackSlice const& slice) {
VPackSlice const& slice,
OperationMode mode) {
// TODO: deal with matching revisions?
RocksDBKeyLeaser key(trx);
key->constructPrimaryIndexValue(
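For the primary index, the offending key is simply the _key of the incoming document, and the reporting style depends on the mode: internal callers (the incremental syncer) get the bare key, while normal callers get a human-readable message with the key appended, matching the IndexResult constructor added in this commit. Below is an illustrative sketch of the two styles; the names and the numeric error code are assumptions, not the real types.

// Sketch of mode-dependent error reporting for a unique-constraint violation.
#include <iostream>
#include <string>

enum class OpMode { normal, internal, rollback };

struct IndexResultLike {
  int code;
  std::string message;
};

IndexResultLike uniqueViolation(std::string const& offendingKey, OpMode mode) {
  constexpr int kUniqueConstraintViolated = 1210;  // illustrative error code
  if (mode == OpMode::internal) {
    // machine-consumable: the message carries only the offending key
    return {kUniqueConstraintViolated, offendingKey};
  }
  // human-readable: append the key to a descriptive message
  return {kUniqueConstraintViolated,
          "unique constraint violated; conflicting key: " + offendingKey};
}

int main() {
  std::cout << uniqueViolation("4711", OpMode::internal).message << "\n";
  std::cout << uniqueViolation("4711", OpMode::normal).message << "\n";
}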
@ -128,18 +128,21 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
/// insert index elements into the specified write batch.
Result insertInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

Result updateInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc) override;
velocypack::Slice const& newDoc,
OperationMode mode) override;

/// remove index elements and put it in the specified write batch.
Result removeInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
@ -554,7 +554,8 @@ void RocksDBVPackIndex::fillPaths(std::vector<std::vector<std::string>>& paths,
Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
RocksDBMethods* mthds,
LocalDocumentId const& documentId,
VPackSlice const& doc) {
VPackSlice const& doc,
OperationMode mode) {
std::vector<RocksDBKey> elements;
std::vector<uint64_t> hashes;
int res = TRI_ERROR_NO_ERROR;

@ -573,13 +574,15 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
: RocksDBValue::VPackIndexValue();

size_t const count = elements.size();
RocksDBValue existing =
RocksDBValue::Empty(RocksDBEntryType::UniqueVPackIndexValue);
for (size_t i = 0; i < count; ++i) {
RocksDBKey& key = elements[i];
if (_unique) {
RocksDBValue existing =
RocksDBValue::Empty(RocksDBEntryType::UniqueVPackIndexValue);
if (mthds->Exists(_cf, key)) {
res = TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED;
auto found = mthds->Get(_cf, key, existing.buffer());
TRI_ASSERT(found.ok());
}
}

@ -613,6 +616,19 @@ Result RocksDBVPackIndex::insertInternal(transaction::Methods* trx,
}
}

if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
LocalDocumentId rev(RocksDBValue::revisionId(existing));
ManagedDocumentResult mmdr;
bool success = _collection->getPhysical()->readDocument(trx, rev, mmdr);
TRI_ASSERT(success);
std::string existingKey(
VPackSlice(mmdr.vpack()).get(StaticStrings::KeyString).copyString());
if (mode == OperationMode::internal) {
return IndexResult(res, existingKey);
}
return IndexResult(res, this, existingKey);
}

return IndexResult(res, this);
}

@ -621,13 +637,14 @@ Result RocksDBVPackIndex::updateInternal(transaction::Methods* trx,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc) {
velocypack::Slice const& newDoc,
OperationMode mode) {

if (!_unique || _useExpansion) {
// only unique index supports in-place updates
// lets also not handle the complex case of expanded arrays
return RocksDBIndex::updateInternal(trx, mthds, oldDocumentId, oldDoc,
newDocumentId, newDoc);
newDocumentId, newDoc, mode);
} else {

bool equal = true;

@ -651,7 +668,7 @@ Result RocksDBVPackIndex::updateInternal(transaction::Methods* trx,
if (!equal) {
// we can only use in-place updates if no indexed attributes changed
return RocksDBIndex::updateInternal(trx, mthds, oldDocumentId, oldDoc,
newDocumentId, newDoc);
newDocumentId, newDoc, mode);
}

// more expansive method to

@ -695,7 +712,8 @@ Result RocksDBVPackIndex::updateInternal(transaction::Methods* trx,
Result RocksDBVPackIndex::removeInternal(transaction::Methods* trx,
RocksDBMethods* mthds,
LocalDocumentId const& documentId,
VPackSlice const& doc) {
VPackSlice const& doc,
OperationMode mode) {
std::vector<RocksDBKey> elements;
std::vector<uint64_t> hashes;
int res = TRI_ERROR_NO_ERROR;
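For a unique secondary (VPack) index the conflicting document is not the incoming one, so the code above reads the stored value of the occupied index slot, resolves it to the existing document, and extracts that document's _key. Here is a self-contained sketch of that lookup, using plain std::map containers instead of the RocksDB key/value types (all names are illustrative).

// Sketch: the unique-index slot stores the existing document id, which is
// resolved back to its document to recover the conflicting _key.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <string>

struct Doc {
  std::string key;
  int indexedValue;
};

struct UniqueSecondaryIndex {
  std::map<int, uint64_t> slots;             // indexed value -> document id
  std::map<uint64_t, Doc> const* documents;  // document store (by id)

  // returns the _key of the conflicting document, if any
  std::optional<std::string> insert(uint64_t docId, Doc const& doc) {
    auto it = slots.find(doc.indexedValue);
    if (it != slots.end()) {
      return documents->at(it->second).key;  // read back the existing doc's key
    }
    slots[doc.indexedValue] = docId;
    return std::nullopt;
  }
};

int main() {
  std::map<uint64_t, Doc> documents{{1, {"old", 99}}};
  UniqueSecondaryIndex idx{{{99, 1}}, &documents};
  if (auto conflict = idx.insert(2, {"new", 99})) {
    std::cout << "conflicting key: " << *conflict << "\n";  // prints "old"
  }
}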
@ -197,17 +197,20 @@ class RocksDBVPackIndex : public RocksDBIndex {
protected:
Result insertInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

Result updateInternal(transaction::Methods* trx, RocksDBMethods*,
LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
velocypack::Slice const& newDoc) override;
velocypack::Slice const& newDoc,
OperationMode mode) override;

Result removeInternal(transaction::Methods*, RocksDBMethods*,
LocalDocumentId const& documentId,
arangodb::velocypack::Slice const&) override;
arangodb::velocypack::Slice const&,
OperationMode mode) override;

Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
@ -1925,8 +1925,8 @@ OperationResult transaction::Methods::modifyLocal(
resultBuilder.clear();
}

return OperationResult(resultBuilder.steal(), nullptr, "", res.errorNumber(),
options.waitForSync, errorCounter);
return OperationResult(resultBuilder.steal(), nullptr, res.errorMessage(),
res.errorNumber(), options.waitForSync, errorCounter);
}

/// @brief remove one or multiple documents in a collection
@ -25,6 +25,7 @@
#define ARANGOD_UTILS_OPERATION_OPTIONS_H 1

#include "Basics/Common.h"
#include "Indexes/Index.h"

namespace arangodb {
// a struct for keeping document modification operations in transactions

@ -32,7 +33,8 @@ struct OperationOptions {
OperationOptions()
: recoveryData(nullptr), waitForSync(false), keepNull(true),
mergeObjects(true), silent(false), ignoreRevs(true),
returnOld(false), returnNew(false), isRestore(false) {}
returnOld(false), returnNew(false), isRestore(false),
indexOpMode(Index::OperationMode::normal) {}

// original marker, set by an engine's recovery procedure only!
void* recoveryData;

@ -67,6 +69,8 @@ struct OperationOptions {
// operation if we are merely a follower. Finally, we must deny replications
// from the wrong leader.
std::string isSynchronousReplicationFrom;

Index::OperationMode indexOpMode;
};

}
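OperationOptions now carries the index operation mode and defaults it to normal, so existing callers keep the human-readable error behaviour; only the incremental syncer switches it to internal (see the syncChunkRocksDB hunk above). A brief sketch of that default-plus-override pattern, with invented types:

// Illustrative only: an options struct defaulting the mode to "normal",
// with a replication-style caller overriding it to "internal".
#include <iostream>

enum class OpMode { normal, internal, rollback };

struct Options {
  bool waitForSync = false;
  bool isRestore = false;
  OpMode indexOpMode = OpMode::normal;  // default keeps readable error messages
};

void syncChunk() {
  Options options;
  options.isRestore = true;
  options.indexOpMode = OpMode::internal;  // ask indexes for the bare offending key
  std::cout << std::boolalpha
            << (options.indexOpMode == OpMode::internal) << "\n";
}

int main() { syncChunk(); }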
@ -1307,6 +1307,6 @@ function ReplicationIncrementalKeyConflict() {
jsunity.run(ReplicationSuite);
jsunity.run(ReplicationOtherDBSuite);
// TODO: activate this test once it works
// jsunity.run(ReplicationIncrementalKeyConflict);
jsunity.run(ReplicationIncrementalKeyConflict);

return jsunity.done();