
Bug fix 3.4/arrayindex unique update (#8422)

Wilfried Goesgens 2019-03-14 18:13:05 +01:00 committed by Jan
parent 9c130b4e38
commit 68ce741b13
5 changed files with 100 additions and 3 deletions

@@ -1,6 +1,8 @@
v3.4.5 (XXXX-XX-XX)
-------------------
* fix unique array indexes not accepting document updates (see the reproduction sketch below)
* when restarting a follower in active failover mode, try an incremental sync instead
of a full resync
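
For context, a minimal arangosh sketch of the scenario this changelog entry refers to; the collection and document names are only examples:

var db = require("@arangodb").db;
db._drop("demo");
var c = db._create("demo");
// unique hash index over an array attribute (an "expanded" index)
c.ensureIndex({ type: "hash", unique: true, fields: [ "value[*]" ] });
c.insert({ _key: "doc1", value: [ 1 ] });
// before this fix, the update below could fail with a unique constraint
// violation, because the removal of the old index entries was not visible
// to the subsequent insert of the new ones; with the fix it succeeds
c.update("doc1", { value: [ 2 ] });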

@@ -242,6 +242,11 @@ Result RocksDBIndex::updateInternal(transaction::Methods* trx, RocksDBMethods* m
// RocksDBPrimaryIndex must override this method accordingly
TRI_ASSERT(type() != TRI_IDX_TYPE_PRIMARY_INDEX);
/// only if the insert needs to see the changes of the update, enable indexing:
IndexingEnabler enabler(mthd, mthd->isIndexingDisabled() && hasExpansion() && unique());
TRI_ASSERT((hasExpansion() && unique()) ? !mthd->isIndexingDisabled() : true);
Result res = removeInternal(trx, mthd, oldDocumentId, oldDoc, mode);
if (!res.ok()) {
return res;

@@ -209,11 +209,13 @@ bool RocksDBTrxMethods::DisableIndexing() {
return false;
}
void RocksDBTrxMethods::EnableIndexing() {
bool RocksDBTrxMethods::EnableIndexing() {
if (_indexingDisabled) {
_state->_rocksTransaction->EnableIndexing();
_indexingDisabled = false;
return true;
}
return false;
}
RocksDBTrxMethods::RocksDBTrxMethods(RocksDBTransactionState* state)

@@ -78,12 +78,14 @@ class RocksDBMethods {
/// @brief read options for use with iterators
rocksdb::ReadOptions iteratorReadOptions();
virtual bool isIndexingDisabled() const { return false; }
/// @brief returns true if indexing was disabled by this call
/// the default implementation is to do nothing
virtual bool DisableIndexing() { return false; }
// the default implementation is to do nothing
virtual void EnableIndexing() {}
virtual bool EnableIndexing() { return false; }
virtual bool Exists(rocksdb::ColumnFamilyHandle*, RocksDBKey const&) = 0;
virtual arangodb::Result Get(rocksdb::ColumnFamilyHandle*,
@@ -151,10 +153,12 @@ class RocksDBTrxMethods : public RocksDBMethods {
public:
explicit RocksDBTrxMethods(RocksDBTransactionState* state);
virtual bool isIndexingDisabled() const override { return _indexingDisabled; }
/// @brief returns true if indexing was disabled by this call
bool DisableIndexing() override;
void EnableIndexing() override;
bool EnableIndexing() override;
bool Exists(rocksdb::ColumnFamilyHandle*, RocksDBKey const&) override;
arangodb::Result Get(rocksdb::ColumnFamilyHandle*, rocksdb::Slice const& key,
@@ -278,6 +282,35 @@ struct IndexingDisabler {
RocksDBMethods* _meth;
};
// if only single indices should be enabled during operations
struct IndexingEnabler {
// will only be active if condition is true
IndexingEnabler() = delete;
IndexingEnabler(IndexingEnabler&&) = delete;
IndexingEnabler(IndexingEnabler const&) = delete;
IndexingEnabler& operator=(IndexingEnabler const&) = delete;
IndexingEnabler& operator=(IndexingEnabler&&) = delete;
IndexingEnabler(RocksDBMethods* meth, bool condition) : _meth(nullptr) {
if (condition) {
bool enableHere = meth->EnableIndexing();
if (enableHere) {
_meth = meth;
}
}
}
~IndexingEnabler() {
if (_meth) {
_meth->DisableIndexing();
}
}
private:
RocksDBMethods* _meth;
};
} // namespace arangodb
#endif

@@ -730,6 +730,61 @@ function ensureIndexSuite() {
}
});
assertTrue(found);
// this should work without problems:
collection.update("test1", {value: 'othervalue'});
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test: ensure hash index on arrays
////////////////////////////////////////////////////////////////////////////////
testEnsureUniqueHashOnArray : function () {
var res = collection.getIndexes();
assertEqual(1, res.length);
var idx = collection.ensureIndex({ type: "hash", unique: true, fields: [ "value[*]" ] });
assertEqual("hash", idx.type);
assertTrue(idx.unique);
assertFalse(idx.sparse);
assertEqual([ "value[*]" ], idx.fields);
res = collection.getIndexes()[collection.getIndexes().length - 1];
assertEqual("hash", res.type);
assertTrue(res.unique);
assertFalse(res.sparse);
assertEqual([ "value[*]" ], res.fields);
assertEqual(idx.id, res.id);
var i = 0;
for (i = 0; i < 100; ++i) {
collection.insert({ _key: "test" + i, value: [ i ] });
}
for (i = 0; i < 100; ++i) {
var doc = collection.document("test" + i);
assertEqual("test" + i, doc._key);
assertEqual(i, doc.value[0]);
}
var query = "FOR doc IN " + collection.name() + " FILTER 1 IN doc.value RETURN doc";
var st = db._createStatement({ query: query });
var found = false;
st.explain().plan.nodes.forEach(function(node) {
if (node.type === "IndexNode") {
assertTrue(node.indexes[0].type === "hash" && node.indexes[0].fields[0] === "value[*]");
found = true;
}
});
assertTrue(found);
// this should work without problems:
collection.update("test1", {value: ['1']});
collection.update("test1", {value: ['othervalue']});
collection.update("test1", {value: ['othervalue', 'morevalues']});
},
////////////////////////////////////////////////////////////////////////////////