diff --git a/3rdParty/velocypack/src/Collection.cpp b/3rdParty/velocypack/src/Collection.cpp
index e498a38454..b8b5dc4ded 100644
--- a/3rdParty/velocypack/src/Collection.cpp
+++ b/3rdParty/velocypack/src/Collection.cpp
@@ -343,68 +343,12 @@ Builder Collection::merge(Slice const& left, Slice const& right,
   }
 
   Builder b;
-  b.add(Value(ValueType::Object));
-
-  std::unordered_map<std::string, Slice> rightValues;
-  {
-    ObjectIterator it(right);
-    while (it.valid()) {
-      rightValues.emplace(it.key(true).copyString(), it.value());
-      it.next();
-    }
-  }
-
-  {
-    ObjectIterator it(left);
-
-    while (it.valid()) {
-      auto key = it.key(true).copyString();
-      auto found = rightValues.find(key);
-
-      if (found == rightValues.end()) {
-        // use left value
-        b.add(key, it.value());
-      } else if (mergeValues && it.value().isObject() &&
-                 (*found).second.isObject()) {
-        // merge both values
-        auto& value = (*found).second;
-        if (!nullMeansRemove || (!value.isNone() && !value.isNull())) {
-          Builder sub = Collection::merge(it.value(), value, true, nullMeansRemove);
-          b.add(key, sub.slice());
-        }
-        // clear the value in the map so its not added again
-        (*found).second = Slice();
-      } else {
-        // use right value
-        auto& value = (*found).second;
-        if (!nullMeansRemove || (!value.isNone() && !value.isNull())) {
-          b.add(key, value);
-        }
-        // clear the value in the map so its not added again
-        (*found).second = Slice();
-      }
-      it.next();
-    }
-  }
-
-  // add remaining values that were only in right
-  for (auto& it : rightValues) {
-    auto& s = it.second;
-    if (s.isNone()) {
-      continue;
-    }
-    if (nullMeansRemove && s.isNull()) {
-      continue;
-    }
-    b.add(std::move(it.first), s);
-  }
-
-  b.close();
+  Collection::merge(b, left, right, mergeValues, nullMeansRemove);
   return b;
 }
 
 Builder& Collection::merge(Builder& builder, Slice const& left, Slice const& right,
-                          bool mergeValues, bool nullMeansRemove) {
+                           bool mergeValues, bool nullMeansRemove) {
   if (!left.isObject() || !right.isObject()) {
     throw Exception(Exception::InvalidValueType, "Expecting type Object");
   }
@@ -435,6 +379,7 @@ Builder& Collection::merge(Builder& builder, Slice const& left, Slice const& right,
         // merge both values
         auto& value = (*found).second;
         if (!nullMeansRemove || (!value.isNone() && !value.isNull())) {
+          builder.add(Value(key));
           Collection::merge(builder, it.value(), value, true, nullMeansRemove);
         }
         // clear the value in the map so its not added again
diff --git a/CHANGELOG b/CHANGELOG
index 6dbbabf2b0..750b014bc8 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,9 @@
 devel
 -----
 
+* potential fix for issue #3581: Unexpected "rocksdb unique constraint
+  violated" with unique hash index
+
 * fix agency precondition check for complex objects
 
 * introduce `enforceReplicationFactor`: An optional parameter controlling
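The Collection.cpp change is twofold: the Slice-based merge() now delegates to the Builder-based overload instead of duplicating its body, and the Builder-based overload gains the previously missing builder.add(Value(key)) before the recursive sub-merge. When streaming into an open Object, the velocypack Builder expects each attribute name to be written before its value, and the recursive call only emits the value part. A minimal standalone sketch of the resulting behavior (example code, not part of the patch):

```cpp
#include <iostream>
#include <velocypack/Builder.h>
#include <velocypack/Collection.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>

using namespace arangodb::velocypack;

int main() {
  Builder left;
  left.add(Value(ValueType::Object));
  left.add("sub", Value(ValueType::Object));
  left.add("x", Value(1));
  left.close();  // closes "sub"
  left.close();  // closes outer object -> {"sub":{"x":1}}

  Builder right;
  right.add(Value(ValueType::Object));
  right.add("sub", Value(ValueType::Object));
  right.add("y", Value(2));
  right.close();
  right.close();  // {"sub":{"y":2}}

  // with mergeValues = true, a key present on both sides whose values are
  // objects is merged recursively rather than overwritten by the right side
  Builder merged = Collection::merge(left.slice(), right.slice(),
                                     /*mergeValues*/ true,
                                     /*nullMeansRemove*/ false);
  std::cout << merged.slice().toJson() << "\n";  // {"sub":{"x":1,"y":2}}
  return 0;
}
```

Before the fix, the recursive branch wrote the merged sub-object into the shared builder without first emitting the attribute name, corrupting the output; the separately built Slice-based overload did not have this bug, which is why unifying the two code paths also removes the duplication risk.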
diff --git a/arangod/RocksDBEngine/RocksDBEngine.cpp b/arangod/RocksDBEngine/RocksDBEngine.cpp
index 535d889a19..3403c3bd4e 100644
--- a/arangod/RocksDBEngine/RocksDBEngine.cpp
+++ b/arangod/RocksDBEngine/RocksDBEngine.cpp
@@ -323,19 +323,19 @@ void RocksDBEngine::start() {
     // _options.stats_dump_period_sec = 1;
   }
 
-  rocksdb::BlockBasedTableOptions table_options;
+  rocksdb::BlockBasedTableOptions tableOptions;
   if (opts->_blockCacheSize > 0) {
-    table_options.block_cache = rocksdb::NewLRUCache(opts->_blockCacheSize,
-        static_cast<int>(opts->_blockCacheShardBits));
-    //table_options.cache_index_and_filter_blocks = opts->_compactionReadaheadSize > 0;
+    tableOptions.block_cache = rocksdb::NewLRUCache(opts->_blockCacheSize,
+        static_cast<int>(opts->_blockCacheShardBits));
+    //tableOptions.cache_index_and_filter_blocks = opts->_compactionReadaheadSize > 0;
   } else {
-    table_options.no_block_cache = true;
+    tableOptions.no_block_cache = true;
  }
-  table_options.block_size = opts->_tableBlockSize;
-  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
+  tableOptions.block_size = opts->_tableBlockSize;
+  tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
 
   _options.table_factory.reset(
-      rocksdb::NewBlockBasedTableFactory(table_options));
+      rocksdb::NewBlockBasedTableFactory(tableOptions));
 
   _options.create_if_missing = true;
   _options.create_missing_column_families = true;
@@ -362,13 +362,17 @@ void RocksDBEngine::start() {
   rocksdb::ColumnFamilyOptions dynamicPrefCF(_options);
   dynamicPrefCF.prefix_extractor = std::make_shared<RocksDBPrefixExtractor>();
   // also use hash-search based SST file format
-  rocksdb::BlockBasedTableOptions tblo(table_options);
+  rocksdb::BlockBasedTableOptions tblo(tableOptions);
   tblo.index_type = rocksdb::BlockBasedTableOptions::IndexType::kHashSearch;
   dynamicPrefCF.table_factory = std::shared_ptr<rocksdb::TableFactory>(
       rocksdb::NewBlockBasedTableFactory(tblo));
 
   // velocypack based index variants with custom comparator
   rocksdb::ColumnFamilyOptions vpackFixedPrefCF(fixedPrefCF);
+  rocksdb::BlockBasedTableOptions tblo2(tableOptions);
+  tblo2.filter_policy.reset();  // intentionally no bloom filter here
+  vpackFixedPrefCF.table_factory = std::shared_ptr<rocksdb::TableFactory>(
+      rocksdb::NewBlockBasedTableFactory(tblo2));
   vpackFixedPrefCF.comparator = _vpackCmp.get();
 
   // create column families
diff --git a/arangod/RocksDBEngine/RocksDBHashIndex.cpp b/arangod/RocksDBEngine/RocksDBHashIndex.cpp
index f4a5551afc..6475df05e6 100644
--- a/arangod/RocksDBEngine/RocksDBHashIndex.cpp
+++ b/arangod/RocksDBEngine/RocksDBHashIndex.cpp
@@ -22,9 +22,10 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 #include "RocksDBHashIndex.h"
+#include "Basics/VelocyPackHelper.h"
+
 #include <velocypack/Iterator.h>
 #include <velocypack/velocypack-aliases.h>
-#include "Basics/VelocyPackHelper.h"
 
 using namespace arangodb;
diff --git a/arangod/RocksDBEngine/RocksDBMethods.h b/arangod/RocksDBEngine/RocksDBMethods.h
index 943269c53a..828ed208ca 100644
--- a/arangod/RocksDBEngine/RocksDBMethods.h
+++ b/arangod/RocksDBEngine/RocksDBMethods.h
@@ -95,7 +95,7 @@ class RocksDBMethods {
 
   // convenience and compatibility method
   arangodb::Result Get(rocksdb::ColumnFamilyHandle*, RocksDBKey const&,
-                      std::string*);
+                       std::string*);
 
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
diff --git a/arangod/RocksDBEngine/RocksDBPrefixExtractor.h b/arangod/RocksDBEngine/RocksDBPrefixExtractor.h
index e754e4f484..dbac239a6b 100644
--- a/arangod/RocksDBEngine/RocksDBPrefixExtractor.h
+++ b/arangod/RocksDBEngine/RocksDBPrefixExtractor.h
@@ -39,7 +39,7 @@ namespace arangodb {
 class RocksDBPrefixExtractor final : public rocksdb::SliceTransform {
  public:
   RocksDBPrefixExtractor() {}
-  ~RocksDBPrefixExtractor(){};
+  ~RocksDBPrefixExtractor() {}
 
   const char* Name() const { return "RocksDBPrefixExtractor"; }
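The substantive engine change is that the column family using the custom velocypack comparator now gets its own table options with the bloom filter disabled. Bloom filters in block-based SST files match on exact key bytes; under a custom comparator, two byte-wise different keys can compare equal, so a filter may wrongly report "not present" for an entry that is actually there, which is the kind of false negative the patch guards against. A standalone sketch of the same pattern with stock RocksDB (the database path and the "vpack" column-family name are illustrative, not ArangoDB's):

```cpp
#include <memory>
#include <string>
#include <vector>
#include <rocksdb/db.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/table.h>

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.create_missing_column_families = true;

  // base table options: bloom filters enabled for ordinary column families
  rocksdb::BlockBasedTableOptions tableOptions;
  tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(tableOptions));

  // the column family that would use a custom comparator gets a copy of the
  // table options with the filter policy cleared, so point lookups never
  // consult a byte-wise bloom filter
  rocksdb::ColumnFamilyOptions vpackCF(options);
  rocksdb::BlockBasedTableOptions noFilterOptions(tableOptions);
  noFilterOptions.filter_policy.reset();  // no bloom filter here
  vpackCF.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(noFilterOptions));

  std::vector<rocksdb::ColumnFamilyDescriptor> cfs = {
      {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions(options)},
      {"vpack", vpackCF}};
  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(rocksdb::DBOptions(options),
                                        "/tmp/example-db", cfs, &handles, &db);
  if (!s.ok()) return 1;
  for (auto* h : handles) delete h;
  delete db;
  return 0;
}
```

Note the design choice: rather than disabling bloom filters globally, only the affected column family pays the lookup cost, so the other column families keep their filter-accelerated point reads.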
diff --git a/js/common/tests/shell/shell-hash-index.js b/js/common/tests/shell/shell-hash-index.js
index 4fdcf0b64c..929abd2bb4 100644
--- a/js/common/tests/shell/shell-hash-index.js
+++ b/js/common/tests/shell/shell-hash-index.js
@@ -508,8 +508,98 @@ function HashIndexSuite() {
 
       var doc2 = collection.save({ a : "test3", b : 1});
       assertTrue(doc2._key !== "");
-    }
+    },
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test: uniqueness checks and lookups via a unique hash index
+////////////////////////////////////////////////////////////////////////////////
+
+    testUniquenessAndLookup : function () {
+      var idx = collection.ensureIndex({ type: "hash", unique: true, fields: ["value"] });
+
+      assertEqual("hash", idx.type);
+      assertEqual(true, idx.unique);
+      assertEqual(["value"], idx.fields);
+      assertEqual(true, idx.isNewlyCreated);
+
+      var bound = 1000;
+      var i;
+      for (i = -bound; i < bound; ++i) {
+        collection.insert({ value: i });
+      }
+
+      if (internal.db._engine().name === "rocksdb") {
+        internal.db._executeTransaction({
+          collections: { write: cn },
+          action: function(params) {
+            // need to run compaction in the rocksdb case, as lookups will
+            // use bloom filters for SST files afterwards, but not for memtables
+            require("internal").db[params.cn].compact();
+          },
+          params: { cn }
+        });
+      }
+
+      assertEqual(2 * bound, collection.count());
+
+      for (i = -bound; i < bound; ++i) {
+        var docs = collection.byExample({ value: i }).toArray();
+        assertEqual(1, docs.length);
+        assertEqual(i, docs[0].value);
+
+        collection.update(docs[0]._key, docs[0]);
+      }
+
+      for (i = -bound; i < bound; ++i) {
+        try {
+          collection.insert({ value: i });
+          fail();
+        } catch (err) {
+          assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, err.errorNum);
+        }
+      }
+    },
+
+    testUniquenessAndLookup2 : function () {
+      var idx = collection.ensureIndex({ type: "hash", unique: true, fields: ["value"] });
+
+      assertEqual("hash", idx.type);
+      assertEqual(true, idx.unique);
+      assertEqual(["value"], idx.fields);
+      assertEqual(true, idx.isNewlyCreated);
+
+      var i = 0;
+      while (i < 100000) {
+        for (var j = 0; j < 20; ++j) {
+          collection.insert({ value: i++ });
+        }
+        i *= 2;
+      }
+
+      if (internal.db._engine().name === "rocksdb") {
+        internal.db._executeTransaction({
+          collections: { write: cn },
+          action: function(params) {
+            // need to run compaction in the rocksdb case, as lookups will
+            // use bloom filters for SST files afterwards, but not for memtables
+            require("internal").db[params.cn].compact();
+          },
+          params: { cn }
+        });
+      }
+
+      i = 0;
+      while (i < 100000) {
+        for (j = 0; j < 20; ++j) {
+          var docs = collection.byExample({ value: i }).toArray();
+          assertEqual(1, docs.length);
+          assertEqual(i, docs[0].value);
+          collection.update(docs[0]._key, docs[0]);
+          ++i;
+        }
+        i *= 2;
+      }
+    }
   };
 }
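The explicit compact() in both regression tests is the interesting part: RocksDB point lookups against the memtable never consult bloom filters, while lookups against SST files do, so without a flush and compaction the tests would only exercise the filter-free read path and could never reproduce the bug. A standalone sketch of the two read paths (plain RocksDB, illustrative database path, not the ArangoDB test itself):

```cpp
#include <string>
#include <rocksdb/db.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/table.h>

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::BlockBasedTableOptions tableOptions;
  tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(tableOptions));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/bloom-example", &db);
  if (!s.ok()) return 1;

  db->Put(rocksdb::WriteOptions(), "key-1", "value-1");

  std::string value;
  // lookup #1: served from the memtable; no bloom filter is involved
  s = db->Get(rocksdb::ReadOptions(), "key-1", &value);
  if (!s.ok()) return 1;

  // force memtable contents into SST files so later reads hit filter blocks
  db->Flush(rocksdb::FlushOptions());
  db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);

  // lookup #2: now goes through the SST read path, including the bloom filter
  s = db->Get(rocksdb::ReadOptions(), "key-1", &value);
  if (!s.ok() || value != "value-1") return 1;

  delete db;
  return 0;
}
```

This mirrors why the tests only trigger the transaction-wrapped compact() when the engine reports "rocksdb": on the MMFiles engine there is no SST/bloom-filter read path to exercise.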