
potential fix for issue #3581: Unexpected "rocksdb unique constraint violated" with unique hash index (#3596)

Jan authored 2017-11-07 10:42:41 +01:00, committed by Frank Celler
parent 151c717d22
commit 0195ede1d8
7 changed files with 114 additions and 71 deletions

View File

@@ -343,68 +343,12 @@ Builder Collection::merge(Slice const& left, Slice const& right,
   }
 
   Builder b;
-  b.add(Value(ValueType::Object));
-
-  std::unordered_map<std::string, Slice> rightValues;
-  {
-    ObjectIterator it(right);
-    while (it.valid()) {
-      rightValues.emplace(it.key(true).copyString(), it.value());
-      it.next();
-    }
-  }
-
-  {
-    ObjectIterator it(left);
-    while (it.valid()) {
-      auto key = it.key(true).copyString();
-      auto found = rightValues.find(key);
-
-      if (found == rightValues.end()) {
-        // use left value
-        b.add(key, it.value());
-      } else if (mergeValues && it.value().isObject() &&
-                 (*found).second.isObject()) {
-        // merge both values
-        auto& value = (*found).second;
-        if (!nullMeansRemove || (!value.isNone() && !value.isNull())) {
-          Builder sub = Collection::merge(it.value(), value, true, nullMeansRemove);
-          b.add(key, sub.slice());
-        }
-        // clear the value in the map so its not added again
-        (*found).second = Slice();
-      } else {
-        // use right value
-        auto& value = (*found).second;
-        if (!nullMeansRemove || (!value.isNone() && !value.isNull())) {
-          b.add(key, value);
-        }
-        // clear the value in the map so its not added again
-        (*found).second = Slice();
-      }
-      it.next();
-    }
-  }
-
-  // add remaining values that were only in right
-  for (auto& it : rightValues) {
-    auto& s = it.second;
-    if (s.isNone()) {
-      continue;
-    }
-    if (nullMeansRemove && s.isNull()) {
-      continue;
-    }
-    b.add(std::move(it.first), s);
-  }
-
-  b.close();
+  Collection::merge(b, left, right, mergeValues, nullMeansRemove);
   return b;
 }
 
 Builder& Collection::merge(Builder& builder, Slice const& left, Slice const& right,
-                          bool mergeValues, bool nullMeansRemove) {
+                           bool mergeValues, bool nullMeansRemove) {
   if (!left.isObject() || !right.isObject()) {
     throw Exception(Exception::InvalidValueType, "Expecting type Object");
   }

@@ -435,6 +379,7 @@ Builder& Collection::merge(Builder& builder, Slice const& left, Slice const& right,
         // merge both values
         auto& value = (*found).second;
         if (!nullMeansRemove || (!value.isNone() && !value.isNull())) {
+          builder.add(Value(key));
           Collection::merge(builder, it.value(), value, true, nullMeansRemove);
         }
         // clear the value in the map so its not added again
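The velocypack change above has two parts: the Slice-based `Collection::merge` no longer duplicates the merge loop and instead delegates to the `Builder&` overload, and that overload now writes the attribute key (`builder.add(Value(key));`) before recursing into a shared subobject, so the merged subobject ends up under its key rather than being emitted without one. A minimal sketch of the public entry point, assuming the velocypack headers and library are available; the output noted in the comment reflects the fixed behavior:

```cpp
#include <iostream>

#include <velocypack/Builder.h>
#include <velocypack/Collection.h>
#include <velocypack/Parser.h>
#include <velocypack/Slice.h>

using namespace arangodb::velocypack;

int main() {
  // two documents that share the nested attribute "a"
  auto left = Parser::fromJson(R"({"a":{"x":1},"b":2})");
  auto right = Parser::fromJson(R"({"a":{"y":3},"c":4})");

  // mergeValues=true merges shared subobjects recursively; with the fix
  // above the recursion writes the key before descending, so this prints
  // {"a":{"x":1,"y":3},"b":2,"c":4}
  Builder merged = Collection::merge(left->slice(), right->slice(),
                                     /*mergeValues*/ true,
                                     /*nullMeansRemove*/ false);
  std::cout << merged.slice().toJson() << std::endl;
  return 0;
}
```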

View File

@@ -1,6 +1,9 @@
 devel
 -----
 
+* potential fix for issue #3581: Unexpected "rocksdb unique constraint
+  violated" with unique hash index
+
 * fix agency precondition check for complex objects
 
 * introduce `enforceReplicationFactor`: An optional parameter controlling

View File

@@ -323,19 +323,19 @@ void RocksDBEngine::start() {
     // _options.stats_dump_period_sec = 1;
   }
 
-  rocksdb::BlockBasedTableOptions table_options;
+  rocksdb::BlockBasedTableOptions tableOptions;
   if (opts->_blockCacheSize > 0) {
-    table_options.block_cache = rocksdb::NewLRUCache(opts->_blockCacheSize,
-                                static_cast<int>(opts->_blockCacheShardBits));
-    //table_options.cache_index_and_filter_blocks = opts->_compactionReadaheadSize > 0;
+    tableOptions.block_cache = rocksdb::NewLRUCache(opts->_blockCacheSize,
+                               static_cast<int>(opts->_blockCacheShardBits));
+    //tableOptions.cache_index_and_filter_blocks = opts->_compactionReadaheadSize > 0;
   } else {
-    table_options.no_block_cache = true;
+    tableOptions.no_block_cache = true;
   }
-  table_options.block_size = opts->_tableBlockSize;
-  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
+  tableOptions.block_size = opts->_tableBlockSize;
+  tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
 
   _options.table_factory.reset(
-      rocksdb::NewBlockBasedTableFactory(table_options));
+      rocksdb::NewBlockBasedTableFactory(tableOptions));
 
   _options.create_if_missing = true;
   _options.create_missing_column_families = true;

@@ -362,13 +362,17 @@ void RocksDBEngine::start() {
   rocksdb::ColumnFamilyOptions dynamicPrefCF(_options);
   dynamicPrefCF.prefix_extractor = std::make_shared<RocksDBPrefixExtractor>();
   // also use hash-search based SST file format
-  rocksdb::BlockBasedTableOptions tblo(table_options);
+  rocksdb::BlockBasedTableOptions tblo(tableOptions);
   tblo.index_type = rocksdb::BlockBasedTableOptions::IndexType::kHashSearch;
   dynamicPrefCF.table_factory = std::shared_ptr<rocksdb::TableFactory>(
       rocksdb::NewBlockBasedTableFactory(tblo));
 
   // velocypack based index variants with custom comparator
   rocksdb::ColumnFamilyOptions vpackFixedPrefCF(fixedPrefCF);
+  rocksdb::BlockBasedTableOptions tblo2(tableOptions);
+  tblo2.filter_policy.reset();  // intentionally no bloom filter here
+  vpackFixedPrefCF.table_factory = std::shared_ptr<rocksdb::TableFactory>(
+      rocksdb::NewBlockBasedTableFactory(tblo2));
   vpackFixedPrefCF.comparator = _vpackCmp.get();
 
   // create column families
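The second hunk is the substance of the fix on the storage-engine side: the column families that compare keys with the custom velocypack comparator (`_vpackCmp`) now get their own table factory with the bloom filter disabled. A bloom filter is built from raw key bytes, so under a comparator where byte-different keys can compare as equal, a point lookup may be rejected by the filter even though a matching key exists, which is one way a unique-index check can misfire. A hedged sketch of the pattern against the plain RocksDB API; `makeVPackColumnFamily` and its parameters are illustrative names, not from the commit:

```cpp
#include <rocksdb/comparator.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

// Start from a shared table configuration and strip the bloom filter for
// column families that use a custom comparator (illustrative helper).
rocksdb::ColumnFamilyOptions makeVPackColumnFamily(
    rocksdb::ColumnFamilyOptions base,
    rocksdb::BlockBasedTableOptions tableOptions,
    const rocksdb::Comparator* vpackComparator) {
  // bloom filters hash raw key bytes; with a comparator under which
  // byte-different keys may compare equal, a lookup could be filtered
  // out even though an equal key is present in the SST file
  tableOptions.filter_policy.reset();
  base.table_factory.reset(rocksdb::NewBlockBasedTableFactory(tableOptions));
  base.comparator = vpackComparator;
  return base;
}
```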

View File

@@ -22,9 +22,10 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 #include "RocksDBHashIndex.h"
-#include "Basics/VelocyPackHelper.h"
 
+#include <velocypack/Iterator.h>
 #include <velocypack/velocypack-aliases.h>
 
+#include "Basics/VelocyPackHelper.h"
+
 using namespace arangodb;

View File

@@ -95,7 +95,7 @@ class RocksDBMethods {
 
   // convenience and compatibility method
   arangodb::Result Get(rocksdb::ColumnFamilyHandle*, RocksDBKey const&,
-                       std::string*);
+                          std::string*);
 
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE

View File

@@ -39,7 +39,7 @@ namespace arangodb {
 
 class RocksDBPrefixExtractor final : public rocksdb::SliceTransform {
  public:
   RocksDBPrefixExtractor() {}
-  ~RocksDBPrefixExtractor(){};
+  ~RocksDBPrefixExtractor() {}
 
   const char* Name() const { return "RocksDBPrefixExtractor"; }
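The change here is cosmetic (dropping a stray semicolon after the destructor body), but the class matters for the hunks above: `RocksDBPrefixExtractor` implements RocksDB's `SliceTransform` interface and is installed as the `prefix_extractor` of `dynamicPrefCF`. A sketch of that interface under the assumption of a fixed 4-byte prefix; the real extractor's key layout is not visible in this diff:

```cpp
#include <rocksdb/slice.h>
#include <rocksdb/slice_transform.h>

// Illustrative fixed-length prefix extractor (4 bytes is an assumption
// for the sketch, not the layout ArangoDB actually uses).
class FixedPrefix final : public rocksdb::SliceTransform {
 public:
  const char* Name() const override { return "FixedPrefix"; }

  // map a full key to the prefix that index blocks and filters are built on
  rocksdb::Slice Transform(const rocksdb::Slice& key) const override {
    return rocksdb::Slice(key.data(), 4);
  }

  // only keys long enough to carry a prefix are in the domain
  bool InDomain(const rocksdb::Slice& key) const override {
    return key.size() >= 4;
  }

  // a valid output of Transform() has exactly the prefix length
  bool InRange(const rocksdb::Slice& dst) const override {
    return dst.size() == 4;
  }
};
```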

View File

@@ -508,8 +508,98 @@ function HashIndexSuite() {
         var doc2 = collection.save({ a : "test3", b : 1});
         assertTrue(doc2._key !== "");
       }
     },
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test: uniqueness and lookup
+////////////////////////////////////////////////////////////////////////////////
+
+    testUniquenessAndLookup : function () {
+      var idx = collection.ensureIndex({ type: "hash", unique: true, fields: ["value"] });
+      assertEqual("hash", idx.type);
+      assertEqual(true, idx.unique);
+      assertEqual(["value"], idx.fields);
+      assertEqual(true, idx.isNewlyCreated);
+
+      var bound = 1000;
+      var i;
+      for (i = -bound; i < bound; ++i) {
+        collection.insert({ value: i });
+      }
+
+      if (internal.db._engine().name === "rocksdb") {
+        internal.db._executeTransaction({
+          collections: { write: cn },
+          action: function(params) {
+            // need to run compaction in the rocksdb case, as the lookups
+            // may use bloom filters afterwards but not for memtables
+            require("internal").db[params.cn].compact();
+          },
+          params: { cn }
+        });
+      }
+
+      assertEqual(2 * bound, collection.count());
+
+      for (i = -bound; i < bound; ++i) {
+        var docs = collection.byExample({ value: i }).toArray();
+        assertEqual(1, docs.length);
+        assertEqual(i, docs[0].value);
+        collection.update(docs[0]._key, docs[0]);
+      }
+
+      for (i = -bound; i < bound; ++i) {
+        try {
+          collection.insert({ value: i });
+          fail();
+        } catch (err) {
+          assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, err.errorNum);
+        }
+      }
+    },
+
+    testUniquenessAndLookup2 : function () {
+      var idx = collection.ensureIndex({ type: "hash", unique: true, fields: ["value"] });
+      assertEqual("hash", idx.type);
+      assertEqual(true, idx.unique);
+      assertEqual(["value"], idx.fields);
+      assertEqual(true, idx.isNewlyCreated);
+
+      var i = 0;
+      while (i < 100000) {
+        for (var j = 0; j < 20; ++j) {
+          collection.insert({ value: i++ });
+        }
+        i *= 2;
+      }
+
+      if (internal.db._engine().name === "rocksdb") {
+        internal.db._executeTransaction({
+          collections: { write: cn },
+          action: function(params) {
+            // need to run compaction in the rocksdb case, as the lookups
+            // may use bloom filters afterwards but not for memtables
+            require("internal").db[params.cn].compact();
+          },
+          params: { cn }
+        });
+      }
+
+      i = 0;
+      while (i < 100000) {
+        for (j = 0; j < 20; ++j) {
+          var docs = collection.byExample({ value: i }).toArray();
+          assertEqual(1, docs.length);
+          assertEqual(i, docs[0].value);
+          collection.update(docs[0]._key, docs[0]);
+          ++i;
+        }
+        i *= 2;
+      }
+    }
   };
 }
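Both new tests compact the collection on the RocksDB engine before the lookup phase. As the in-test comment notes, point lookups consult bloom filters for SST files but not for memtables, so the faulty filter path only becomes reachable once the inserted documents have been flushed and compacted out of the memtables. A standalone sketch of that mechanic against the plain RocksDB C++ API; the database path and keys are illustrative:

```cpp
#include <cassert>
#include <string>

#include <rocksdb/db.h>

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/bloom-repro", &db);
  assert(s.ok());

  s = db->Put(rocksdb::WriteOptions(), "key", "value");
  assert(s.ok());

  // while "key" only lives in the memtable, a Get() never touches the
  // SST bloom filters; compacting forces the data into SST files
  s = db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
  assert(s.ok());

  // this read now goes through the table reader and its bloom filter
  std::string value;
  s = db->Get(rocksdb::ReadOptions(), "key", &value);
  assert(s.ok() && value == "value");

  delete db;
  return 0;
}
```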