1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

This commit is contained in:
jsteemann 2017-05-30 16:39:48 +02:00
commit 2961f33a4b
22 changed files with 279 additions and 303 deletions

View File

@ -634,7 +634,7 @@ struct DMIDGraphFormat : public GraphFormat<DMIDValue, float> {
b.add(_resultField, VPackValue(VPackValueType::Array)); b.add(_resultField, VPackValue(VPackValueType::Array));
for (std::pair<PregelID, float> const& pair : ptr->membershipDegree) { for (std::pair<PregelID, float> const& pair : ptr->membershipDegree) {
b.openArray(); b.openArray();
b.add(VPackValue(pair.first.key)); b.add(VPackValue(arangodb::basics::StringUtils::int64(pair.first.key)));
b.add(VPackValue(pair.second)); b.add(VPackValue(pair.second));
b.close(); b.close();
} }

View File

@ -192,12 +192,22 @@ struct SLPAGraphFormat : public GraphFormat<SLPAValue, int8_t> {
} else if (vec.size() == 1 || maxCommunities == 1) { } else if (vec.size() == 1 || maxCommunities == 1) {
b.add(resField, VPackValue(vec[0].first)); b.add(resField, VPackValue(vec[0].first));
} else { } else {
b.add(resField, VPackValue(VPackValueType::Object)); // output for use with the DMID/Metrics code
for (unsigned c = 0; c < vec.size() && c < maxCommunities; c++) { b.add(resField, VPackValue(VPackValueType::Array));
b.add(arangodb::basics::StringUtils::itoa(vec[c].first), for (unsigned c = 0; c < vec.size() && c < maxCommunities;
VPackValue(vec[c].second)); c++) {
b.openArray();
b.add(VPackValue(vec[c].first));
b.add(VPackValue(vec[c].second));
b.close();
} }
b.close(); b.close();
/*b.add(resField, VPackValue(VPackValueType::Object));
for (unsigned c = 0; c < vec.size() && c < maxCommunities; c++) {
b.add(arangodb::basics::StringUtils::itoa(vec[c].first),
VPackValue(vec[c].second));
}
b.close();*/
} }
} }
return true; return true;

View File

@ -631,9 +631,10 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
TRI_ASSERT(_objectId != 0); TRI_ASSERT(_objectId != 0);
TRI_voc_cid_t cid = _logicalCollection->cid(); TRI_voc_cid_t cid = _logicalCollection->cid();
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx); RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
// delete documents // delete documents
RocksDBMethods* mthd = state->rocksdbMethods(); RocksDBMethods* mthd;
mthd = state->rocksdbMethods();
RocksDBKeyBounds documentBounds = RocksDBKeyBounds documentBounds =
RocksDBKeyBounds::CollectionDocuments(this->objectId()); RocksDBKeyBounds::CollectionDocuments(this->objectId());

View File

@ -187,6 +187,7 @@ void iterateBounds(
rocksdb::ReadOptions options = rocksdb::ReadOptions()) { rocksdb::ReadOptions options = rocksdb::ReadOptions()) {
rocksdb::Slice const end = bounds.end(); rocksdb::Slice const end = bounds.end();
options.iterate_upper_bound = &end;// safe to use on rocksdb::DB directly options.iterate_upper_bound = &end;// safe to use on rocksdb::DB directly
options.prefix_same_as_start = true;
std::unique_ptr<rocksdb::Iterator> it(globalRocksDB()->NewIterator(options, handle)); std::unique_ptr<rocksdb::Iterator> it(globalRocksDB()->NewIterator(options, handle));
for (it->Seek(bounds.start()); it->Valid(); it->Next()) { for (it->Seek(bounds.start()); it->Valid(); it->Next()) {
callback(it.get()); callback(it.get());

View File

@ -328,30 +328,21 @@ void RocksDBEdgeIndexIterator::lookupInRocksDB(StringRef fromTo) {
rocksdb::Comparator const* cmp = _index->comparator(); rocksdb::Comparator const* cmp = _index->comparator();
_builder.openArray(); _builder.openArray();
RocksDBToken token;
auto end = _bounds.end(); auto end = _bounds.end();
while (_iterator->Valid() && while (_iterator->Valid() && (cmp->Compare(_iterator->key(), end) < 0)) {
(cmp->Compare(_iterator->key(), end) < 0)) { TRI_voc_rid_t revisionId = RocksDBKey::revisionId(_iterator->key());
StringRef edgeKey = RocksDBKey::primaryKey(_iterator->key()); RocksDBToken token(revisionId);
Result res = rocksColl->lookupDocumentToken(_trx, edgeKey, token);
if (res.ok()) { ManagedDocumentResult mmdr;
ManagedDocumentResult mmdr; if (rocksColl->readDocument(_trx, token, mmdr)) {
if (rocksColl->readDocument(_trx, token, mmdr)) { _builder.add(VPackValue(token.revisionId()));
_builder.add(VPackValue(token.revisionId())); VPackSlice doc(mmdr.vpack());
VPackSlice doc(mmdr.vpack()); TRI_ASSERT(doc.isObject());
TRI_ASSERT(doc.isObject()); _builder.add(doc);
_builder.add(doc);
} else {
// Data Inconsistency.
// We have a revision id without a document...
TRI_ASSERT(false);
}
#ifdef USE_MAINTAINER_MODE
} else { } else {
// Index inconsistency, we indexed a primaryKey => revision that is // Data Inconsistency.
// not known any more // We have a revision id without a document...
TRI_ASSERT(res.ok()); TRI_ASSERT(false);
#endif
} }
_iterator->Next(); _iterator->Next();
} }
@ -455,12 +446,10 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
int RocksDBEdgeIndex::insert(transaction::Methods* trx, int RocksDBEdgeIndex::insert(transaction::Methods* trx,
TRI_voc_rid_t revisionId, VPackSlice const& doc, TRI_voc_rid_t revisionId, VPackSlice const& doc,
bool isRollback) { bool isRollback) {
VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.get(_directionAttr); VPackSlice fromTo = doc.get(_directionAttr);
TRI_ASSERT(primaryKey.isString() && fromTo.isString()); TRI_ASSERT(fromTo.isString());
auto fromToRef = StringRef(fromTo); auto fromToRef = StringRef(fromTo);
RocksDBKey key = RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef, revisionId);
RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey));
// blacklist key in cache // blacklist key in cache
blackListKey(fromToRef); blackListKey(fromToRef);
@ -486,12 +475,11 @@ int RocksDBEdgeIndex::insertRaw(RocksDBMethods*, TRI_voc_rid_t,
int RocksDBEdgeIndex::remove(transaction::Methods* trx, int RocksDBEdgeIndex::remove(transaction::Methods* trx,
TRI_voc_rid_t revisionId, VPackSlice const& doc, TRI_voc_rid_t revisionId, VPackSlice const& doc,
bool isRollback) { bool isRollback) {
VPackSlice primaryKey = doc.get(StaticStrings::KeyString); // VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.get(_directionAttr); VPackSlice fromTo = doc.get(_directionAttr);
auto fromToRef = StringRef(fromTo); auto fromToRef = StringRef(fromTo);
TRI_ASSERT(primaryKey.isString() && fromTo.isString()); TRI_ASSERT(fromTo.isString());
RocksDBKey key = RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef, revisionId);
RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey));
// blacklist key in cache // blacklist key in cache
blackListKey(fromToRef); blackListKey(fromToRef);
@ -521,12 +509,12 @@ void RocksDBEdgeIndex::batchInsert(
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) { std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
RocksDBMethods* mthd = rocksutils::toRocksMethods(trx); RocksDBMethods* mthd = rocksutils::toRocksMethods(trx);
for (std::pair<TRI_voc_rid_t, VPackSlice> const& doc : documents) { for (std::pair<TRI_voc_rid_t, VPackSlice> const& doc : documents) {
VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString); // VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.second.get(_directionAttr); VPackSlice fromTo = doc.second.get(_directionAttr);
TRI_ASSERT(primaryKey.isString() && fromTo.isString()); TRI_ASSERT(fromTo.isString());
auto fromToRef = StringRef(fromTo); auto fromToRef = StringRef(fromTo);
RocksDBKey key = RocksDBKey key =
RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey)); RocksDBKey::EdgeIndexValue(_objectId, fromToRef, doc.first);
blackListKey(fromToRef); blackListKey(fromToRef);
Result r = mthd->Put(_cf, rocksdb::Slice(key.string()), rocksdb::Slice(), Result r = mthd->Put(_cf, rocksdb::Slice(key.string()), rocksdb::Slice(),
@ -648,7 +636,8 @@ void RocksDBEdgeIndex::warmup(arangodb::transaction::Methods* trx) {
return; return;
} }
auto rocksColl = toRocksDBCollection(_collection); auto rocksColl = toRocksDBCollection(_collection);
uint64_t expectedCount = static_cast<uint64_t>(selectivityEstimate() * rocksColl->numberDocuments()); uint64_t expectedCount = static_cast<uint64_t>(selectivityEstimate() *
rocksColl->numberDocuments());
// Prepare the cache to be resized for this amount of objects to be inserted. // Prepare the cache to be resized for this amount of objects to be inserted.
_cache->sizeHint(expectedCount); _cache->sizeHint(expectedCount);
@ -657,77 +646,77 @@ void RocksDBEdgeIndex::warmup(arangodb::transaction::Methods* trx) {
std::string previous = ""; std::string previous = "";
VPackBuilder builder; VPackBuilder builder;
ManagedDocumentResult mmdr; ManagedDocumentResult mmdr;
RocksDBToken token;
bool needsInsert = false; bool needsInsert = false;
rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) { rocksutils::iterateBounds(
auto key = it->key(); bounds,
StringRef v = RocksDBKey::vertexId(key); [&](rocksdb::Iterator* it) {
if (previous.empty()) { rocksdb::Slice key = it->key();
// First call. StringRef v = RocksDBKey::vertexId(key);
builder.clear(); if (previous.empty()) {
previous = v.toString(); // First call.
auto finding = _cache->find(previous.data(), (uint32_t)previous.size()); builder.clear();
if (finding.found()) { previous = v.toString();
needsInsert = false; auto finding =
} else { _cache->find(previous.data(), (uint32_t)previous.size());
needsInsert = true; if (finding.found()) {
builder.openArray(); needsInsert = false;
} } else {
needsInsert = true;
} builder.openArray();
}
if (v != previous) {
if (needsInsert) {
// Switch to next vertex id.
// Store what we have.
builder.close();
while(_cache->isResizing() || _cache->isMigrating()) {
// We should wait here, the cache will reject
// any inserts anyways.
usleep(10000);
} }
auto entry = cache::CachedValue::construct( if (v != previous) {
previous.data(), static_cast<uint32_t>(previous.size()), if (needsInsert) {
builder.slice().start(), // Switch to next vertex id.
static_cast<uint64_t>(builder.slice().byteSize())); // Store what we have.
if (!_cache->insert(entry)) { builder.close();
delete entry;
while (_cache->isResizing() || _cache->isMigrating()) {
// We should wait here, the cache will reject
// any inserts anyways.
usleep(10000);
}
auto entry = cache::CachedValue::construct(
previous.data(), static_cast<uint32_t>(previous.size()),
builder.slice().start(),
static_cast<uint64_t>(builder.slice().byteSize()));
if (!_cache->insert(entry)) {
delete entry;
}
builder.clear();
}
// Need to store
previous = v.toString();
auto finding =
_cache->find(previous.data(), (uint32_t)previous.size());
if (finding.found()) {
needsInsert = false;
} else {
needsInsert = true;
builder.openArray();
}
} }
builder.clear(); if (needsInsert) {
} TRI_voc_rid_t revisionId = RocksDBKey::revisionId(key);
// Need to store RocksDBToken token(revisionId);
previous = v.toString(); if (rocksColl->readDocument(trx, token, mmdr)) {
auto finding = _cache->find(previous.data(), (uint32_t)previous.size()); builder.add(VPackValue(token.revisionId()));
if (finding.found()) { VPackSlice doc(mmdr.vpack());
needsInsert = false; TRI_ASSERT(doc.isObject());
} else { builder.add(doc);
needsInsert = true;
builder.openArray();
}
}
if (needsInsert) {
StringRef edgeKey = RocksDBKey::primaryKey(key);
Result res = rocksColl->lookupDocumentToken(trx, edgeKey, token);
if (res.ok() && rocksColl->readDocument(trx, token, mmdr)) {
builder.add(VPackValue(token.revisionId()));
VPackSlice doc(mmdr.vpack());
TRI_ASSERT(doc.isObject());
builder.add(doc);
#ifdef USE_MAINTAINER_MODE #ifdef USE_MAINTAINER_MODE
} else { } else {
// Data Inconsistency. // Data Inconsistency.
// We have a revision id without a document... // We have a revision id without a document...
TRI_ASSERT(false); TRI_ASSERT(false);
#endif #endif
} }
} }
}, RocksDBColumnFamily::edge()); },
RocksDBColumnFamily::edge());
if (!previous.empty() && needsInsert) { if (!previous.empty() && needsInsert) {
// We still have something to store // We still have something to store
@ -847,10 +836,13 @@ void RocksDBEdgeIndex::recalculateEstimates() {
_estimator->clear(); _estimator->clear();
auto bounds = RocksDBKeyBounds::EdgeIndex(_objectId); auto bounds = RocksDBKeyBounds::EdgeIndex(_objectId);
rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) { rocksutils::iterateBounds(bounds,
uint64_t hash = RocksDBEdgeIndex::HashForKey(it->key()); [&](rocksdb::Iterator* it) {
_estimator->insert(hash); uint64_t hash =
}, arangodb::RocksDBColumnFamily::edge()); RocksDBEdgeIndex::HashForKey(it->key());
_estimator->insert(hash);
},
arangodb::RocksDBColumnFamily::edge());
} }
Result RocksDBEdgeIndex::postprocessRemove(transaction::Methods* trx, Result RocksDBEdgeIndex::postprocessRemove(transaction::Methods* trx,

View File

@ -201,14 +201,13 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
RocksDBMethods* mthd = rocksutils::toRocksMethods(trx); RocksDBMethods* mthd = rocksutils::toRocksMethods(trx);
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
RocksDBValue value = RocksDBValue::IndexValue(); RocksDBValue value = RocksDBValue::IndexValue();
int res = TRI_ERROR_NO_ERROR; int res = TRI_ERROR_NO_ERROR;
// size_t const count = words.size(); // size_t const count = words.size();
for (std::string const& word : words) { for (std::string const& word : words) {
RocksDBKey key = RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
Result r = mthd->Put(_cf, key, value.string(), rocksutils::index); Result r = mthd->Put(_cf, key, value.string(), rocksutils::index);
if (!r.ok()) { if (!r.ok()) {
@ -220,14 +219,16 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
for (size_t j = 0; j < i; ++j) { for (size_t j = 0; j < i; ++j) {
std::string const& word = words[j]; std::string const& word = words[j];
RocksDBKey key = RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); RocksDBKey::FulltextIndexValue(_objectId, StringRef(word),
revisionId);
rtrx->Delete(key.string()); rtrx->Delete(key.string());
} }
}*/ }*/
return res; return res;
} }
int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch, TRI_voc_rid_t, int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch,
TRI_voc_rid_t revisionId,
arangodb::velocypack::Slice const& doc) { arangodb::velocypack::Slice const& doc) {
std::set<std::string> words = wordlist(doc); std::set<std::string> words = wordlist(doc);
if (words.empty()) { if (words.empty()) {
@ -236,12 +237,12 @@ int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch, TRI_voc_rid_t,
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString)); // StringRef docKey(doc.get(StaticStrings::KeyString));
RocksDBValue value = RocksDBValue::IndexValue(); RocksDBValue value = RocksDBValue::IndexValue();
for (std::string const& word : words) { for (std::string const& word : words) {
RocksDBKey key = RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
batch->Put(_cf, key, value.string()); batch->Put(_cf, key, value.string());
} }
@ -261,11 +262,10 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
RocksDBMethods* mthd = rocksutils::toRocksMethods(trx); RocksDBMethods* mthd = rocksutils::toRocksMethods(trx);
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
int res = TRI_ERROR_NO_ERROR; int res = TRI_ERROR_NO_ERROR;
for (std::string const& word : words) { for (std::string const& word : words) {
RocksDBKey key = RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
Result r = mthd->Delete(_cf, key); Result r = mthd->Delete(_cf, key);
if (!r.ok()) { if (!r.ok()) {
@ -276,15 +276,15 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
return res; return res;
} }
int RocksDBFulltextIndex::removeRaw(RocksDBMethods* batch, TRI_voc_rid_t, int RocksDBFulltextIndex::removeRaw(RocksDBMethods* batch,
TRI_voc_rid_t revisionId,
arangodb::velocypack::Slice const& doc) { arangodb::velocypack::Slice const& doc) {
std::set<std::string> words = wordlist(doc); std::set<std::string> words = wordlist(doc);
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
for (std::string const& word : words) { for (std::string const& word : words) {
RocksDBKey key = RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
batch->Delete(_cf, key); batch->Delete(_cf, key);
} }
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
@ -462,29 +462,25 @@ Result RocksDBFulltextIndex::executeQuery(transaction::Methods* trx,
FulltextQuery const& query, FulltextQuery const& query,
size_t maxResults, size_t maxResults,
VPackBuilder& builder) { VPackBuilder& builder) {
std::set<std::string> resultSet; std::set<TRI_voc_rid_t> resultSet;
for (FulltextQueryToken const& token : query) { for (FulltextQueryToken const& token : query) {
applyQueryToken(trx, token, resultSet); applyQueryToken(trx, token, resultSet);
} }
auto physical = static_cast<RocksDBCollection*>(_collection->getPhysical()); auto physical = static_cast<RocksDBCollection*>(_collection->getPhysical());
auto idx = physical->primaryIndex();
ManagedDocumentResult mmdr; ManagedDocumentResult mmdr;
if (maxResults == 0) { // 0 apparently means "all results" if (maxResults == 0) { // 0 apparently means "all results"
maxResults = SIZE_MAX; maxResults = SIZE_MAX;
} }
builder.openArray(); builder.openArray();
// get the first N results // get the first N results
std::set<std::string>::iterator it = resultSet.cbegin(); std::set<TRI_voc_rid_t>::iterator it = resultSet.cbegin();
while (maxResults > 0 && it != resultSet.cend()) { while (maxResults > 0 && it != resultSet.cend()) {
RocksDBToken token = idx->lookupKey(trx, StringRef(*it)); RocksDBToken token(*it);
if (token.revisionId()) { if (token.revisionId() && physical->readDocument(trx, token, mmdr)) {
if (physical->readDocument(trx, token, mmdr)) { mmdr.addToBuilder(builder, true);
mmdr.addToBuilder(builder, true); maxResults--;
maxResults--;
}
} }
++it; ++it;
} }
@ -503,9 +499,9 @@ static RocksDBKeyBounds MakeBounds(uint64_t oid,
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED); THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
} }
Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx, Result RocksDBFulltextIndex::applyQueryToken(
FulltextQueryToken const& token, transaction::Methods* trx, FulltextQueryToken const& token,
std::set<std::string>& resultSet) { std::set<TRI_voc_rid_t>& resultSet) {
RocksDBMethods* mthds = rocksutils::toRocksMethods(trx); RocksDBMethods* mthds = rocksutils::toRocksMethods(trx);
// why can't I have an assignment operator when I want one // why can't I have an assignment operator when I want one
RocksDBKeyBounds bounds = MakeBounds(_objectId, token); RocksDBKeyBounds bounds = MakeBounds(_objectId, token);
@ -518,7 +514,7 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
iter->Seek(bounds.start()); iter->Seek(bounds.start());
// set is used to perform an intersection with the result set // set is used to perform an intersection with the result set
std::set<std::string> intersect; std::set<TRI_voc_rid_t> intersect;
// apply left to right logic, merging all current results with ALL previous // apply left to right logic, merging all current results with ALL previous
while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) { while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) {
TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key())); TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key()));
@ -528,13 +524,13 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
return rocksutils::convertStatus(s); return rocksutils::convertStatus(s);
} }
StringRef key = RocksDBKey::primaryKey(iter->key()); TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key());
if (token.operation == FulltextQueryToken::AND) { if (token.operation == FulltextQueryToken::AND) {
intersect.insert(key.toString()); intersect.insert(revisionId);
} else if (token.operation == FulltextQueryToken::OR) { } else if (token.operation == FulltextQueryToken::OR) {
resultSet.insert(key.toString()); resultSet.insert(revisionId);
} else if (token.operation == FulltextQueryToken::EXCLUDE) { } else if (token.operation == FulltextQueryToken::EXCLUDE) {
resultSet.erase(key.toString()); resultSet.erase(revisionId);
} }
iter->Next(); iter->Next();
} }
@ -542,7 +538,7 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
if (resultSet.empty() || intersect.empty()) { if (resultSet.empty() || intersect.empty()) {
resultSet.clear(); resultSet.clear();
} else { } else {
std::set<std::string> output; std::set<TRI_voc_rid_t> output;
std::set_intersection(resultSet.begin(), resultSet.end(), std::set_intersection(resultSet.begin(), resultSet.end(),
intersect.begin(), intersect.end(), intersect.begin(), intersect.end(),
std::inserter(output, output.begin())); std::inserter(output, output.begin()));

View File

@ -141,7 +141,7 @@ class RocksDBFulltextIndex final : public RocksDBIndex {
arangodb::Result applyQueryToken(transaction::Methods* trx, arangodb::Result applyQueryToken(transaction::Methods* trx,
FulltextQueryToken const&, FulltextQueryToken const&,
std::set<std::string>& resultSet); std::set<TRI_voc_rid_t>& resultSet);
}; };
} // namespace arangodb } // namespace arangodb

View File

@ -60,16 +60,16 @@ RocksDBKey RocksDBKey::PrimaryIndexValue(uint64_t indexId,
RocksDBKey RocksDBKey::EdgeIndexValue(uint64_t indexId, RocksDBKey RocksDBKey::EdgeIndexValue(uint64_t indexId,
arangodb::StringRef const& vertexId, arangodb::StringRef const& vertexId,
arangodb::StringRef const& primaryKey) { TRI_voc_rid_t revisionId) {
return RocksDBKey(RocksDBEntryType::EdgeIndexValue, indexId, vertexId, return RocksDBKey(RocksDBEntryType::EdgeIndexValue, indexId, vertexId,
primaryKey); revisionId);
} }
RocksDBKey RocksDBKey::IndexValue(uint64_t indexId, RocksDBKey RocksDBKey::IndexValue(uint64_t indexId,
arangodb::StringRef const& primaryKey, VPackSlice const& indexValues,
VPackSlice const& indexValues) { TRI_voc_rid_t revisionId) {
return RocksDBKey(RocksDBEntryType::IndexValue, indexId, primaryKey, return RocksDBKey(RocksDBEntryType::IndexValue, indexId, indexValues,
indexValues); revisionId);
} }
RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId, RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId,
@ -79,18 +79,20 @@ RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId,
RocksDBKey RocksDBKey::FulltextIndexValue(uint64_t indexId, RocksDBKey RocksDBKey::FulltextIndexValue(uint64_t indexId,
arangodb::StringRef const& word, arangodb::StringRef const& word,
arangodb::StringRef const& primaryKey) { TRI_voc_rid_t revisionId) {
return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word, primaryKey); return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word,
revisionId);
} }
RocksDBKey RocksDBKey::GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot) { RocksDBKey RocksDBKey::GeoIndexValue(uint64_t indexId, int32_t offset,
bool isSlot) {
RocksDBKey key(RocksDBEntryType::GeoIndexValue); RocksDBKey key(RocksDBEntryType::GeoIndexValue);
size_t length = sizeof(char) + sizeof(indexId) + sizeof(offset); size_t length = sizeof(char) + sizeof(indexId) + sizeof(offset);
key._buffer.reserve(length); key._buffer.reserve(length);
uint64ToPersistent(key._buffer, indexId); uint64ToPersistent(key._buffer, indexId);
uint64_t norm = uint64_t(offset) << 32; uint64_t norm = uint64_t(offset) << 32;
norm |= isSlot ? 0xFFU : 0; //encode slot|pot in lowest bit norm |= isSlot ? 0xFFU : 0; // encode slot|pot in lowest bit
uint64ToPersistent(key._buffer, norm); uint64ToPersistent(key._buffer, norm);
return key; return key;
} }
@ -171,6 +173,7 @@ arangodb::StringRef RocksDBKey::primaryKey(RocksDBKey const& key) {
arangodb::StringRef RocksDBKey::primaryKey(rocksdb::Slice const& slice) { arangodb::StringRef RocksDBKey::primaryKey(rocksdb::Slice const& slice) {
return primaryKey(slice.data(), slice.size()); return primaryKey(slice.data(), slice.size());
} }
StringRef RocksDBKey::vertexId(RocksDBKey const& key) { StringRef RocksDBKey::vertexId(RocksDBKey const& key) {
return vertexId(key._buffer.data(), key._buffer.size()); return vertexId(key._buffer.data(), key._buffer.size());
} }
@ -191,8 +194,9 @@ std::pair<bool, int32_t> RocksDBKey::geoValues(rocksdb::Slice const& slice) {
TRI_ASSERT(slice.size() >= sizeof(char) + sizeof(uint64_t) * 2); TRI_ASSERT(slice.size() >= sizeof(char) + sizeof(uint64_t) * 2);
RocksDBEntryType type = static_cast<RocksDBEntryType>(*slice.data()); RocksDBEntryType type = static_cast<RocksDBEntryType>(*slice.data());
TRI_ASSERT(type == RocksDBEntryType::GeoIndexValue); TRI_ASSERT(type == RocksDBEntryType::GeoIndexValue);
uint64_t val = uint64FromPersistent(slice.data() + sizeof(char) + sizeof(uint64_t)); uint64_t val =
bool isSlot = ((val & 0xFFULL) > 0);// lowest byte is 0xFF if true uint64FromPersistent(slice.data() + sizeof(char) + sizeof(uint64_t));
bool isSlot = ((val & 0xFFULL) > 0); // lowest byte is 0xFF if true
return std::pair<bool, int32_t>(isSlot, (val >> 32)); return std::pair<bool, int32_t>(isSlot, (val >> 32));
} }
@ -275,27 +279,22 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second)
} }
RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& docKey, VPackSlice const& second, uint64_t third)
VPackSlice const& indexData)
: _type(type), _buffer() { : _type(type), _buffer() {
switch (_type) { switch (_type) {
case RocksDBEntryType::IndexValue: { case RocksDBEntryType::IndexValue: {
// Non-unique VPack index values are stored as follows: // Non-unique VPack index values are stored as follows:
// - Key: 6 + 8-byte object ID of index + VPack array with index value(s) // - Key: 6 + 8-byte object ID of index + VPack array with index value(s)
// + separator byte + primary key + primary key length // + revisionID
// - Value: empty // - Value: empty
size_t length = sizeof(char) + sizeof(uint64_t) + size_t length = sizeof(char) + sizeof(uint64_t) +
static_cast<size_t>(indexData.byteSize()) + sizeof(char) + static_cast<size_t>(second.byteSize()) + sizeof(uint64_t);
docKey.length() + sizeof(char);
_buffer.reserve(length); _buffer.reserve(length);
_buffer.push_back(static_cast<char>(_type)); _buffer.push_back(static_cast<char>(_type));
uint64ToPersistent(_buffer, first); uint64ToPersistent(_buffer, first);
_buffer.append(reinterpret_cast<char const*>(indexData.begin()), _buffer.append(reinterpret_cast<char const*>(second.begin()),
static_cast<size_t>(indexData.byteSize())); static_cast<size_t>(second.byteSize()));
_buffer.push_back(_stringSeparator); uint64ToPersistent(_buffer, third);
_buffer.append(docKey.data(), docKey.length());
_buffer.push_back(static_cast<char>(docKey.length() & 0xff));
TRI_ASSERT(_buffer.size() == length); TRI_ASSERT(_buffer.size() == length);
break; break;
} }
@ -324,22 +323,19 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
} }
RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& second, arangodb::StringRef const& second, uint64_t third)
arangodb::StringRef const& third)
: _type(type), _buffer() { : _type(type), _buffer() {
switch (_type) { switch (_type) {
case RocksDBEntryType::FulltextIndexValue: case RocksDBEntryType::FulltextIndexValue:
case RocksDBEntryType::EdgeIndexValue: { case RocksDBEntryType::EdgeIndexValue: {
size_t length = sizeof(char) + sizeof(uint64_t) + second.size() + size_t length =
sizeof(char) + third.size() + sizeof(uint8_t); sizeof(char) + sizeof(uint64_t) + second.size() + sizeof(third);
_buffer.reserve(length); _buffer.reserve(length);
_buffer.push_back(static_cast<char>(_type)); _buffer.push_back(static_cast<char>(_type));
uint64ToPersistent(_buffer, first); uint64ToPersistent(_buffer, first);
_buffer.append(second.data(), second.length()); _buffer.append(second.data(), second.length());
_buffer.push_back(_stringSeparator); _buffer.push_back(_stringSeparator);
_buffer.append(third.data(), third.length()); uint64ToPersistent(_buffer, third);
TRI_ASSERT(third.size() <= 254);
_buffer.push_back(static_cast<char>(third.size() & 0xff));
break; break;
} }
@ -434,6 +430,13 @@ TRI_voc_rid_t RocksDBKey::revisionId(char const* data, size_t size) {
TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t)))); TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t))));
return uint64FromPersistent(data + sizeof(char) + sizeof(uint64_t)); return uint64FromPersistent(data + sizeof(char) + sizeof(uint64_t));
} }
case RocksDBEntryType::EdgeIndexValue:
case RocksDBEntryType::IndexValue:
case RocksDBEntryType::FulltextIndexValue: {
TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t))));
// last 8 bytes should be the revision
return uint64FromPersistent(data + size - sizeof(uint64_t));
}
default: default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR); THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR);
@ -451,14 +454,6 @@ arangodb::StringRef RocksDBKey::primaryKey(char const* data, size_t size) {
return arangodb::StringRef(data + sizeof(char) + sizeof(uint64_t), return arangodb::StringRef(data + sizeof(char) + sizeof(uint64_t),
keySize); keySize);
} }
case RocksDBEntryType::EdgeIndexValue:
case RocksDBEntryType::IndexValue:
case RocksDBEntryType::FulltextIndexValue: {
TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t) + sizeof(uint8_t)));
size_t keySize = static_cast<size_t>(data[size - 1]);
return arangodb::StringRef(data + (size - (keySize + sizeof(uint8_t))),
keySize);
}
default: default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR); THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR);
@ -471,11 +466,10 @@ StringRef RocksDBKey::vertexId(char const* data, size_t size) {
RocksDBEntryType type = static_cast<RocksDBEntryType>(data[0]); RocksDBEntryType type = static_cast<RocksDBEntryType>(data[0]);
switch (type) { switch (type) {
case RocksDBEntryType::EdgeIndexValue: { case RocksDBEntryType::EdgeIndexValue: {
TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t) + sizeof(uint8_t))); // 1 byte prefix + 8 byte objectID + _from/_to + 1 byte \0 + 8 byte rev
size_t keySize = static_cast<size_t>(data[size - 1]); TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t)) * 2);
size_t idSize = size - (sizeof(char) + sizeof(uint64_t) + sizeof(char) + size_t keySize = size - (sizeof(char) + sizeof(uint64_t)) * 2;
keySize + sizeof(uint8_t)); return StringRef(data + sizeof(char) + sizeof(uint64_t), keySize);
return StringRef(data + sizeof(char) + sizeof(uint64_t), idSize);
} }
default: default:

View File

@ -89,7 +89,7 @@ class RocksDBKey {
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
static RocksDBKey EdgeIndexValue(uint64_t indexId, static RocksDBKey EdgeIndexValue(uint64_t indexId,
arangodb::StringRef const& vertexId, arangodb::StringRef const& vertexId,
arangodb::StringRef const& primaryKey); TRI_voc_rid_t revisionId);
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for an entry in a user-defined, /// @brief Create a fully-specified key for an entry in a user-defined,
@ -98,9 +98,8 @@ class RocksDBKey {
/// The indexId is an object ID generated by the engine, rather than the /// The indexId is an object ID generated by the engine, rather than the
/// actual index ID. /// actual index ID.
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
static RocksDBKey IndexValue(uint64_t indexId, static RocksDBKey IndexValue(uint64_t indexId, VPackSlice const& indexValues,
arangodb::StringRef const& primaryKey, TRI_voc_rid_t revisionId);
VPackSlice const& indexValues);
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for an entry in a unique user-defined /// @brief Create a fully-specified key for an entry in a unique user-defined
@ -117,12 +116,13 @@ class RocksDBKey {
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
static RocksDBKey FulltextIndexValue(uint64_t indexId, static RocksDBKey FulltextIndexValue(uint64_t indexId,
arangodb::StringRef const& word, arangodb::StringRef const& word,
arangodb::StringRef const& primaryKey); TRI_voc_rid_t revisionId);
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for a geoIndexValue /// @brief Create a fully-specified key for a geoIndexValue
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
static RocksDBKey GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot); static RocksDBKey GeoIndexValue(uint64_t indexId, int32_t offset,
bool isSlot);
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for a view /// @brief Create a fully-specified key for a view
@ -161,7 +161,6 @@ class RocksDBKey {
return type(slice.data(), slice.size()); return type(slice.data(), slice.size());
} }
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
/// @brief Extracts the object id /// @brief Extracts the object id
/// ///
@ -257,14 +256,12 @@ class RocksDBKey {
RocksDBKey(RocksDBEntryType type, uint64_t first); RocksDBKey(RocksDBEntryType type, uint64_t first);
RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second); RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second);
RocksDBKey(RocksDBEntryType type, uint64_t first, VPackSlice const& slice); RocksDBKey(RocksDBEntryType type, uint64_t first, VPackSlice const& slice);
RocksDBKey(RocksDBEntryType type, uint64_t first, RocksDBKey(RocksDBEntryType type, uint64_t first, VPackSlice const& second,
arangodb::StringRef const& docKey, VPackSlice const& indexData); TRI_voc_rid_t third);
RocksDBKey(RocksDBEntryType type, uint64_t first, RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& second); arangodb::StringRef const& second);
RocksDBKey(RocksDBEntryType type, uint64_t first, std::string const& second, RocksDBKey(RocksDBEntryType type, uint64_t first,
std::string const& third); arangodb::StringRef const& second, uint64_t third);
RocksDBKey(RocksDBEntryType type, uint64_t first, arangodb::StringRef const& second,
arangodb::StringRef const& third);
private: private:
static inline RocksDBEntryType type(char const* data, size_t size) { static inline RocksDBEntryType type(char const* data, size_t size) {

View File

@ -231,6 +231,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type)
} }
} }
/// bounds to iterate over entire index
RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first) RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
: _type(type) { : _type(type) {
switch (_type) { switch (_type) {
@ -270,7 +271,6 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
_internals.reserve(length); _internals.reserve(length);
_internals.push_back(static_cast<char>(_type)); _internals.push_back(static_cast<char>(_type));
uint64ToPersistent(_internals.buffer(), first); uint64ToPersistent(_internals.buffer(), first);
uint64ToPersistent(_internals.buffer(), 0);
_internals.separate(); _internals.separate();
@ -301,6 +301,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
} }
} }
/// bounds to iterate over specified word or edge
RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& second) arangodb::StringRef const& second)
: _type(type) { : _type(type) {
@ -320,7 +321,8 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
uint64ToPersistent(_internals.buffer(), first); uint64ToPersistent(_internals.buffer(), first);
_internals.buffer().append(second.data(), second.length()); _internals.buffer().append(second.data(), second.length());
_internals.push_back(_stringSeparator); _internals.push_back(_stringSeparator);
_internals.push_back(0xFFU); uint64ToPersistent(_internals.buffer(), UINT64_MAX);
break; break;
} }
@ -329,6 +331,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
} }
} }
/// iterate over the specified bounds of the velocypack index
RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
VPackSlice const& second, VPackSlice const& second,
VPackSlice const& third) VPackSlice const& third)
@ -347,7 +350,6 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
uint64ToPersistent(_internals.buffer(), first); uint64ToPersistent(_internals.buffer(), first);
_internals.buffer().append(reinterpret_cast<char const*>(second.begin()), _internals.buffer().append(reinterpret_cast<char const*>(second.begin()),
static_cast<size_t>(second.byteSize())); static_cast<size_t>(second.byteSize()));
_internals.push_back(_stringSeparator);
_internals.separate(); _internals.separate();
@ -355,8 +357,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
uint64ToPersistent(_internals.buffer(), first); uint64ToPersistent(_internals.buffer(), first);
_internals.buffer().append(reinterpret_cast<char const*>(third.begin()), _internals.buffer().append(reinterpret_cast<char const*>(third.begin()),
static_cast<size_t>(third.byteSize())); static_cast<size_t>(third.byteSize()));
_internals.push_back(_stringSeparator + 1); // compare greater than uint64ToPersistent(_internals.buffer(), UINT64_MAX);
// actual key
break; break;
} }

View File

@ -80,11 +80,9 @@ static std::vector<arangodb::basics::AttributeName> const KeyAttribute{
RocksDBVPackIndexIterator::RocksDBVPackIndexIterator( RocksDBVPackIndexIterator::RocksDBVPackIndexIterator(
LogicalCollection* collection, transaction::Methods* trx, LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr, arangodb::RocksDBVPackIndex const* index, ManagedDocumentResult* mmdr, arangodb::RocksDBVPackIndex const* index,
arangodb::RocksDBPrimaryIndex* primaryIndex, bool reverse, bool reverse, VPackSlice const& left, VPackSlice const& right)
VPackSlice const& left, VPackSlice const& right)
: IndexIterator(collection, trx, mmdr, index), : IndexIterator(collection, trx, mmdr, index),
_index(index), _index(index),
_primaryIndex(primaryIndex),
_cmp(index->comparator()), _cmp(index->comparator()),
_reverse(reverse), _reverse(reverse),
_bounds(index->_unique ? RocksDBKeyBounds::UniqueIndexRange( _bounds(index->_unique ? RocksDBKeyBounds::UniqueIndexRange(
@ -141,14 +139,12 @@ bool RocksDBVPackIndexIterator::next(TokenCallback const& cb, size_t limit) {
while (limit > 0) { while (limit > 0) {
TRI_ASSERT(_index->objectId() == RocksDBKey::objectId(_iterator->key())); TRI_ASSERT(_index->objectId() == RocksDBKey::objectId(_iterator->key()));
StringRef primaryKey = _index->_unique TRI_voc_rid_t revisionId =
? RocksDBValue::primaryKey(_iterator->value()) _index->_unique ? RocksDBValue::revisionId(_iterator->value())
: RocksDBKey::primaryKey(_iterator->key()); : RocksDBKey::revisionId(_iterator->key());
RocksDBToken token(_primaryIndex->lookupKey(_trx, primaryKey)); cb(RocksDBToken(revisionId));
cb(token);
--limit; --limit;
if (_reverse) { if (_reverse) {
_iterator->Prev(); _iterator->Prev();
} else { } else {
@ -307,7 +303,6 @@ int RocksDBVPackIndex::fillElement(VPackBuilder& leased,
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
} }
StringRef key(doc.get(StaticStrings::KeyString));
if (_unique) { if (_unique) {
// Unique VPack index values are stored as follows: // Unique VPack index values are stored as follows:
// - Key: 7 + 8-byte object ID of index + VPack array with index // - Key: 7 + 8-byte object ID of index + VPack array with index
@ -323,20 +318,21 @@ int RocksDBVPackIndex::fillElement(VPackBuilder& leased,
// + separator (NUL) byte + primary key // + separator (NUL) byte + primary key
// - Value: empty // - Value: empty
elements.emplace_back( elements.emplace_back(
RocksDBKey::IndexValue(_objectId, key, leased.slice())); RocksDBKey::IndexValue(_objectId, leased.slice(), revisionId));
hashes.push_back(leased.slice().normalizedHash()); hashes.push_back(leased.slice().normalizedHash());
} }
} else { } else {
// other path for handling array elements, too // other path for handling array elements, too
std::vector<VPackSlice> sliceStack; std::vector<VPackSlice> sliceStack;
buildIndexValues(leased, doc, 0, elements, sliceStack, hashes); buildIndexValues(leased, revisionId, doc, 0, elements, sliceStack, hashes);
} }
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
} }
void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased, void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased,
TRI_voc_rid_t revisionId,
VPackSlice const& document, VPackSlice const& document,
std::vector<RocksDBKey>& elements, std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack, std::vector<VPackSlice>& sliceStack,
@ -348,7 +344,6 @@ void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased,
} }
leased.close(); leased.close();
StringRef key(document.get(StaticStrings::KeyString));
if (_unique) { if (_unique) {
// Unique VPack index values are stored as follows: // Unique VPack index values are stored as follows:
// - Key: 7 + 8-byte object ID of index + VPack array with index value(s) // - Key: 7 + 8-byte object ID of index + VPack array with index value(s)
@ -361,15 +356,15 @@ void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased,
// + primary key // + primary key
// - Value: empty // - Value: empty
elements.emplace_back( elements.emplace_back(
RocksDBKey::IndexValue(_objectId, key, leased.slice())); RocksDBKey::IndexValue(_objectId, leased.slice(), revisionId));
hashes.push_back(leased.slice().normalizedHash()); hashes.push_back(leased.slice().normalizedHash());
} }
} }
/// @brief helper function to create a set of index combinations to insert /// @brief helper function to create a set of index combinations to insert
void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased, void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
VPackSlice const document, TRI_voc_rid_t revisionId,
size_t level, VPackSlice const doc, size_t level,
std::vector<RocksDBKey>& elements, std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack, std::vector<VPackSlice>& sliceStack,
std::vector<uint64_t>& hashes) { std::vector<uint64_t>& hashes) {
@ -377,12 +372,12 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
// Stop the recursion: // Stop the recursion:
if (level == _paths.size()) { if (level == _paths.size()) {
addIndexValue(leased, document, elements, sliceStack, hashes); addIndexValue(leased, revisionId, doc, elements, sliceStack, hashes);
return; return;
} }
if (_expanding[level] == -1) { // the trivial, non-expanding case if (_expanding[level] == -1) { // the trivial, non-expanding case
VPackSlice slice = document.get(_paths[level]); VPackSlice slice = doc.get(_paths[level]);
if (slice.isNone() || slice.isNull()) { if (slice.isNone() || slice.isNull()) {
if (_sparse) { if (_sparse) {
return; return;
@ -391,7 +386,8 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
} else { } else {
sliceStack.emplace_back(slice); sliceStack.emplace_back(slice);
} }
buildIndexValues(leased, document, level + 1, elements, sliceStack, hashes); buildIndexValues(leased, revisionId, doc, level + 1, elements, sliceStack,
hashes);
sliceStack.pop_back(); sliceStack.pop_back();
return; return;
} }
@ -412,14 +408,14 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
for (size_t i = level; i < _paths.size(); i++) { for (size_t i = level; i < _paths.size(); i++) {
sliceStack.emplace_back(illegalSlice); sliceStack.emplace_back(illegalSlice);
} }
addIndexValue(leased, document, elements, sliceStack, hashes); addIndexValue(leased, revisionId, doc, elements, sliceStack, hashes);
for (size_t i = level; i < _paths.size(); i++) { for (size_t i = level; i < _paths.size(); i++) {
sliceStack.pop_back(); sliceStack.pop_back();
} }
}; };
size_t const n = _paths[level].size(); size_t const n = _paths[level].size();
// We have 0 <= _expanding[level] < n. // We have 0 <= _expanding[level] < n.
VPackSlice current(document); VPackSlice current(doc);
for (size_t i = 0; i <= static_cast<size_t>(_expanding[level]); i++) { for (size_t i = 0; i <= static_cast<size_t>(_expanding[level]); i++) {
if (!current.isObject()) { if (!current.isObject()) {
finishWithNones(); finishWithNones();
@ -447,7 +443,7 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
if (it == seen.end()) { if (it == seen.end()) {
seen.insert(something); seen.insert(something);
sliceStack.emplace_back(something); sliceStack.emplace_back(something);
buildIndexValues(leased, document, level + 1, elements, sliceStack, buildIndexValues(leased, revisionId, doc, level + 1, elements, sliceStack,
hashes); hashes);
sliceStack.pop_back(); sliceStack.pop_back();
} }
@ -526,8 +522,7 @@ int RocksDBVPackIndex::insert(transaction::Methods* trx,
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString)); RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(revisionId)
RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(docKey)
: RocksDBValue::IndexValue(); : RocksDBValue::IndexValue();
RocksDBMethods* mthds = rocksutils::toRocksMethods(trx); RocksDBMethods* mthds = rocksutils::toRocksMethods(trx);
@ -594,8 +589,7 @@ int RocksDBVPackIndex::insertRaw(RocksDBMethods* batch,
// now we are going to construct the value to insert into rocksdb // now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure // unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString)); RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(revisionId)
RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(docKey)
: RocksDBValue::IndexValue(); : RocksDBValue::IndexValue();
for (RocksDBKey const& key : elements) { for (RocksDBKey const& key : elements) {
@ -802,13 +796,8 @@ RocksDBVPackIndexIterator* RocksDBVPackIndex::lookup(
} }
} }
// Secured by trx. The shared_ptr index stays valid in return new RocksDBVPackIndexIterator(_collection, trx, mmdr, this, reverse,
// _collection at least as long as trx is running. leftBorder, rightBorder);
// Same for the iterator
auto physical = static_cast<RocksDBCollection*>(_collection->getPhysical());
auto idx = physical->primaryIndex();
return new RocksDBVPackIndexIterator(_collection, trx, mmdr, this, idx,
reverse, leftBorder, rightBorder);
} }
bool RocksDBVPackIndex::accessFitsIndex( bool RocksDBVPackIndex::accessFitsIndex(
@ -1510,10 +1499,13 @@ void RocksDBVPackIndex::recalculateEstimates() {
_estimator->clear(); _estimator->clear();
auto bounds = RocksDBKeyBounds::IndexEntries(_objectId); auto bounds = RocksDBKeyBounds::IndexEntries(_objectId);
rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) { rocksutils::iterateBounds(bounds,
uint64_t hash = RocksDBVPackIndex::HashForKey(it->key()); [&](rocksdb::Iterator* it) {
_estimator->insert(hash); uint64_t hash =
}, arangodb::RocksDBColumnFamily::index()); RocksDBVPackIndex::HashForKey(it->key());
_estimator->insert(hash);
},
arangodb::RocksDBColumnFamily::index());
} }
Result RocksDBVPackIndex::postprocessRemove(transaction::Methods* trx, Result RocksDBVPackIndex::postprocessRemove(transaction::Methods* trx,

View File

@ -66,7 +66,6 @@ class RocksDBVPackIndexIterator final : public IndexIterator {
transaction::Methods* trx, transaction::Methods* trx,
ManagedDocumentResult* mmdr, ManagedDocumentResult* mmdr,
arangodb::RocksDBVPackIndex const* index, arangodb::RocksDBVPackIndex const* index,
arangodb::RocksDBPrimaryIndex* primaryIndex,
bool reverse, bool reverse,
arangodb::velocypack::Slice const& left, arangodb::velocypack::Slice const& left,
arangodb::velocypack::Slice const& right); arangodb::velocypack::Slice const& right);
@ -88,12 +87,11 @@ class RocksDBVPackIndexIterator final : public IndexIterator {
bool outOfRange() const; bool outOfRange() const;
arangodb::RocksDBVPackIndex const* _index; arangodb::RocksDBVPackIndex const* _index;
arangodb::RocksDBPrimaryIndex* _primaryIndex;
rocksdb::Comparator const* _cmp; rocksdb::Comparator const* _cmp;
std::unique_ptr<rocksdb::Iterator> _iterator; std::unique_ptr<rocksdb::Iterator> _iterator;
bool const _reverse; bool const _reverse;
RocksDBKeyBounds _bounds; RocksDBKeyBounds _bounds;
rocksdb::Slice _upperBound; // used for iterate_upper_bound rocksdb::Slice _upperBound; // used for iterate_upper_bound
}; };
class RocksDBVPackIndex : public RocksDBIndex { class RocksDBVPackIndex : public RocksDBIndex {
@ -111,7 +109,8 @@ class RocksDBVPackIndex : public RocksDBIndex {
bool hasSelectivityEstimate() const override { return true; } bool hasSelectivityEstimate() const override { return true; }
double selectivityEstimate(arangodb::StringRef const* = nullptr) const override; double selectivityEstimate(
arangodb::StringRef const* = nullptr) const override;
size_t memory() const override; size_t memory() const override;
@ -180,9 +179,9 @@ class RocksDBVPackIndex : public RocksDBIndex {
void recalculateEstimates() override; void recalculateEstimates() override;
protected: protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key, Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override; rocksdb::Slice const& value) override;
private: private:
bool isDuplicateOperator(arangodb::aql::AstNode const*, bool isDuplicateOperator(arangodb::aql::AstNode const*,
@ -216,7 +215,8 @@ protected:
/// @brief helper function to build the key and value for rocksdb from the /// @brief helper function to build the key and value for rocksdb from the
/// vector of slices /// vector of slices
/// @param hashes list of VPackSlice hashes for the estimator. /// @param hashes list of VPackSlice hashes for the estimator.
void addIndexValue(velocypack::Builder& leased, VPackSlice const& document, void addIndexValue(velocypack::Builder& leased, TRI_voc_rid_t revisionId,
VPackSlice const& document,
std::vector<RocksDBKey>& elements, std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack, std::vector<VPackSlice>& sliceStack,
std::vector<uint64_t>& hashes); std::vector<uint64_t>& hashes);
@ -226,8 +226,9 @@ protected:
/// @param elements vector of resulting index entries /// @param elements vector of resulting index entries
/// @param sliceStack working list of values to insert into the index /// @param sliceStack working list of values to insert into the index
/// @param hashes list of VPackSlice hashes for the estimator. /// @param hashes list of VPackSlice hashes for the estimator.
void buildIndexValues(velocypack::Builder& leased, VPackSlice const document, void buildIndexValues(velocypack::Builder& leased, TRI_voc_rid_t revisionId,
size_t level, std::vector<RocksDBKey>& elements, VPackSlice const document, size_t level,
std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack, std::vector<VPackSlice>& sliceStack,
std::vector<uint64_t>& hashes); std::vector<uint64_t>& hashes);
@ -248,7 +249,6 @@ protected:
/// On insertion of a document we have to insert it into the estimator, /// On insertion of a document we have to insert it into the estimator,
/// On removal we have to remove it in the estimator as well. /// On removal we have to remove it in the estimator as well.
std::unique_ptr<RocksDBCuckooIndexEstimator<uint64_t>> _estimator; std::unique_ptr<RocksDBCuckooIndexEstimator<uint64_t>> _estimator;
}; };
} // namespace arangodb } // namespace arangodb

View File

@ -53,8 +53,8 @@ RocksDBValue RocksDBValue::IndexValue() {
return RocksDBValue(RocksDBEntryType::IndexValue); return RocksDBValue(RocksDBEntryType::IndexValue);
} }
RocksDBValue RocksDBValue::UniqueIndexValue(StringRef const& primaryKey) { RocksDBValue RocksDBValue::UniqueIndexValue(TRI_voc_rid_t revisionId) {
return RocksDBValue(RocksDBEntryType::UniqueIndexValue, primaryKey); return RocksDBValue(RocksDBEntryType::UniqueIndexValue, revisionId);
} }
RocksDBValue RocksDBValue::View(VPackSlice const& data) { RocksDBValue RocksDBValue::View(VPackSlice const& data) {
@ -110,6 +110,7 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type) : _type(type), _buffer() {}
RocksDBValue::RocksDBValue(RocksDBEntryType type, uint64_t data) RocksDBValue::RocksDBValue(RocksDBEntryType type, uint64_t data)
: _type(type), _buffer() { : _type(type), _buffer() {
switch (_type) { switch (_type) {
case RocksDBEntryType::UniqueIndexValue:
case RocksDBEntryType::PrimaryIndexValue: { case RocksDBEntryType::PrimaryIndexValue: {
_buffer.reserve(sizeof(uint64_t)); _buffer.reserve(sizeof(uint64_t));
uint64ToPersistent(_buffer, data); // revision id uint64ToPersistent(_buffer, data); // revision id
@ -121,21 +122,6 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type, uint64_t data)
} }
} }
RocksDBValue::RocksDBValue(RocksDBEntryType type,
arangodb::StringRef const& data)
: _type(type), _buffer() {
switch (_type) {
case RocksDBEntryType::UniqueIndexValue: {
_buffer.reserve(data.length());
_buffer.append(data.data(), data.length()); // primary key
break;
}
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}
RocksDBValue::RocksDBValue(RocksDBEntryType type, VPackSlice const& data) RocksDBValue::RocksDBValue(RocksDBEntryType type, VPackSlice const& data)
: _type(type), _buffer() { : _type(type), _buffer() {
switch (_type) { switch (_type) {
@ -156,7 +142,7 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type, VPackSlice const& data)
} }
TRI_voc_rid_t RocksDBValue::revisionId(char const* data, uint64_t size) { TRI_voc_rid_t RocksDBValue::revisionId(char const* data, uint64_t size) {
TRI_ASSERT(data != nullptr); TRI_ASSERT(data != nullptr && size >= sizeof(uint64_t));
return uint64FromPersistent(data); return uint64FromPersistent(data);
} }

View File

@ -51,7 +51,7 @@ class RocksDBValue {
static RocksDBValue PrimaryIndexValue(TRI_voc_rid_t revisionId); static RocksDBValue PrimaryIndexValue(TRI_voc_rid_t revisionId);
static RocksDBValue EdgeIndexValue(); static RocksDBValue EdgeIndexValue();
static RocksDBValue IndexValue(); static RocksDBValue IndexValue();
static RocksDBValue UniqueIndexValue(arangodb::StringRef const& primaryKey); static RocksDBValue UniqueIndexValue(TRI_voc_rid_t revisionId);
static RocksDBValue View(VPackSlice const& data); static RocksDBValue View(VPackSlice const& data);
static RocksDBValue ReplicationApplierConfig(VPackSlice const& data); static RocksDBValue ReplicationApplierConfig(VPackSlice const& data);
@ -110,7 +110,6 @@ class RocksDBValue {
RocksDBValue(); RocksDBValue();
explicit RocksDBValue(RocksDBEntryType type); explicit RocksDBValue(RocksDBEntryType type);
RocksDBValue(RocksDBEntryType type, uint64_t data); RocksDBValue(RocksDBEntryType type, uint64_t data);
RocksDBValue(RocksDBEntryType type, StringRef const& data);
RocksDBValue(RocksDBEntryType type, VPackSlice const& data); RocksDBValue(RocksDBEntryType type, VPackSlice const& data);
private: private:

View File

@ -2713,12 +2713,18 @@ static void JS_TruncateVocbaseCol(
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
} }
// optionally specify non trx remove
bool unsafeTruncate = false;
if (args.Length() > 0) {
unsafeTruncate = TRI_ObjectToBoolean(args[0]);
}
auto t = unsafeTruncate ? AccessMode::Type::EXCLUSIVE : AccessMode::Type::WRITE;
SingleCollectionTransaction trx( SingleCollectionTransaction trx(
transaction::V8Context::Create(collection->vocbase(), true), transaction::V8Context::Create(collection->vocbase(), true),
collection->cid(), AccessMode::Type::WRITE); collection->cid(), t);
Result res = trx.begin(); Result res = trx.begin();
if (!res.ok()) { if (!res.ok()) {
TRI_V8_THROW_EXCEPTION(res); TRI_V8_THROW_EXCEPTION(res);
} }

View File

@ -5,6 +5,7 @@
window.ClusterCoordinator = Backbone.Model.extend({ window.ClusterCoordinator = Backbone.Model.extend({
defaults: { defaults: {
'name': '', 'name': '',
'id': '',
'status': 'ok', 'status': 'ok',
'address': '', 'address': '',
'protocol': '' 'protocol': ''

View File

@ -326,10 +326,10 @@
this.clusterView.render(); this.clusterView.render();
}, },
node: function (name, initialized) { node: function (id, initialized) {
this.checkUser(); this.checkUser();
if (!initialized || this.isCluster === undefined) { if (!initialized || this.isCluster === undefined) {
this.waitForInit(this.node.bind(this), name); this.waitForInit(this.node.bind(this), id);
return; return;
} }
if (this.isCluster === false) { if (this.isCluster === false) {
@ -342,7 +342,7 @@
this.nodeView.remove(); this.nodeView.remove();
} }
this.nodeView = new window.NodeView({ this.nodeView = new window.NodeView({
coordname: name, coordid: id,
coordinators: this.coordinatorCollection, coordinators: this.coordinatorCollection,
dbServers: this.dbServers dbServers: this.dbServers
}); });

View File

@ -17,7 +17,7 @@
if (window.App.isCluster) { if (window.App.isCluster) {
this.coordinators = options.coordinators; this.coordinators = options.coordinators;
this.dbServers = options.dbServers; this.dbServers = options.dbServers;
this.coordname = options.coordname; this.coordid = options.coordid;
this.updateServerTime(); this.updateServerTime();
// start polling with interval // start polling with interval
@ -47,8 +47,7 @@
var callback = function () { var callback = function () {
this.continueRender(); this.continueRender();
this.breadcrumb(arangoHelper.getCoordinatorShortName(this.coordname)); this.breadcrumb(arangoHelper.getCoordinatorShortName(this.coordid));
// window.arangoHelper.buildNodeSubNav(this.coordname, 'Dashboard', 'Logs')
$(window).trigger('resize'); $(window).trigger('resize');
}.bind(this); }.bind(this);
@ -59,8 +58,8 @@
if (!this.initDBDone) { if (!this.initDBDone) {
this.waitForDBServers(callback); this.waitForDBServers(callback);
} else { } else {
this.coordname = window.location.hash.split('/')[1]; this.coordid = window.location.hash.split('/')[1];
this.coordinator = this.coordinators.findWhere({name: this.coordname}); this.coordinator = this.coordinators.findWhere({id: this.coordid});
callback(); callback();
} }
}, },
@ -79,7 +78,7 @@
raw: this.coordinator.get('address'), raw: this.coordinator.get('address'),
isDBServer: false, isDBServer: false,
endpoint: this.coordinator.get('protocol') + '://' + this.coordinator.get('address'), endpoint: this.coordinator.get('protocol') + '://' + this.coordinator.get('address'),
target: this.coordinator.get('name') target: this.coordinator.get('id')
} }
}); });
} else { } else {
@ -113,7 +112,7 @@
if (self.coordinators.length === 0) { if (self.coordinators.length === 0) {
self.waitForCoordinators(callback); self.waitForCoordinators(callback);
} else { } else {
self.coordinator = self.coordinators.findWhere({name: self.coordname}); self.coordinator = self.coordinators.findWhere({id: self.coordid});
self.initCoordDone = true; self.initCoordDone = true;
if (callback) { if (callback) {
callback(); callback();

View File

@ -362,7 +362,7 @@ class RandomDeviceCombined : public RandomDevice {
class RandomDeviceMersenne : public RandomDevice { class RandomDeviceMersenne : public RandomDevice {
public: public:
RandomDeviceMersenne() RandomDeviceMersenne()
: engine(RandomDevice::seed()) {} : engine((uint_fast32_t)RandomDevice::seed()) {}
uint32_t random() { return engine(); } uint32_t random() { return engine(); }
void seed(uint64_t seed) { engine.seed(static_cast<decltype(engine)::result_type>(seed)); } void seed(uint64_t seed) { engine.seed(static_cast<decltype(engine)::result_type>(seed)); }

View File

@ -29,4 +29,5 @@ if [[ -e $fpath ]]; then
fi fi
#execute #execute
sudo cgexec -g memory:arango_mem su - $USER -c "$@" sudo cgexec -g memory:arango_mem su -l -p -c "$@" $USER

View File

@ -222,12 +222,11 @@ SECTION("test_primary_index") {
/// @brief test edge index /// @brief test edge index
SECTION("test_edge_index") { SECTION("test_edge_index") {
RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, StringRef("a/1"), StringRef("foobar")); RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, StringRef("a/1"), 33);
auto const& s1 = key1.string(); auto const& s1 = key1.string();
CHECK(s1.size() == sizeof(char) + sizeof(uint64_t) + strlen("a/1") + sizeof(char) + strlen("foobar") + sizeof(char)); CHECK(s1.size() == sizeof(char) + sizeof(uint64_t) + strlen("a/1") + sizeof(char) + sizeof(uint64_t));
CHECK(s1 == std::string("5\0\0\0\0\0\0\0\0a/1\0foobar\x06", 20)); CHECK(s1 == std::string("5\0\0\0\0\0\0\0\0a/1\0!\0\0\0\0\0\0\0", 21));
} }
} }

View File

@ -47,10 +47,11 @@ void doFromToTest(double num){
template <typename T> template <typename T>
void doFromToTest(T num){ void doFromToTest(T num){
T x = num , y; T x = num , y;
char s[sizeof(x)]; char s[sizeof(x)] = {0};
char* p = &s[0]; char* p1 = &s[0];
toPersistent(x,p); char* p2 = p1;
y = fromPersistent<T>(p); toPersistent(x,p1);
y = fromPersistent<T>(p2);
CHECK((x == y)); CHECK((x == y));
} }