diff --git a/arangod/Pregel/Algos/DMID/DMID.cpp b/arangod/Pregel/Algos/DMID/DMID.cpp index a5170d58cd..84b3ca6112 100644 --- a/arangod/Pregel/Algos/DMID/DMID.cpp +++ b/arangod/Pregel/Algos/DMID/DMID.cpp @@ -634,7 +634,7 @@ struct DMIDGraphFormat : public GraphFormat { b.add(_resultField, VPackValue(VPackValueType::Array)); for (std::pair const& pair : ptr->membershipDegree) { b.openArray(); - b.add(VPackValue(pair.first.key)); + b.add(VPackValue(arangodb::basics::StringUtils::int64(pair.first.key))); b.add(VPackValue(pair.second)); b.close(); } diff --git a/arangod/Pregel/Algos/SLPA.cpp b/arangod/Pregel/Algos/SLPA.cpp index 9ac07ea674..a23b490ba9 100644 --- a/arangod/Pregel/Algos/SLPA.cpp +++ b/arangod/Pregel/Algos/SLPA.cpp @@ -192,12 +192,22 @@ struct SLPAGraphFormat : public GraphFormat { } else if (vec.size() == 1 || maxCommunities == 1) { b.add(resField, VPackValue(vec[0].first)); } else { - b.add(resField, VPackValue(VPackValueType::Object)); - for (unsigned c = 0; c < vec.size() && c < maxCommunities; c++) { - b.add(arangodb::basics::StringUtils::itoa(vec[c].first), - VPackValue(vec[c].second)); + // output for use with the DMID/Metrics code + b.add(resField, VPackValue(VPackValueType::Array)); + for (unsigned c = 0; c < vec.size() && c < maxCommunities; + c++) { + b.openArray(); + b.add(VPackValue(vec[c].first)); + b.add(VPackValue(vec[c].second)); + b.close(); } b.close(); + /*b.add(resField, VPackValue(VPackValueType::Object)); + for (unsigned c = 0; c < vec.size() && c < maxCommunities; c++) { + b.add(arangodb::basics::StringUtils::itoa(vec[c].first), + VPackValue(vec[c].second)); + } + b.close();*/ } } return true; diff --git a/arangod/RocksDBEngine/RocksDBCollection.cpp b/arangod/RocksDBEngine/RocksDBCollection.cpp index 5700bba473..b77da75475 100644 --- a/arangod/RocksDBEngine/RocksDBCollection.cpp +++ b/arangod/RocksDBEngine/RocksDBCollection.cpp @@ -631,9 +631,10 @@ void RocksDBCollection::truncate(transaction::Methods* trx, TRI_ASSERT(_objectId != 0); TRI_voc_cid_t cid = _logicalCollection->cid(); RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx); - // delete documents - RocksDBMethods* mthd = state->rocksdbMethods(); + RocksDBMethods* mthd; + mthd = state->rocksdbMethods(); + RocksDBKeyBounds documentBounds = RocksDBKeyBounds::CollectionDocuments(this->objectId()); diff --git a/arangod/RocksDBEngine/RocksDBCommon.h b/arangod/RocksDBEngine/RocksDBCommon.h index c5be720511..22e38569e0 100644 --- a/arangod/RocksDBEngine/RocksDBCommon.h +++ b/arangod/RocksDBEngine/RocksDBCommon.h @@ -187,6 +187,7 @@ void iterateBounds( rocksdb::ReadOptions options = rocksdb::ReadOptions()) { rocksdb::Slice const end = bounds.end(); options.iterate_upper_bound = &end;// save to use on rocksb::DB directly + options.prefix_same_as_start = true; std::unique_ptr it(globalRocksDB()->NewIterator(options, handle)); for (it->Seek(bounds.start()); it->Valid(); it->Next()) { callback(it.get()); diff --git a/arangod/RocksDBEngine/RocksDBEdgeIndex.cpp b/arangod/RocksDBEngine/RocksDBEdgeIndex.cpp index fc731fa8a3..ccab089985 100644 --- a/arangod/RocksDBEngine/RocksDBEdgeIndex.cpp +++ b/arangod/RocksDBEngine/RocksDBEdgeIndex.cpp @@ -328,30 +328,21 @@ void RocksDBEdgeIndexIterator::lookupInRocksDB(StringRef fromTo) { rocksdb::Comparator const* cmp = _index->comparator(); _builder.openArray(); - RocksDBToken token; auto end = _bounds.end(); - while (_iterator->Valid() && - (cmp->Compare(_iterator->key(), end) < 0)) { - StringRef edgeKey = RocksDBKey::primaryKey(_iterator->key()); - 
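
// --- Illustrative sketch: iterating a bounded RocksDB range (not part of the patch) ---
// The hunk in RocksDBCommon.h keeps the upper bound in a local Slice and now also sets
// prefix_same_as_start. Two details are worth seeing in isolation: RocksDB only stores a
// POINTER to the iterate_upper_bound Slice, so it must outlive the iterator, and
// prefix_same_as_start only has an effect when the column family has a prefix extractor
// configured (which the edge column family appears to rely on). Standalone demo; the
// database path, key scheme and 7-byte fixed prefix are arbitrary choices for this sketch.
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/slice.h>
#include <rocksdb/slice_transform.h>
#include <iostream>
#include <memory>

int main() {
  rocksdb::Options opts;
  opts.create_if_missing = true;
  opts.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(7));

  rocksdb::DB* raw = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(opts, "/tmp/iterate-bounds-demo", &raw);
  if (!s.ok()) { std::cerr << s.ToString() << std::endl; return 1; }
  std::unique_ptr<rocksdb::DB> db(raw);

  db->Put(rocksdb::WriteOptions(), "index1:bar", "");
  db->Put(rocksdb::WriteOptions(), "index1:foo", "");
  db->Put(rocksdb::WriteOptions(), "index2:baz", "");  // outside the bounds

  rocksdb::ReadOptions ro;
  rocksdb::Slice const upper("index2");  // like bounds.end(): must stay alive while iterating
  ro.iterate_upper_bound = &upper;
  ro.prefix_same_as_start = true;        // the option the patch adds

  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ro));
  for (it->Seek("index1:"); it->Valid(); it->Next()) {
    std::cout << it->key().ToString() << std::endl;  // prints index1:bar, index1:foo
  }
  return 0;
}
// --- end sketch ---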
Result res = rocksColl->lookupDocumentToken(_trx, edgeKey, token); - if (res.ok()) { - ManagedDocumentResult mmdr; - if (rocksColl->readDocument(_trx, token, mmdr)) { - _builder.add(VPackValue(token.revisionId())); - VPackSlice doc(mmdr.vpack()); - TRI_ASSERT(doc.isObject()); - _builder.add(doc); - } else { - // Data Inconsistency. - // We have a revision id without a document... - TRI_ASSERT(false); - } -#ifdef USE_MAINTAINER_MODE + while (_iterator->Valid() && (cmp->Compare(_iterator->key(), end) < 0)) { + TRI_voc_rid_t revisionId = RocksDBKey::revisionId(_iterator->key()); + RocksDBToken token(revisionId); + + ManagedDocumentResult mmdr; + if (rocksColl->readDocument(_trx, token, mmdr)) { + _builder.add(VPackValue(token.revisionId())); + VPackSlice doc(mmdr.vpack()); + TRI_ASSERT(doc.isObject()); + _builder.add(doc); } else { - // Index inconsistency, we indexed a primaryKey => revision that is - // not known any more - TRI_ASSERT(res.ok()); -#endif + // Data Inconsistency. + // We have a revision id without a document... + TRI_ASSERT(false); } _iterator->Next(); } @@ -455,12 +446,10 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder, bool withFigures, int RocksDBEdgeIndex::insert(transaction::Methods* trx, TRI_voc_rid_t revisionId, VPackSlice const& doc, bool isRollback) { - VPackSlice primaryKey = doc.get(StaticStrings::KeyString); VPackSlice fromTo = doc.get(_directionAttr); - TRI_ASSERT(primaryKey.isString() && fromTo.isString()); + TRI_ASSERT(fromTo.isString()); auto fromToRef = StringRef(fromTo); - RocksDBKey key = - RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey)); + RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef, revisionId); // blacklist key in cache blackListKey(fromToRef); @@ -486,12 +475,11 @@ int RocksDBEdgeIndex::insertRaw(RocksDBMethods*, TRI_voc_rid_t, int RocksDBEdgeIndex::remove(transaction::Methods* trx, TRI_voc_rid_t revisionId, VPackSlice const& doc, bool isRollback) { - VPackSlice primaryKey = doc.get(StaticStrings::KeyString); + // VPackSlice primaryKey = doc.get(StaticStrings::KeyString); VPackSlice fromTo = doc.get(_directionAttr); auto fromToRef = StringRef(fromTo); - TRI_ASSERT(primaryKey.isString() && fromTo.isString()); - RocksDBKey key = - RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey)); + TRI_ASSERT(fromTo.isString()); + RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef, revisionId); // blacklist key in cache blackListKey(fromToRef); @@ -521,12 +509,12 @@ void RocksDBEdgeIndex::batchInsert( std::shared_ptr queue) { RocksDBMethods* mthd = rocksutils::toRocksMethods(trx); for (std::pair const& doc : documents) { - VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString); + // VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString); VPackSlice fromTo = doc.second.get(_directionAttr); - TRI_ASSERT(primaryKey.isString() && fromTo.isString()); + TRI_ASSERT(fromTo.isString()); auto fromToRef = StringRef(fromTo); RocksDBKey key = - RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey)); + RocksDBKey::EdgeIndexValue(_objectId, fromToRef, doc.first); blackListKey(fromToRef); Result r = mthd->Put(_cf, rocksdb::Slice(key.string()), rocksdb::Slice(), @@ -648,7 +636,8 @@ void RocksDBEdgeIndex::warmup(arangodb::transaction::Methods* trx) { return; } auto rocksColl = toRocksDBCollection(_collection); - uint64_t expectedCount = static_cast(selectivityEstimate() * rocksColl->numberDocuments()); + uint64_t expectedCount = 
static_cast(selectivityEstimate() * + rocksColl->numberDocuments()); // Prepare the cache to be resized for this amount of objects to be inserted. _cache->sizeHint(expectedCount); @@ -657,77 +646,77 @@ void RocksDBEdgeIndex::warmup(arangodb::transaction::Methods* trx) { std::string previous = ""; VPackBuilder builder; ManagedDocumentResult mmdr; - RocksDBToken token; bool needsInsert = false; - rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) { - auto key = it->key(); - StringRef v = RocksDBKey::vertexId(key); - if (previous.empty()) { - // First call. - builder.clear(); - previous = v.toString(); - auto finding = _cache->find(previous.data(), (uint32_t)previous.size()); - if (finding.found()) { - needsInsert = false; - } else { - needsInsert = true; - builder.openArray(); - } - - } - - if (v != previous) { - if (needsInsert) { - // Switch to next vertex id. - // Store what we have. - builder.close(); - - while(_cache->isResizing() || _cache->isMigrating()) { - // We should wait here, the cache will reject - // any inserts anyways. - usleep(10000); + rocksutils::iterateBounds( + bounds, + [&](rocksdb::Iterator* it) { + rocksdb::Slice key = it->key(); + StringRef v = RocksDBKey::vertexId(key); + if (previous.empty()) { + // First call. + builder.clear(); + previous = v.toString(); + auto finding = + _cache->find(previous.data(), (uint32_t)previous.size()); + if (finding.found()) { + needsInsert = false; + } else { + needsInsert = true; + builder.openArray(); + } } - auto entry = cache::CachedValue::construct( - previous.data(), static_cast(previous.size()), - builder.slice().start(), - static_cast(builder.slice().byteSize())); - if (!_cache->insert(entry)) { - delete entry; + if (v != previous) { + if (needsInsert) { + // Switch to next vertex id. + // Store what we have. + builder.close(); + + while (_cache->isResizing() || _cache->isMigrating()) { + // We should wait here, the cache will reject + // any inserts anyways. + usleep(10000); + } + + auto entry = cache::CachedValue::construct( + previous.data(), static_cast(previous.size()), + builder.slice().start(), + static_cast(builder.slice().byteSize())); + if (!_cache->insert(entry)) { + delete entry; + } + builder.clear(); + } + // Need to store + previous = v.toString(); + auto finding = + _cache->find(previous.data(), (uint32_t)previous.size()); + if (finding.found()) { + needsInsert = false; + } else { + needsInsert = true; + builder.openArray(); + } } - builder.clear(); - } - // Need to store - previous = v.toString(); - auto finding = _cache->find(previous.data(), (uint32_t)previous.size()); - if (finding.found()) { - needsInsert = false; - } else { - needsInsert = true; - builder.openArray(); - } - - - - } - if (needsInsert) { - StringRef edgeKey = RocksDBKey::primaryKey(key); - Result res = rocksColl->lookupDocumentToken(trx, edgeKey, token); - if (res.ok() && rocksColl->readDocument(trx, token, mmdr)) { - builder.add(VPackValue(token.revisionId())); - VPackSlice doc(mmdr.vpack()); - TRI_ASSERT(doc.isObject()); - builder.add(doc); + if (needsInsert) { + TRI_voc_rid_t revisionId = RocksDBKey::revisionId(key); + RocksDBToken token(revisionId); + if (rocksColl->readDocument(trx, token, mmdr)) { + builder.add(VPackValue(token.revisionId())); + VPackSlice doc(mmdr.vpack()); + TRI_ASSERT(doc.isObject()); + builder.add(doc); #ifdef USE_MAINTAINER_MODE - } else { - // Data Inconsistency. - // We have a revision id without a document... - TRI_ASSERT(false); + } else { + // Data Inconsistency. 
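
// --- Illustrative sketch: the grouping pattern inside warmup() (not part of the patch) ---
// The rewritten warmup() walks the edge index in key order, collects all revision ids for
// one vertex id into a builder, and flushes that group into the cache whenever the vertex
// id changes (plus once more after the loop). Minimal standalone version of just that
// grouping logic; the cache lookup that skips already-cached vertices and the
// wait-while-resizing/migrating handling are deliberately left out, and the sample vertex
// ids and revisions are made up for the sketch.
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
  // Stand-in for the sorted index scan: (vertex id, revision id), grouped by vertex id
  // because that is how the edge-index keys are ordered.
  std::vector<std::pair<std::string, uint64_t>> scan = {
      {"v/1", 100}, {"v/1", 101}, {"v/2", 200}, {"v/3", 300}, {"v/3", 301}};

  std::string previous;            // plays the role of `previous` in warmup()
  std::vector<uint64_t> group;     // plays the role of the VPackBuilder array

  auto flush = [&]() {
    if (previous.empty()) return;
    std::cout << previous << ":";
    for (uint64_t rid : group) std::cout << ' ' << rid;
    std::cout << "\n";             // warmup() inserts the finished builder into the cache here
  };

  for (auto const& entry : scan) {
    if (entry.first != previous) {
      flush();                     // vertex id changed: store what we have
      previous = entry.first;
      group.clear();
    }
    group.push_back(entry.second);
  }
  flush();                         // trailing group, like the final insert after the loop
  return 0;
}
// --- end sketch ---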
+ // We have a revision id without a document... + TRI_ASSERT(false); #endif - } - } - }, RocksDBColumnFamily::edge()); + } + } + }, + RocksDBColumnFamily::edge()); if (!previous.empty() && needsInsert) { // We still have something to store @@ -847,10 +836,13 @@ void RocksDBEdgeIndex::recalculateEstimates() { _estimator->clear(); auto bounds = RocksDBKeyBounds::EdgeIndex(_objectId); - rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) { - uint64_t hash = RocksDBEdgeIndex::HashForKey(it->key()); - _estimator->insert(hash); - }, arangodb::RocksDBColumnFamily::edge()); + rocksutils::iterateBounds(bounds, + [&](rocksdb::Iterator* it) { + uint64_t hash = + RocksDBEdgeIndex::HashForKey(it->key()); + _estimator->insert(hash); + }, + arangodb::RocksDBColumnFamily::edge()); } Result RocksDBEdgeIndex::postprocessRemove(transaction::Methods* trx, diff --git a/arangod/RocksDBEngine/RocksDBFulltextIndex.cpp b/arangod/RocksDBEngine/RocksDBFulltextIndex.cpp index 04cfb1cc70..063627580e 100644 --- a/arangod/RocksDBEngine/RocksDBFulltextIndex.cpp +++ b/arangod/RocksDBEngine/RocksDBFulltextIndex.cpp @@ -201,14 +201,13 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx, RocksDBMethods* mthd = rocksutils::toRocksMethods(trx); // now we are going to construct the value to insert into rocksdb // unique indexes have a different key structure - StringRef docKey(doc.get(StaticStrings::KeyString)); RocksDBValue value = RocksDBValue::IndexValue(); int res = TRI_ERROR_NO_ERROR; // size_t const count = words.size(); for (std::string const& word : words) { RocksDBKey key = - RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); + RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId); Result r = mthd->Put(_cf, key, value.string(), rocksutils::index); if (!r.ok()) { @@ -220,14 +219,16 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx, for (size_t j = 0; j < i; ++j) { std::string const& word = words[j]; RocksDBKey key = - RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); + RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), + revisionId); rtrx->Delete(key.string()); } }*/ return res; } -int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch, TRI_voc_rid_t, +int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch, + TRI_voc_rid_t revisionId, arangodb::velocypack::Slice const& doc) { std::set words = wordlist(doc); if (words.empty()) { @@ -236,12 +237,12 @@ int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch, TRI_voc_rid_t, // now we are going to construct the value to insert into rocksdb // unique indexes have a different key structure - StringRef docKey(doc.get(StaticStrings::KeyString)); + // StringRef docKey(doc.get(StaticStrings::KeyString)); RocksDBValue value = RocksDBValue::IndexValue(); for (std::string const& word : words) { RocksDBKey key = - RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); + RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId); batch->Put(_cf, key, value.string()); } @@ -261,11 +262,10 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx, RocksDBMethods* mthd = rocksutils::toRocksMethods(trx); // now we are going to construct the value to insert into rocksdb // unique indexes have a different key structure - StringRef docKey(doc.get(StaticStrings::KeyString)); int res = TRI_ERROR_NO_ERROR; for (std::string const& word : words) { RocksDBKey key = - RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); + 
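
// --- Illustrative sketch: what the selectivity estimator computes (not part of the patch) ---
// recalculateEstimates() scans the whole index and feeds one hash per entry into
// _estimator; warmup() then uses selectivityEstimate() * numberDocuments() as the cache
// sizeHint. A toy version with an exact set shows what the numbers mean; the real
// estimator is an approximate, fixed-size structure, and HashForKey presumably hashes
// only the indexed portion of the key (e.g. the _from/_to value), which is assumed here.
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

int main() {
  // Stand-in for the indexed values seen while iterating the bounds.
  std::vector<std::string> indexedValues = {"a/1", "a/1", "a/2", "b/9", "a/1"};

  std::unordered_set<uint64_t> distinct;
  uint64_t total = 0;
  std::hash<std::string> hasher;
  for (auto const& v : indexedValues) {
    distinct.insert(hasher(v));  // _estimator->insert(hash) in the patch
    ++total;
  }

  double selectivity = (total == 0) ? 1.0 : double(distinct.size()) / double(total);
  std::cout << "selectivity estimate: " << selectivity << "\n";   // 3 distinct / 5 = 0.6
  std::cout << "expected cache entries for 1000 docs: "
            << static_cast<uint64_t>(selectivity * 1000) << "\n"; // 600
  return 0;
}
// --- end sketch ---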
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId); Result r = mthd->Delete(_cf, key); if (!r.ok()) { @@ -276,15 +276,15 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx, return res; } -int RocksDBFulltextIndex::removeRaw(RocksDBMethods* batch, TRI_voc_rid_t, +int RocksDBFulltextIndex::removeRaw(RocksDBMethods* batch, + TRI_voc_rid_t revisionId, arangodb::velocypack::Slice const& doc) { std::set words = wordlist(doc); // now we are going to construct the value to insert into rocksdb // unique indexes have a different key structure - StringRef docKey(doc.get(StaticStrings::KeyString)); for (std::string const& word : words) { RocksDBKey key = - RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey); + RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId); batch->Delete(_cf, key); } return TRI_ERROR_NO_ERROR; @@ -462,29 +462,25 @@ Result RocksDBFulltextIndex::executeQuery(transaction::Methods* trx, FulltextQuery const& query, size_t maxResults, VPackBuilder& builder) { - std::set resultSet; + std::set resultSet; for (FulltextQueryToken const& token : query) { applyQueryToken(trx, token, resultSet); } auto physical = static_cast(_collection->getPhysical()); - auto idx = physical->primaryIndex(); ManagedDocumentResult mmdr; - if (maxResults == 0) { // 0 appearantly means "all results" maxResults = SIZE_MAX; } builder.openArray(); // get the first N results - std::set::iterator it = resultSet.cbegin(); + std::set::iterator it = resultSet.cbegin(); while (maxResults > 0 && it != resultSet.cend()) { - RocksDBToken token = idx->lookupKey(trx, StringRef(*it)); - if (token.revisionId()) { - if (physical->readDocument(trx, token, mmdr)) { - mmdr.addToBuilder(builder, true); - maxResults--; - } + RocksDBToken token(*it); + if (token.revisionId() && physical->readDocument(trx, token, mmdr)) { + mmdr.addToBuilder(builder, true); + maxResults--; } ++it; } @@ -503,9 +499,9 @@ static RocksDBKeyBounds MakeBounds(uint64_t oid, THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED); } -Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx, - FulltextQueryToken const& token, - std::set& resultSet) { +Result RocksDBFulltextIndex::applyQueryToken( + transaction::Methods* trx, FulltextQueryToken const& token, + std::set& resultSet) { RocksDBMethods* mthds = rocksutils::toRocksMethods(trx); // why can't I have an assignment operator when I want one RocksDBKeyBounds bounds = MakeBounds(_objectId, token); @@ -518,23 +514,23 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx, iter->Seek(bounds.start()); // set is used to perform an intersection with the result set - std::set intersect; + std::set intersect; // apply left to right logic, merging all current results with ALL previous while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) { TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key())); - + rocksdb::Status s = iter->status(); if (!s.ok()) { return rocksutils::convertStatus(s); } - StringRef key = RocksDBKey::primaryKey(iter->key()); + TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key()); if (token.operation == FulltextQueryToken::AND) { - intersect.insert(key.toString()); + intersect.insert(revisionId); } else if (token.operation == FulltextQueryToken::OR) { - resultSet.insert(key.toString()); + resultSet.insert(revisionId); } else if (token.operation == FulltextQueryToken::EXCLUDE) { - resultSet.erase(key.toString()); + resultSet.erase(revisionId); } iter->Next(); } @@ -542,7 +538,7 @@ Result 
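
// --- Illustrative sketch: merging fulltext hits as revision-id sets (not part of the patch) ---
// With the primary-key strings gone, applyQueryToken() works on sets of revision ids:
// OR inserts, EXCLUDE erases, and AND intersects via std::set_intersection. Simplified
// standalone version; the real code collects AND hits into a separate `intersect` set
// while scanning and intersects afterwards, clearing the result if either side is empty.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <set>

enum class Op { AND, OR, EXCLUDE };

static void applyToken(std::set<uint64_t>& resultSet, Op op,
                       std::set<uint64_t> const& hits) {
  if (op == Op::OR) {
    resultSet.insert(hits.begin(), hits.end());
  } else if (op == Op::EXCLUDE) {
    for (uint64_t rid : hits) resultSet.erase(rid);
  } else {  // AND: keep only revisions present in both sets
    std::set<uint64_t> output;
    std::set_intersection(resultSet.begin(), resultSet.end(),
                          hits.begin(), hits.end(),
                          std::inserter(output, output.begin()));
    resultSet = std::move(output);
  }
}

int main() {
  std::set<uint64_t> result;
  applyToken(result, Op::OR, {1, 2, 3, 4});   // word A matches these revisions
  applyToken(result, Op::AND, {2, 4, 5});     // word B
  applyToken(result, Op::EXCLUDE, {4});       // word C
  for (uint64_t rid : result) std::cout << rid << "\n";  // prints 2
  return 0;
}
// --- end sketch ---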
RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx, if (resultSet.empty() || intersect.empty()) { resultSet.clear(); } else { - std::set output; + std::set output; std::set_intersection(resultSet.begin(), resultSet.end(), intersect.begin(), intersect.end(), std::inserter(output, output.begin())); diff --git a/arangod/RocksDBEngine/RocksDBFulltextIndex.h b/arangod/RocksDBEngine/RocksDBFulltextIndex.h index c5fccd33a8..4feb411a35 100644 --- a/arangod/RocksDBEngine/RocksDBFulltextIndex.h +++ b/arangod/RocksDBEngine/RocksDBFulltextIndex.h @@ -141,7 +141,7 @@ class RocksDBFulltextIndex final : public RocksDBIndex { arangodb::Result applyQueryToken(transaction::Methods* trx, FulltextQueryToken const&, - std::set& resultSet); + std::set& resultSet); }; } // namespace arangodb diff --git a/arangod/RocksDBEngine/RocksDBKey.cpp b/arangod/RocksDBEngine/RocksDBKey.cpp index 32d3865871..2c27acdf85 100644 --- a/arangod/RocksDBEngine/RocksDBKey.cpp +++ b/arangod/RocksDBEngine/RocksDBKey.cpp @@ -60,16 +60,16 @@ RocksDBKey RocksDBKey::PrimaryIndexValue(uint64_t indexId, RocksDBKey RocksDBKey::EdgeIndexValue(uint64_t indexId, arangodb::StringRef const& vertexId, - arangodb::StringRef const& primaryKey) { + TRI_voc_rid_t revisionId) { return RocksDBKey(RocksDBEntryType::EdgeIndexValue, indexId, vertexId, - primaryKey); + revisionId); } RocksDBKey RocksDBKey::IndexValue(uint64_t indexId, - arangodb::StringRef const& primaryKey, - VPackSlice const& indexValues) { - return RocksDBKey(RocksDBEntryType::IndexValue, indexId, primaryKey, - indexValues); + VPackSlice const& indexValues, + TRI_voc_rid_t revisionId) { + return RocksDBKey(RocksDBEntryType::IndexValue, indexId, indexValues, + revisionId); } RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId, @@ -79,18 +79,20 @@ RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId, RocksDBKey RocksDBKey::FulltextIndexValue(uint64_t indexId, arangodb::StringRef const& word, - arangodb::StringRef const& primaryKey) { - return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word, primaryKey); + TRI_voc_rid_t revisionId) { + return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word, + revisionId); } -RocksDBKey RocksDBKey::GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot) { +RocksDBKey RocksDBKey::GeoIndexValue(uint64_t indexId, int32_t offset, + bool isSlot) { RocksDBKey key(RocksDBEntryType::GeoIndexValue); size_t length = sizeof(char) + sizeof(indexId) + sizeof(offset); key._buffer.reserve(length); uint64ToPersistent(key._buffer, indexId); uint64_t norm = uint64_t(offset) << 32; - norm |= isSlot ? 0xFFU : 0; //encode slot|pot in lowest bit + norm |= isSlot ? 
0xFFU : 0; // encode slot|pot in lowest bit uint64ToPersistent(key._buffer, norm); return key; } @@ -171,6 +173,7 @@ arangodb::StringRef RocksDBKey::primaryKey(RocksDBKey const& key) { arangodb::StringRef RocksDBKey::primaryKey(rocksdb::Slice const& slice) { return primaryKey(slice.data(), slice.size()); } + StringRef RocksDBKey::vertexId(RocksDBKey const& key) { return vertexId(key._buffer.data(), key._buffer.size()); } @@ -191,8 +194,9 @@ std::pair RocksDBKey::geoValues(rocksdb::Slice const& slice) { TRI_ASSERT(slice.size() >= sizeof(char) + sizeof(uint64_t) * 2); RocksDBEntryType type = static_cast(*slice.data()); TRI_ASSERT(type == RocksDBEntryType::GeoIndexValue); - uint64_t val = uint64FromPersistent(slice.data() + sizeof(char) + sizeof(uint64_t)); - bool isSlot = ((val & 0xFFULL) > 0);// lowest byte is 0xFF if true + uint64_t val = + uint64FromPersistent(slice.data() + sizeof(char) + sizeof(uint64_t)); + bool isSlot = ((val & 0xFFULL) > 0); // lowest byte is 0xFF if true return std::pair(isSlot, (val >> 32)); } @@ -275,27 +279,22 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second) } RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, - arangodb::StringRef const& docKey, - VPackSlice const& indexData) + VPackSlice const& second, uint64_t third) : _type(type), _buffer() { switch (_type) { case RocksDBEntryType::IndexValue: { // Non-unique VPack index values are stored as follows: // - Key: 6 + 8-byte object ID of index + VPack array with index value(s) - // + separator byte + primary key + primary key length + // + revisionID // - Value: empty size_t length = sizeof(char) + sizeof(uint64_t) + - static_cast(indexData.byteSize()) + sizeof(char) + - docKey.length() + sizeof(char); + static_cast(second.byteSize()) + sizeof(uint64_t); _buffer.reserve(length); _buffer.push_back(static_cast(_type)); uint64ToPersistent(_buffer, first); - _buffer.append(reinterpret_cast(indexData.begin()), - static_cast(indexData.byteSize())); - _buffer.push_back(_stringSeparator); - _buffer.append(docKey.data(), docKey.length()); - _buffer.push_back(static_cast(docKey.length() & 0xff)); - + _buffer.append(reinterpret_cast(second.begin()), + static_cast(second.byteSize())); + uint64ToPersistent(_buffer, third); TRI_ASSERT(_buffer.size() == length); break; } @@ -324,22 +323,19 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, } RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, - arangodb::StringRef const& second, - arangodb::StringRef const& third) + arangodb::StringRef const& second, uint64_t third) : _type(type), _buffer() { switch (_type) { case RocksDBEntryType::FulltextIndexValue: case RocksDBEntryType::EdgeIndexValue: { - size_t length = sizeof(char) + sizeof(uint64_t) + second.size() + - sizeof(char) + third.size() + sizeof(uint8_t); + size_t length = + sizeof(char) + sizeof(uint64_t) + second.size() + sizeof(third); _buffer.reserve(length); _buffer.push_back(static_cast(_type)); uint64ToPersistent(_buffer, first); _buffer.append(second.data(), second.length()); _buffer.push_back(_stringSeparator); - _buffer.append(third.data(), third.length()); - TRI_ASSERT(third.size() <= 254); - _buffer.push_back(static_cast(third.size() & 0xff)); + uint64ToPersistent(_buffer, third); break; } @@ -434,6 +430,13 @@ TRI_voc_rid_t RocksDBKey::revisionId(char const* data, size_t size) { TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t)))); return uint64FromPersistent(data + sizeof(char) + sizeof(uint64_t)); } + case RocksDBEntryType::EdgeIndexValue: 
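
// --- Illustrative sketch: the geo slot/pot encoding round trip (not part of the patch) ---
// GeoIndexValue() packs the 32-bit offset into the high half of a uint64 and flags
// slot-vs-pot in the lowest byte (0xFF vs 0x00); geoValues() reverses this. Standalone
// round trip of just that bit packing; the pair's second type is assumed to be uint32_t,
// since the template arguments are not visible in this hunk.
#include <cassert>
#include <cstdint>
#include <utility>

static uint64_t packGeoValue(int32_t offset, bool isSlot) {
  uint64_t norm = uint64_t(static_cast<uint32_t>(offset)) << 32;
  norm |= isSlot ? 0xFFU : 0;            // encode slot|pot in the lowest byte
  return norm;
}

static std::pair<bool, uint32_t> unpackGeoValue(uint64_t val) {
  bool isSlot = (val & 0xFFULL) > 0;     // lowest byte is 0xFF if true
  return {isSlot, static_cast<uint32_t>(val >> 32)};
}

int main() {
  auto p = unpackGeoValue(packGeoValue(42, true));
  assert(p.first && p.second == 42);
  p = unpackGeoValue(packGeoValue(7, false));
  assert(!p.first && p.second == 7);
  return 0;
}
// --- end sketch ---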
+ case RocksDBEntryType::IndexValue: + case RocksDBEntryType::FulltextIndexValue: { + TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t)))); + // last 8 bytes should by revision + return uint64FromPersistent(data + size - sizeof(uint64_t)); + } default: THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR); @@ -451,14 +454,6 @@ arangodb::StringRef RocksDBKey::primaryKey(char const* data, size_t size) { return arangodb::StringRef(data + sizeof(char) + sizeof(uint64_t), keySize); } - case RocksDBEntryType::EdgeIndexValue: - case RocksDBEntryType::IndexValue: - case RocksDBEntryType::FulltextIndexValue: { - TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t) + sizeof(uint8_t))); - size_t keySize = static_cast(data[size - 1]); - return arangodb::StringRef(data + (size - (keySize + sizeof(uint8_t))), - keySize); - } default: THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR); @@ -471,11 +466,10 @@ StringRef RocksDBKey::vertexId(char const* data, size_t size) { RocksDBEntryType type = static_cast(data[0]); switch (type) { case RocksDBEntryType::EdgeIndexValue: { - TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t) + sizeof(uint8_t))); - size_t keySize = static_cast(data[size - 1]); - size_t idSize = size - (sizeof(char) + sizeof(uint64_t) + sizeof(char) + - keySize + sizeof(uint8_t)); - return StringRef(data + sizeof(char) + sizeof(uint64_t), idSize); + // 1 byte prefix + 8 byte objectID + _from/_to + 1 byte \0 + 8 byte rev + TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t)) * 2); + size_t keySize = size - (sizeof(char) + sizeof(uint64_t)) * 2; + return StringRef(data + sizeof(char) + sizeof(uint64_t), keySize); } default: diff --git a/arangod/RocksDBEngine/RocksDBKey.h b/arangod/RocksDBEngine/RocksDBKey.h index 00ee28291c..2a1ee6b8a7 100644 --- a/arangod/RocksDBEngine/RocksDBKey.h +++ b/arangod/RocksDBEngine/RocksDBKey.h @@ -44,14 +44,14 @@ class RocksDBKey { : _type(static_cast(slice.data()[0])), _buffer(slice.data(), slice.size()) {} - RocksDBKey(RocksDBKey const& other) + RocksDBKey(RocksDBKey const& other) : _type(other._type), _buffer(other._buffer) {} - - RocksDBKey(RocksDBKey&& other) + + RocksDBKey(RocksDBKey&& other) : _type(other._type), _buffer(std::move(other._buffer)) {} - - RocksDBKey& operator=(RocksDBKey const& other) = delete; - RocksDBKey& operator=(RocksDBKey&& other) = delete; + + RocksDBKey& operator=(RocksDBKey const& other) = delete; + RocksDBKey& operator=(RocksDBKey&& other) = delete; ////////////////////////////////////////////////////////////////////////////// /// @brief Create a fully-specified database key @@ -89,7 +89,7 @@ class RocksDBKey { ////////////////////////////////////////////////////////////////////////////// static RocksDBKey EdgeIndexValue(uint64_t indexId, arangodb::StringRef const& vertexId, - arangodb::StringRef const& primaryKey); + TRI_voc_rid_t revisionId); ////////////////////////////////////////////////////////////////////////////// /// @brief Create a fully-specified key for an entry in a user-defined, @@ -98,9 +98,8 @@ class RocksDBKey { /// The indexId is an object ID generated by the engine, rather than the /// actual index ID. 
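
// --- Illustrative sketch: the new edge-index key layout (not part of the patch) ---
// After this change an edge-index key is [1-byte type][8-byte objectId][vertex id]
// [NUL separator][8-byte revision id]; revisionId() reads the last 8 bytes and vertexId()
// takes everything between the 9-byte header and the 9-byte tail. Standalone round trip
// using the same arithmetic; appendUInt64/readUInt64 are stand-ins for
// uint64ToPersistent/uint64FromPersistent (only the matching encode/decode matters here,
// not the byte order the real helpers use). For "a/1" and revision 33 the key is
// 1 + 8 + 3 + 1 + 8 = 21 bytes, which is what the updated RocksDBKeyTest expects.
#include <cassert>
#include <cstdint>
#include <string>

static void appendUInt64(std::string& out, uint64_t v) {
  for (int i = 0; i < 8; ++i) out.push_back(static_cast<char>((v >> (8 * i)) & 0xFF));
}
static uint64_t readUInt64(char const* p) {
  uint64_t v = 0;
  for (int i = 0; i < 8; ++i) v |= uint64_t(static_cast<unsigned char>(p[i])) << (8 * i);
  return v;
}

int main() {
  uint64_t objectId = 0;
  std::string vertex = "a/1";   // the _from or _to value
  uint64_t revisionId = 33;

  std::string key;
  key.push_back('5');           // type byte, as in the test's expected string
  appendUInt64(key, objectId);
  key.append(vertex);
  key.push_back('\0');          // string separator
  appendUInt64(key, revisionId);
  assert(key.size() == 2 * (sizeof(char) + sizeof(uint64_t)) + vertex.size());  // 21

  // vertexId(): skip the 9-byte header, drop the 9-byte separator+revision tail
  size_t vertexLen = key.size() - 2 * (sizeof(char) + sizeof(uint64_t));
  std::string vertexOut(key.data() + sizeof(char) + sizeof(uint64_t), vertexLen);
  // revisionId(): the last 8 bytes of the key
  uint64_t revOut = readUInt64(key.data() + key.size() - sizeof(uint64_t));

  assert(vertexOut == vertex);
  assert(revOut == revisionId);
  return 0;
}
// --- end sketch ---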
////////////////////////////////////////////////////////////////////////////// - static RocksDBKey IndexValue(uint64_t indexId, - arangodb::StringRef const& primaryKey, - VPackSlice const& indexValues); + static RocksDBKey IndexValue(uint64_t indexId, VPackSlice const& indexValues, + TRI_voc_rid_t revisionId); ////////////////////////////////////////////////////////////////////////////// /// @brief Create a fully-specified key for an entry in a unique user-defined @@ -117,12 +116,13 @@ class RocksDBKey { ////////////////////////////////////////////////////////////////////////////// static RocksDBKey FulltextIndexValue(uint64_t indexId, arangodb::StringRef const& word, - arangodb::StringRef const& primaryKey); + TRI_voc_rid_t revisionId); ////////////////////////////////////////////////////////////////////////////// /// @brief Create a fully-specified key for a geoIndexValue ////////////////////////////////////////////////////////////////////////////// - static RocksDBKey GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot); + static RocksDBKey GeoIndexValue(uint64_t indexId, int32_t offset, + bool isSlot); ////////////////////////////////////////////////////////////////////////////// /// @brief Create a fully-specified key for a view @@ -161,7 +161,6 @@ class RocksDBKey { return type(slice.data(), slice.size()); } - ////////////////////////////////////////////////////////////////////////////// /// @brief Extracts the object id /// @@ -190,7 +189,7 @@ class RocksDBKey { ////////////////////////////////////////////////////////////////////////////// /// @brief Extracts the objectId from a key /// - /// May be called only on the the following key types: Document, + /// May be called only on the the following key types: Document, /// all kinds of index entries. Other types will throw. 
////////////////////////////////////////////////////////////////////////////// static uint64_t objectId(RocksDBKey const&); @@ -240,7 +239,7 @@ class RocksDBKey { static VPackSlice indexedVPack(rocksdb::Slice const&); ////////////////////////////////////////////////////////////////////////////// - /// @brief Extracts the geo pot offset + /// @brief Extracts the geo pot offset /// /// May be called only on GeoIndexValues ////////////////////////////////////////////////////////////////////////////// @@ -257,14 +256,12 @@ class RocksDBKey { RocksDBKey(RocksDBEntryType type, uint64_t first); RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second); RocksDBKey(RocksDBEntryType type, uint64_t first, VPackSlice const& slice); - RocksDBKey(RocksDBEntryType type, uint64_t first, - arangodb::StringRef const& docKey, VPackSlice const& indexData); + RocksDBKey(RocksDBEntryType type, uint64_t first, VPackSlice const& second, + TRI_voc_rid_t third); RocksDBKey(RocksDBEntryType type, uint64_t first, arangodb::StringRef const& second); - RocksDBKey(RocksDBEntryType type, uint64_t first, std::string const& second, - std::string const& third); - RocksDBKey(RocksDBEntryType type, uint64_t first, arangodb::StringRef const& second, - arangodb::StringRef const& third); + RocksDBKey(RocksDBEntryType type, uint64_t first, + arangodb::StringRef const& second, uint64_t third); private: static inline RocksDBEntryType type(char const* data, size_t size) { diff --git a/arangod/RocksDBEngine/RocksDBKeyBounds.cpp b/arangod/RocksDBEngine/RocksDBKeyBounds.cpp index 1494555f5d..7488916a2a 100644 --- a/arangod/RocksDBEngine/RocksDBKeyBounds.cpp +++ b/arangod/RocksDBEngine/RocksDBKeyBounds.cpp @@ -231,6 +231,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type) } } +/// bounds to iterate over entire index RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first) : _type(type) { switch (_type) { @@ -270,7 +271,6 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first) _internals.reserve(length); _internals.push_back(static_cast(_type)); uint64ToPersistent(_internals.buffer(), first); - uint64ToPersistent(_internals.buffer(), 0); _internals.separate(); @@ -301,6 +301,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first) } } +/// bounds to iterate over specified word or edge RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, arangodb::StringRef const& second) : _type(type) { @@ -320,7 +321,8 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, uint64ToPersistent(_internals.buffer(), first); _internals.buffer().append(second.data(), second.length()); _internals.push_back(_stringSeparator); - _internals.push_back(0xFFU); + uint64ToPersistent(_internals.buffer(), UINT64_MAX); + break; } @@ -329,6 +331,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, } } +/// iterate over the specified bounds of the velocypack index RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, VPackSlice const& second, VPackSlice const& third) @@ -347,7 +350,6 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, uint64ToPersistent(_internals.buffer(), first); _internals.buffer().append(reinterpret_cast(second.begin()), static_cast(second.byteSize())); - _internals.push_back(_stringSeparator); _internals.separate(); @@ -355,8 +357,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first, uint64ToPersistent(_internals.buffer(), first); 
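
// --- Illustrative sketch: why the bounds now end in UINT64_MAX (not part of the patch) ---
// Edge and fulltext keys now end in an 8-byte revision id instead of a primary key, so
// the per-word/per-vertex bounds pad the upper bound with uint64ToPersistent(UINT64_MAX):
// eight 0xFF bytes sort after every possible revision id, while the NUL separator keeps
// longer words (e.g. "foobar" vs "foo") out of the range. Standalone check with a
// memcmp-style comparison like RocksDB's bytewise comparator; the type byte and helper
// names are placeholders for this sketch.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

static int byteCompare(std::string const& a, std::string const& b) {
  int r = std::memcmp(a.data(), b.data(), std::min(a.size(), b.size()));
  if (r != 0) return r;
  return a.size() < b.size() ? -1 : (a.size() > b.size() ? 1 : 0);
}
static void appendUInt64(std::string& out, uint64_t v) {
  for (int i = 0; i < 8; ++i) out.push_back(static_cast<char>((v >> (8 * i)) & 0xFF));
}
static std::string indexKey(uint64_t objectId, std::string const& word, uint64_t rev) {
  std::string k(1, 'F');          // placeholder type byte
  appendUInt64(k, objectId);
  k.append(word);
  k.push_back('\0');              // string separator
  appendUInt64(k, rev);
  return k;
}

int main() {
  uint64_t objectId = 1;

  std::string lower(1, 'F');      // bounds.start(): prefix up to and including the separator
  appendUInt64(lower, objectId);
  lower.append("foo");
  lower.push_back('\0');

  std::string upper = lower;      // bounds.end(): same prefix + eight 0xFF bytes
  appendUInt64(upper, UINT64_MAX);

  std::string hit = indexKey(objectId, "foo", 33);
  std::string miss = indexKey(objectId, "foobar", 33);

  assert(byteCompare(lower, hit) <= 0 && byteCompare(hit, upper) < 0);  // inside the range
  assert(byteCompare(upper, miss) < 0);  // different word: sorts past the upper bound
  return 0;
}
// --- end sketch ---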
_internals.buffer().append(reinterpret_cast(third.begin()), static_cast(third.byteSize())); - _internals.push_back(_stringSeparator + 1); // compare greater than - // actual key + uint64ToPersistent(_internals.buffer(), UINT64_MAX); break; } diff --git a/arangod/RocksDBEngine/RocksDBVPackIndex.cpp b/arangod/RocksDBEngine/RocksDBVPackIndex.cpp index 2817e600e1..a1f12fdeb3 100644 --- a/arangod/RocksDBEngine/RocksDBVPackIndex.cpp +++ b/arangod/RocksDBEngine/RocksDBVPackIndex.cpp @@ -80,11 +80,9 @@ static std::vector const KeyAttribute{ RocksDBVPackIndexIterator::RocksDBVPackIndexIterator( LogicalCollection* collection, transaction::Methods* trx, ManagedDocumentResult* mmdr, arangodb::RocksDBVPackIndex const* index, - arangodb::RocksDBPrimaryIndex* primaryIndex, bool reverse, - VPackSlice const& left, VPackSlice const& right) + bool reverse, VPackSlice const& left, VPackSlice const& right) : IndexIterator(collection, trx, mmdr, index), _index(index), - _primaryIndex(primaryIndex), _cmp(index->comparator()), _reverse(reverse), _bounds(index->_unique ? RocksDBKeyBounds::UniqueIndexRange( @@ -140,15 +138,13 @@ bool RocksDBVPackIndexIterator::next(TokenCallback const& cb, size_t limit) { while (limit > 0) { TRI_ASSERT(_index->objectId() == RocksDBKey::objectId(_iterator->key())); - - StringRef primaryKey = _index->_unique - ? RocksDBValue::primaryKey(_iterator->value()) - : RocksDBKey::primaryKey(_iterator->key()); - RocksDBToken token(_primaryIndex->lookupKey(_trx, primaryKey)); - cb(token); + + TRI_voc_rid_t revisionId = + _index->_unique ? RocksDBValue::revisionId(_iterator->value()) + : RocksDBKey::revisionId(_iterator->key()); + cb(RocksDBToken(revisionId)); --limit; - if (_reverse) { _iterator->Prev(); } else { @@ -307,7 +303,6 @@ int RocksDBVPackIndex::fillElement(VPackBuilder& leased, THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); } - StringRef key(doc.get(StaticStrings::KeyString)); if (_unique) { // Unique VPack index values are stored as follows: // - Key: 7 + 8-byte object ID of index + VPack array with index @@ -323,20 +318,21 @@ int RocksDBVPackIndex::fillElement(VPackBuilder& leased, // + separator (NUL) byte + primary key // - Value: empty elements.emplace_back( - RocksDBKey::IndexValue(_objectId, key, leased.slice())); + RocksDBKey::IndexValue(_objectId, leased.slice(), revisionId)); hashes.push_back(leased.slice().normalizedHash()); } } else { // other path for handling array elements, too std::vector sliceStack; - buildIndexValues(leased, doc, 0, elements, sliceStack, hashes); + buildIndexValues(leased, revisionId, doc, 0, elements, sliceStack, hashes); } return TRI_ERROR_NO_ERROR; } void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased, + TRI_voc_rid_t revisionId, VPackSlice const& document, std::vector& elements, std::vector& sliceStack, @@ -348,7 +344,6 @@ void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased, } leased.close(); - StringRef key(document.get(StaticStrings::KeyString)); if (_unique) { // Unique VPack index values are stored as follows: // - Key: 7 + 8-byte object ID of index + VPack array with index value(s) @@ -361,15 +356,15 @@ void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased, // + primary key // - Value: empty elements.emplace_back( - RocksDBKey::IndexValue(_objectId, key, leased.slice())); + RocksDBKey::IndexValue(_objectId, leased.slice(), revisionId)); hashes.push_back(leased.slice().normalizedHash()); } } /// @brief helper function to create a set of index combinations to insert void RocksDBVPackIndex::buildIndexValues(VPackBuilder& 
leased, - VPackSlice const document, - size_t level, + TRI_voc_rid_t revisionId, + VPackSlice const doc, size_t level, std::vector& elements, std::vector& sliceStack, std::vector& hashes) { @@ -377,12 +372,12 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased, // Stop the recursion: if (level == _paths.size()) { - addIndexValue(leased, document, elements, sliceStack, hashes); + addIndexValue(leased, revisionId, doc, elements, sliceStack, hashes); return; } if (_expanding[level] == -1) { // the trivial, non-expanding case - VPackSlice slice = document.get(_paths[level]); + VPackSlice slice = doc.get(_paths[level]); if (slice.isNone() || slice.isNull()) { if (_sparse) { return; @@ -391,7 +386,8 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased, } else { sliceStack.emplace_back(slice); } - buildIndexValues(leased, document, level + 1, elements, sliceStack, hashes); + buildIndexValues(leased, revisionId, doc, level + 1, elements, sliceStack, + hashes); sliceStack.pop_back(); return; } @@ -412,14 +408,14 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased, for (size_t i = level; i < _paths.size(); i++) { sliceStack.emplace_back(illegalSlice); } - addIndexValue(leased, document, elements, sliceStack, hashes); + addIndexValue(leased, revisionId, doc, elements, sliceStack, hashes); for (size_t i = level; i < _paths.size(); i++) { sliceStack.pop_back(); } }; size_t const n = _paths[level].size(); // We have 0 <= _expanding[level] < n. - VPackSlice current(document); + VPackSlice current(doc); for (size_t i = 0; i <= static_cast(_expanding[level]); i++) { if (!current.isObject()) { finishWithNones(); @@ -447,7 +443,7 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased, if (it == seen.end()) { seen.insert(something); sliceStack.emplace_back(something); - buildIndexValues(leased, document, level + 1, elements, sliceStack, + buildIndexValues(leased, revisionId, doc, level + 1, elements, sliceStack, hashes); sliceStack.pop_back(); } @@ -526,8 +522,7 @@ int RocksDBVPackIndex::insert(transaction::Methods* trx, // now we are going to construct the value to insert into rocksdb // unique indexes have a different key structure - StringRef docKey(doc.get(StaticStrings::KeyString)); - RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(docKey) + RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(revisionId) : RocksDBValue::IndexValue(); RocksDBMethods* mthds = rocksutils::toRocksMethods(trx); @@ -594,8 +589,7 @@ int RocksDBVPackIndex::insertRaw(RocksDBMethods* batch, // now we are going to construct the value to insert into rocksdb // unique indexes have a different key structure - StringRef docKey(doc.get(StaticStrings::KeyString)); - RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(docKey) + RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(revisionId) : RocksDBValue::IndexValue(); for (RocksDBKey const& key : elements) { @@ -802,13 +796,8 @@ RocksDBVPackIndexIterator* RocksDBVPackIndex::lookup( } } - // Secured by trx. The shared_ptr index stays valid in - // _collection at least as long as trx is running. 
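
// --- Illustrative sketch: expanded array attributes with per-document dedup (not part of the patch) ---
// buildIndexValues() recurses over the configured paths; for an expanded attribute
// (e.g. "tags[*]") every distinct array value becomes one index entry, duplicates inside
// a single document are skipped via the `seen` set, and after this patch each entry
// carries the revision id instead of the primary key. Toy version of just that idea,
// with plain strings instead of VPackSlices and made-up sample values.
#include <cstdint>
#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

static std::vector<std::pair<std::string, uint64_t>> expandEntries(
    std::vector<std::string> const& arrayValues, uint64_t revisionId) {
  std::vector<std::pair<std::string, uint64_t>> entries;
  std::set<std::string> seen;  // mirrors the `seen` set in buildIndexValues()
  for (auto const& value : arrayValues) {
    if (seen.insert(value).second) {
      entries.emplace_back(value, revisionId);  // one key per distinct value, same revision
    }
  }
  return entries;
}

int main() {
  // e.g. a document { "_rev": ..., "tags": ["red", "blue", "red", "green"] }
  for (auto const& e : expandEntries({"red", "blue", "red", "green"}, 4711)) {
    std::cout << e.first << " -> " << e.second << "\n";  // red, blue, green
  }
  return 0;
}
// --- end sketch ---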
- // Same for the iterator - auto physical = static_cast(_collection->getPhysical()); - auto idx = physical->primaryIndex(); - return new RocksDBVPackIndexIterator(_collection, trx, mmdr, this, idx, - reverse, leftBorder, rightBorder); + return new RocksDBVPackIndexIterator(_collection, trx, mmdr, this, reverse, + leftBorder, rightBorder); } bool RocksDBVPackIndex::accessFitsIndex( @@ -1510,10 +1499,13 @@ void RocksDBVPackIndex::recalculateEstimates() { _estimator->clear(); auto bounds = RocksDBKeyBounds::IndexEntries(_objectId); - rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) { - uint64_t hash = RocksDBVPackIndex::HashForKey(it->key()); - _estimator->insert(hash); - }, arangodb::RocksDBColumnFamily::index()); + rocksutils::iterateBounds(bounds, + [&](rocksdb::Iterator* it) { + uint64_t hash = + RocksDBVPackIndex::HashForKey(it->key()); + _estimator->insert(hash); + }, + arangodb::RocksDBColumnFamily::index()); } Result RocksDBVPackIndex::postprocessRemove(transaction::Methods* trx, diff --git a/arangod/RocksDBEngine/RocksDBVPackIndex.h b/arangod/RocksDBEngine/RocksDBVPackIndex.h index e9553ffea9..17771da09b 100644 --- a/arangod/RocksDBEngine/RocksDBVPackIndex.h +++ b/arangod/RocksDBEngine/RocksDBVPackIndex.h @@ -66,7 +66,6 @@ class RocksDBVPackIndexIterator final : public IndexIterator { transaction::Methods* trx, ManagedDocumentResult* mmdr, arangodb::RocksDBVPackIndex const* index, - arangodb::RocksDBPrimaryIndex* primaryIndex, bool reverse, arangodb::velocypack::Slice const& left, arangodb::velocypack::Slice const& right); @@ -88,12 +87,11 @@ class RocksDBVPackIndexIterator final : public IndexIterator { bool outOfRange() const; arangodb::RocksDBVPackIndex const* _index; - arangodb::RocksDBPrimaryIndex* _primaryIndex; rocksdb::Comparator const* _cmp; std::unique_ptr _iterator; bool const _reverse; RocksDBKeyBounds _bounds; - rocksdb::Slice _upperBound; // used for iterate_upper_bound + rocksdb::Slice _upperBound; // used for iterate_upper_bound }; class RocksDBVPackIndex : public RocksDBIndex { @@ -111,7 +109,8 @@ class RocksDBVPackIndex : public RocksDBIndex { bool hasSelectivityEstimate() const override { return true; } - double selectivityEstimate(arangodb::StringRef const* = nullptr) const override; + double selectivityEstimate( + arangodb::StringRef const* = nullptr) const override; size_t memory() const override; @@ -180,9 +179,9 @@ class RocksDBVPackIndex : public RocksDBIndex { void recalculateEstimates() override; -protected: - Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key, - rocksdb::Slice const& value) override; + protected: + Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key, + rocksdb::Slice const& value) override; private: bool isDuplicateOperator(arangodb::aql::AstNode const*, @@ -216,7 +215,8 @@ protected: /// @brief helper function to build the key and value for rocksdb from the /// vector of slices /// @param hashes list of VPackSlice hashes for the estimator. - void addIndexValue(velocypack::Builder& leased, VPackSlice const& document, + void addIndexValue(velocypack::Builder& leased, TRI_voc_rid_t revisionId, + VPackSlice const& document, std::vector& elements, std::vector& sliceStack, std::vector& hashes); @@ -226,8 +226,9 @@ protected: /// @param elements vector of resulting index entries /// @param sliceStack working list of values to insert into the index /// @param hashes list of VPackSlice hashes for the estimator. 
- void buildIndexValues(velocypack::Builder& leased, VPackSlice const document, - size_t level, std::vector& elements, + void buildIndexValues(velocypack::Builder& leased, TRI_voc_rid_t revisionId, + VPackSlice const document, size_t level, + std::vector& elements, std::vector& sliceStack, std::vector& hashes); @@ -248,7 +249,6 @@ protected: /// On insertion of a document we have to insert it into the estimator, /// On removal we have to remove it in the estimator as well. std::unique_ptr> _estimator; - }; } // namespace arangodb diff --git a/arangod/RocksDBEngine/RocksDBValue.cpp b/arangod/RocksDBEngine/RocksDBValue.cpp index c8065a3560..7a70cbb845 100644 --- a/arangod/RocksDBEngine/RocksDBValue.cpp +++ b/arangod/RocksDBEngine/RocksDBValue.cpp @@ -53,8 +53,8 @@ RocksDBValue RocksDBValue::IndexValue() { return RocksDBValue(RocksDBEntryType::IndexValue); } -RocksDBValue RocksDBValue::UniqueIndexValue(StringRef const& primaryKey) { - return RocksDBValue(RocksDBEntryType::UniqueIndexValue, primaryKey); +RocksDBValue RocksDBValue::UniqueIndexValue(TRI_voc_rid_t revisionId) { + return RocksDBValue(RocksDBEntryType::UniqueIndexValue, revisionId); } RocksDBValue RocksDBValue::View(VPackSlice const& data) { @@ -110,6 +110,7 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type) : _type(type), _buffer() {} RocksDBValue::RocksDBValue(RocksDBEntryType type, uint64_t data) : _type(type), _buffer() { switch (_type) { + case RocksDBEntryType::UniqueIndexValue: case RocksDBEntryType::PrimaryIndexValue: { _buffer.reserve(sizeof(uint64_t)); uint64ToPersistent(_buffer, data); // revision id @@ -121,21 +122,6 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type, uint64_t data) } } -RocksDBValue::RocksDBValue(RocksDBEntryType type, - arangodb::StringRef const& data) - : _type(type), _buffer() { - switch (_type) { - case RocksDBEntryType::UniqueIndexValue: { - _buffer.reserve(data.length()); - _buffer.append(data.data(), data.length()); // primary key - break; - } - - default: - THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER); - } -} - RocksDBValue::RocksDBValue(RocksDBEntryType type, VPackSlice const& data) : _type(type), _buffer() { switch (_type) { @@ -156,7 +142,7 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type, VPackSlice const& data) } TRI_voc_rid_t RocksDBValue::revisionId(char const* data, uint64_t size) { - TRI_ASSERT(data != nullptr); + TRI_ASSERT(data != nullptr && size >= sizeof(uint64_t)); return uint64FromPersistent(data); } diff --git a/arangod/RocksDBEngine/RocksDBValue.h b/arangod/RocksDBEngine/RocksDBValue.h index 303a6607c6..22a2ea018e 100644 --- a/arangod/RocksDBEngine/RocksDBValue.h +++ b/arangod/RocksDBEngine/RocksDBValue.h @@ -51,7 +51,7 @@ class RocksDBValue { static RocksDBValue PrimaryIndexValue(TRI_voc_rid_t revisionId); static RocksDBValue EdgeIndexValue(); static RocksDBValue IndexValue(); - static RocksDBValue UniqueIndexValue(arangodb::StringRef const& primaryKey); + static RocksDBValue UniqueIndexValue(TRI_voc_rid_t revisionId); static RocksDBValue View(VPackSlice const& data); static RocksDBValue ReplicationApplierConfig(VPackSlice const& data); @@ -110,7 +110,6 @@ class RocksDBValue { RocksDBValue(); explicit RocksDBValue(RocksDBEntryType type); RocksDBValue(RocksDBEntryType type, uint64_t data); - RocksDBValue(RocksDBEntryType type, StringRef const& data); RocksDBValue(RocksDBEntryType type, VPackSlice const& data); private: diff --git a/arangod/V8Server/v8-collection.cpp b/arangod/V8Server/v8-collection.cpp index 633412af63..bc00f1e9ad 100644 --- 
a/arangod/V8Server/v8-collection.cpp +++ b/arangod/V8Server/v8-collection.cpp @@ -2712,13 +2712,19 @@ static void JS_TruncateVocbaseCol( if (collection == nullptr) { TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); } + + // optionally specify non trx remove + bool unsafeTruncate = false; + if (args.Length() > 0) { + unsafeTruncate = TRI_ObjectToBoolean(args[0]); + } + auto t = unsafeTruncate ? AccessMode::Type::EXCLUSIVE : AccessMode::Type::WRITE; SingleCollectionTransaction trx( transaction::V8Context::Create(collection->vocbase(), true), - collection->cid(), AccessMode::Type::WRITE); - + collection->cid(), t); + Result res = trx.begin(); - if (!res.ok()) { TRI_V8_THROW_EXCEPTION(res); } diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/models/clusterCoordinator.js b/js/apps/system/_admin/aardvark/APP/frontend/js/models/clusterCoordinator.js index 7d2bdc3ca8..7db6cd323b 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/models/clusterCoordinator.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/models/clusterCoordinator.js @@ -5,6 +5,7 @@ window.ClusterCoordinator = Backbone.Model.extend({ defaults: { 'name': '', + 'id': '', 'status': 'ok', 'address': '', 'protocol': '' diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/routers/router.js b/js/apps/system/_admin/aardvark/APP/frontend/js/routers/router.js index 849286644b..6666366d02 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/routers/router.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/routers/router.js @@ -326,10 +326,10 @@ this.clusterView.render(); }, - node: function (name, initialized) { + node: function (id, initialized) { this.checkUser(); if (!initialized || this.isCluster === undefined) { - this.waitForInit(this.node.bind(this), name); + this.waitForInit(this.node.bind(this), id); return; } if (this.isCluster === false) { @@ -342,7 +342,7 @@ this.nodeView.remove(); } this.nodeView = new window.NodeView({ - coordname: name, + coordid: id, coordinators: this.coordinatorCollection, dbServers: this.dbServers }); diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/views/nodeView.js b/js/apps/system/_admin/aardvark/APP/frontend/js/views/nodeView.js index c1f3ef00c6..c7f125b162 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/views/nodeView.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/views/nodeView.js @@ -17,7 +17,7 @@ if (window.App.isCluster) { this.coordinators = options.coordinators; this.dbServers = options.dbServers; - this.coordname = options.coordname; + this.coordid = options.coordid; this.updateServerTime(); // start polling with interval @@ -47,8 +47,7 @@ var callback = function () { this.continueRender(); - this.breadcrumb(arangoHelper.getCoordinatorShortName(this.coordname)); - // window.arangoHelper.buildNodeSubNav(this.coordname, 'Dashboard', 'Logs') + this.breadcrumb(arangoHelper.getCoordinatorShortName(this.coordid)); $(window).trigger('resize'); }.bind(this); @@ -59,8 +58,8 @@ if (!this.initDBDone) { this.waitForDBServers(callback); } else { - this.coordname = window.location.hash.split('/')[1]; - this.coordinator = this.coordinators.findWhere({name: this.coordname}); + this.coordid = window.location.hash.split('/')[1]; + this.coordinator = this.coordinators.findWhere({id: this.coordid}); callback(); } }, @@ -79,7 +78,7 @@ raw: this.coordinator.get('address'), isDBServer: false, endpoint: this.coordinator.get('protocol') + '://' + this.coordinator.get('address'), - target: this.coordinator.get('name') + target: 
this.coordinator.get('id') } }); } else { @@ -113,7 +112,7 @@ if (self.coordinators.length === 0) { self.waitForCoordinators(callback); } else { - self.coordinator = self.coordinators.findWhere({name: self.coordname}); + self.coordinator = self.coordinators.findWhere({id: self.coordid}); self.initCoordDone = true; if (callback) { callback(); diff --git a/lib/Random/RandomGenerator.cpp b/lib/Random/RandomGenerator.cpp index 7f91c831d2..47eb9529fd 100644 --- a/lib/Random/RandomGenerator.cpp +++ b/lib/Random/RandomGenerator.cpp @@ -362,7 +362,7 @@ class RandomDeviceCombined : public RandomDevice { class RandomDeviceMersenne : public RandomDevice { public: RandomDeviceMersenne() - : engine(RandomDevice::seed()) {} + : engine((uint_fast32_t)RandomDevice::seed()) {} uint32_t random() { return engine(); } void seed(uint64_t seed) { engine.seed(static_cast(seed)); } diff --git a/scripts/limitMemory.sh b/scripts/limitMemory.sh index e814e9d83e..bfc53dc91f 100755 --- a/scripts/limitMemory.sh +++ b/scripts/limitMemory.sh @@ -29,4 +29,5 @@ if [[ -e $fpath ]]; then fi #execute -sudo cgexec -g memory:arango_mem su - $USER -c "$@" +sudo cgexec -g memory:arango_mem su -l -p -c "$@" $USER + diff --git a/tests/Basics/RocksDBKeyTest.cpp b/tests/Basics/RocksDBKeyTest.cpp index 99ba05ecea..76d3734645 100644 --- a/tests/Basics/RocksDBKeyTest.cpp +++ b/tests/Basics/RocksDBKeyTest.cpp @@ -222,12 +222,11 @@ SECTION("test_primary_index") { /// @brief test edge index SECTION("test_edge_index") { - RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, StringRef("a/1"), StringRef("foobar")); + RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, StringRef("a/1"), 33); auto const& s1 = key1.string(); - - CHECK(s1.size() == sizeof(char) + sizeof(uint64_t) + strlen("a/1") + sizeof(char) + strlen("foobar") + sizeof(char)); - CHECK(s1 == std::string("5\0\0\0\0\0\0\0\0a/1\0foobar\x06", 20)); - + + CHECK(s1.size() == sizeof(char) + sizeof(uint64_t) + strlen("a/1") + sizeof(char) + sizeof(uint64_t)); + CHECK(s1 == std::string("5\0\0\0\0\0\0\0\0a/1\0!\0\0\0\0\0\0\0", 21)); } } diff --git a/tests/RocksDBEngine/TypeConversionTest.cpp b/tests/RocksDBEngine/TypeConversionTest.cpp index 94e523b723..b3f6ea11c8 100644 --- a/tests/RocksDBEngine/TypeConversionTest.cpp +++ b/tests/RocksDBEngine/TypeConversionTest.cpp @@ -47,10 +47,11 @@ void doFromToTest(double num){ template void doFromToTest(T num){ T x = num , y; - char s[sizeof(x)]; - char* p = &s[0]; - toPersistent(x,p); - y = fromPersistent(p); + char s[sizeof(x)] = {0}; + char* p1 = &s[0]; + char* p2 = p1; + toPersistent(x,p1); + y = fromPersistent(p2); CHECK((x == y)); }
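
// --- Illustrative sketch: why TypeConversionTest now keeps two cursors (not part of the patch) ---
// The test change splits the single pointer into p1 (write) and p2 (read). The apparent
// reason is that toPersistent()/fromPersistent() advance the cursor they are handed, so
// reading back through the already-advanced write pointer would read past the value.
// Toy stand-ins below advance a char*& the same way; the raw memcpy encoding is an
// assumption made only for this sketch, the round trip is the point.
#include <cassert>
#include <cstdint>
#include <cstring>

template <typename T>
void toPersistent(T value, char*& cursor) {
  std::memcpy(cursor, &value, sizeof(T));
  cursor += sizeof(T);              // cursor now points one past the written value
}

template <typename T>
T fromPersistent(char*& cursor) {
  T value;
  std::memcpy(&value, cursor, sizeof(T));
  cursor += sizeof(T);
  return value;
}

int main() {
  uint64_t x = 0xCAFEBABEULL;
  char buffer[sizeof(x)] = {0};
  char* writeCursor = &buffer[0];
  char* readCursor = writeCursor;   // second cursor, still at the start of the buffer

  toPersistent(x, writeCursor);
  uint64_t y = fromPersistent<uint64_t>(readCursor);
  assert(x == y);                   // reading via writeCursor instead would miss the value
  return 0;
}
// --- end sketch ---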