1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

This commit is contained in:
jsteemann 2017-05-30 16:39:48 +02:00
commit 2961f33a4b
22 changed files with 279 additions and 303 deletions

View File

@ -634,7 +634,7 @@ struct DMIDGraphFormat : public GraphFormat<DMIDValue, float> {
b.add(_resultField, VPackValue(VPackValueType::Array));
for (std::pair<PregelID, float> const& pair : ptr->membershipDegree) {
b.openArray();
b.add(VPackValue(pair.first.key));
b.add(VPackValue(arangodb::basics::StringUtils::int64(pair.first.key)));
b.add(VPackValue(pair.second));
b.close();
}

View File

@ -192,12 +192,22 @@ struct SLPAGraphFormat : public GraphFormat<SLPAValue, int8_t> {
} else if (vec.size() == 1 || maxCommunities == 1) {
b.add(resField, VPackValue(vec[0].first));
} else {
b.add(resField, VPackValue(VPackValueType::Object));
for (unsigned c = 0; c < vec.size() && c < maxCommunities; c++) {
b.add(arangodb::basics::StringUtils::itoa(vec[c].first),
VPackValue(vec[c].second));
// output for use with the DMID/Metrics code
b.add(resField, VPackValue(VPackValueType::Array));
for (unsigned c = 0; c < vec.size() && c < maxCommunities;
c++) {
b.openArray();
b.add(VPackValue(vec[c].first));
b.add(VPackValue(vec[c].second));
b.close();
}
b.close();
/*b.add(resField, VPackValue(VPackValueType::Object));
for (unsigned c = 0; c < vec.size() && c < maxCommunities; c++) {
b.add(arangodb::basics::StringUtils::itoa(vec[c].first),
VPackValue(vec[c].second));
}
b.close();*/
}
}
return true;

View File

@ -631,9 +631,10 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
TRI_ASSERT(_objectId != 0);
TRI_voc_cid_t cid = _logicalCollection->cid();
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
// delete documents
RocksDBMethods* mthd = state->rocksdbMethods();
RocksDBMethods* mthd;
mthd = state->rocksdbMethods();
RocksDBKeyBounds documentBounds =
RocksDBKeyBounds::CollectionDocuments(this->objectId());

View File

@ -187,6 +187,7 @@ void iterateBounds(
rocksdb::ReadOptions options = rocksdb::ReadOptions()) {
rocksdb::Slice const end = bounds.end();
options.iterate_upper_bound = &end;  // safe to use on rocksdb::DB directly
options.prefix_same_as_start = true;
std::unique_ptr<rocksdb::Iterator> it(globalRocksDB()->NewIterator(options, handle));
for (it->Seek(bounds.start()); it->Valid(); it->Next()) {
callback(it.get());

View File

@ -328,30 +328,21 @@ void RocksDBEdgeIndexIterator::lookupInRocksDB(StringRef fromTo) {
rocksdb::Comparator const* cmp = _index->comparator();
_builder.openArray();
RocksDBToken token;
auto end = _bounds.end();
while (_iterator->Valid() &&
(cmp->Compare(_iterator->key(), end) < 0)) {
StringRef edgeKey = RocksDBKey::primaryKey(_iterator->key());
Result res = rocksColl->lookupDocumentToken(_trx, edgeKey, token);
if (res.ok()) {
ManagedDocumentResult mmdr;
if (rocksColl->readDocument(_trx, token, mmdr)) {
_builder.add(VPackValue(token.revisionId()));
VPackSlice doc(mmdr.vpack());
TRI_ASSERT(doc.isObject());
_builder.add(doc);
} else {
// Data Inconsistency.
// We have a revision id without a document...
TRI_ASSERT(false);
}
#ifdef USE_MAINTAINER_MODE
while (_iterator->Valid() && (cmp->Compare(_iterator->key(), end) < 0)) {
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(_iterator->key());
RocksDBToken token(revisionId);
ManagedDocumentResult mmdr;
if (rocksColl->readDocument(_trx, token, mmdr)) {
_builder.add(VPackValue(token.revisionId()));
VPackSlice doc(mmdr.vpack());
TRI_ASSERT(doc.isObject());
_builder.add(doc);
} else {
// Index inconsistency, we indexed a primaryKey => revision that is
// not known any more
TRI_ASSERT(res.ok());
#endif
// Data Inconsistency.
// We have a revision id without a document...
TRI_ASSERT(false);
}
_iterator->Next();
}
@ -455,12 +446,10 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
int RocksDBEdgeIndex::insert(transaction::Methods* trx,
TRI_voc_rid_t revisionId, VPackSlice const& doc,
bool isRollback) {
VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.get(_directionAttr);
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
TRI_ASSERT(fromTo.isString());
auto fromToRef = StringRef(fromTo);
RocksDBKey key =
RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey));
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef, revisionId);
// blacklist key in cache
blackListKey(fromToRef);
@ -486,12 +475,11 @@ int RocksDBEdgeIndex::insertRaw(RocksDBMethods*, TRI_voc_rid_t,
int RocksDBEdgeIndex::remove(transaction::Methods* trx,
TRI_voc_rid_t revisionId, VPackSlice const& doc,
bool isRollback) {
VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
// VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.get(_directionAttr);
auto fromToRef = StringRef(fromTo);
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
RocksDBKey key =
RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey));
TRI_ASSERT(fromTo.isString());
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef, revisionId);
// blacklist key in cache
blackListKey(fromToRef);
@ -521,12 +509,12 @@ void RocksDBEdgeIndex::batchInsert(
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
RocksDBMethods* mthd = rocksutils::toRocksMethods(trx);
for (std::pair<TRI_voc_rid_t, VPackSlice> const& doc : documents) {
VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString);
// VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString);
VPackSlice fromTo = doc.second.get(_directionAttr);
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
TRI_ASSERT(fromTo.isString());
auto fromToRef = StringRef(fromTo);
RocksDBKey key =
RocksDBKey::EdgeIndexValue(_objectId, fromToRef, StringRef(primaryKey));
RocksDBKey::EdgeIndexValue(_objectId, fromToRef, doc.first);
blackListKey(fromToRef);
Result r = mthd->Put(_cf, rocksdb::Slice(key.string()), rocksdb::Slice(),
@ -648,7 +636,8 @@ void RocksDBEdgeIndex::warmup(arangodb::transaction::Methods* trx) {
return;
}
auto rocksColl = toRocksDBCollection(_collection);
uint64_t expectedCount = static_cast<uint64_t>(selectivityEstimate() * rocksColl->numberDocuments());
uint64_t expectedCount = static_cast<uint64_t>(selectivityEstimate() *
rocksColl->numberDocuments());
// Prepare the cache to be resized for this amount of objects to be inserted.
_cache->sizeHint(expectedCount);
@ -657,77 +646,77 @@ void RocksDBEdgeIndex::warmup(arangodb::transaction::Methods* trx) {
std::string previous = "";
VPackBuilder builder;
ManagedDocumentResult mmdr;
RocksDBToken token;
bool needsInsert = false;
rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) {
auto key = it->key();
StringRef v = RocksDBKey::vertexId(key);
if (previous.empty()) {
// First call.
builder.clear();
previous = v.toString();
auto finding = _cache->find(previous.data(), (uint32_t)previous.size());
if (finding.found()) {
needsInsert = false;
} else {
needsInsert = true;
builder.openArray();
}
}
if (v != previous) {
if (needsInsert) {
// Switch to next vertex id.
// Store what we have.
builder.close();
while(_cache->isResizing() || _cache->isMigrating()) {
// We should wait here, the cache will reject
// any inserts anyways.
usleep(10000);
rocksutils::iterateBounds(
bounds,
[&](rocksdb::Iterator* it) {
rocksdb::Slice key = it->key();
StringRef v = RocksDBKey::vertexId(key);
if (previous.empty()) {
// First call.
builder.clear();
previous = v.toString();
auto finding =
_cache->find(previous.data(), (uint32_t)previous.size());
if (finding.found()) {
needsInsert = false;
} else {
needsInsert = true;
builder.openArray();
}
}
auto entry = cache::CachedValue::construct(
previous.data(), static_cast<uint32_t>(previous.size()),
builder.slice().start(),
static_cast<uint64_t>(builder.slice().byteSize()));
if (!_cache->insert(entry)) {
delete entry;
if (v != previous) {
if (needsInsert) {
// Switch to next vertex id.
// Store what we have.
builder.close();
while (_cache->isResizing() || _cache->isMigrating()) {
// We should wait here, the cache will reject
// any inserts anyways.
usleep(10000);
}
auto entry = cache::CachedValue::construct(
previous.data(), static_cast<uint32_t>(previous.size()),
builder.slice().start(),
static_cast<uint64_t>(builder.slice().byteSize()));
if (!_cache->insert(entry)) {
delete entry;
}
builder.clear();
}
// Need to store
previous = v.toString();
auto finding =
_cache->find(previous.data(), (uint32_t)previous.size());
if (finding.found()) {
needsInsert = false;
} else {
needsInsert = true;
builder.openArray();
}
}
builder.clear();
}
// Need to store
previous = v.toString();
auto finding = _cache->find(previous.data(), (uint32_t)previous.size());
if (finding.found()) {
needsInsert = false;
} else {
needsInsert = true;
builder.openArray();
}
}
if (needsInsert) {
StringRef edgeKey = RocksDBKey::primaryKey(key);
Result res = rocksColl->lookupDocumentToken(trx, edgeKey, token);
if (res.ok() && rocksColl->readDocument(trx, token, mmdr)) {
builder.add(VPackValue(token.revisionId()));
VPackSlice doc(mmdr.vpack());
TRI_ASSERT(doc.isObject());
builder.add(doc);
if (needsInsert) {
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(key);
RocksDBToken token(revisionId);
if (rocksColl->readDocument(trx, token, mmdr)) {
builder.add(VPackValue(token.revisionId()));
VPackSlice doc(mmdr.vpack());
TRI_ASSERT(doc.isObject());
builder.add(doc);
#ifdef USE_MAINTAINER_MODE
} else {
// Data Inconsistency.
// We have a revision id without a document...
TRI_ASSERT(false);
} else {
// Data Inconsistency.
// We have a revision id without a document...
TRI_ASSERT(false);
#endif
}
}
}, RocksDBColumnFamily::edge());
}
}
},
RocksDBColumnFamily::edge());
if (!previous.empty() && needsInsert) {
// We still have something to store
@ -847,10 +836,13 @@ void RocksDBEdgeIndex::recalculateEstimates() {
_estimator->clear();
auto bounds = RocksDBKeyBounds::EdgeIndex(_objectId);
rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) {
uint64_t hash = RocksDBEdgeIndex::HashForKey(it->key());
_estimator->insert(hash);
}, arangodb::RocksDBColumnFamily::edge());
rocksutils::iterateBounds(bounds,
[&](rocksdb::Iterator* it) {
uint64_t hash =
RocksDBEdgeIndex::HashForKey(it->key());
_estimator->insert(hash);
},
arangodb::RocksDBColumnFamily::edge());
}
Result RocksDBEdgeIndex::postprocessRemove(transaction::Methods* trx,

View File

@ -201,14 +201,13 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
RocksDBMethods* mthd = rocksutils::toRocksMethods(trx);
// now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
RocksDBValue value = RocksDBValue::IndexValue();
int res = TRI_ERROR_NO_ERROR;
// size_t const count = words.size();
for (std::string const& word : words) {
RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
Result r = mthd->Put(_cf, key, value.string(), rocksutils::index);
if (!r.ok()) {
@ -220,14 +219,16 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
for (size_t j = 0; j < i; ++j) {
std::string const& word = words[j];
RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word),
revisionId);
rtrx->Delete(key.string());
}
}*/
return res;
}
int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch, TRI_voc_rid_t,
int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch,
TRI_voc_rid_t revisionId,
arangodb::velocypack::Slice const& doc) {
std::set<std::string> words = wordlist(doc);
if (words.empty()) {
@ -236,12 +237,12 @@ int RocksDBFulltextIndex::insertRaw(RocksDBMethods* batch, TRI_voc_rid_t,
// now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
// StringRef docKey(doc.get(StaticStrings::KeyString));
RocksDBValue value = RocksDBValue::IndexValue();
for (std::string const& word : words) {
RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
batch->Put(_cf, key, value.string());
}
@ -261,11 +262,10 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
RocksDBMethods* mthd = rocksutils::toRocksMethods(trx);
// now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
int res = TRI_ERROR_NO_ERROR;
for (std::string const& word : words) {
RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
Result r = mthd->Delete(_cf, key);
if (!r.ok()) {
@ -276,15 +276,15 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
return res;
}
int RocksDBFulltextIndex::removeRaw(RocksDBMethods* batch, TRI_voc_rid_t,
int RocksDBFulltextIndex::removeRaw(RocksDBMethods* batch,
TRI_voc_rid_t revisionId,
arangodb::velocypack::Slice const& doc) {
std::set<std::string> words = wordlist(doc);
// now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
for (std::string const& word : words) {
RocksDBKey key =
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), revisionId);
batch->Delete(_cf, key);
}
return TRI_ERROR_NO_ERROR;
@ -462,29 +462,25 @@ Result RocksDBFulltextIndex::executeQuery(transaction::Methods* trx,
FulltextQuery const& query,
size_t maxResults,
VPackBuilder& builder) {
std::set<std::string> resultSet;
std::set<TRI_voc_rid_t> resultSet;
for (FulltextQueryToken const& token : query) {
applyQueryToken(trx, token, resultSet);
}
auto physical = static_cast<RocksDBCollection*>(_collection->getPhysical());
auto idx = physical->primaryIndex();
ManagedDocumentResult mmdr;
if (maxResults == 0) { // 0 apparently means "all results"
maxResults = SIZE_MAX;
}
builder.openArray();
// get the first N results
std::set<std::string>::iterator it = resultSet.cbegin();
std::set<TRI_voc_rid_t>::iterator it = resultSet.cbegin();
while (maxResults > 0 && it != resultSet.cend()) {
RocksDBToken token = idx->lookupKey(trx, StringRef(*it));
if (token.revisionId()) {
if (physical->readDocument(trx, token, mmdr)) {
mmdr.addToBuilder(builder, true);
maxResults--;
}
RocksDBToken token(*it);
if (token.revisionId() && physical->readDocument(trx, token, mmdr)) {
mmdr.addToBuilder(builder, true);
maxResults--;
}
++it;
}
@ -503,9 +499,9 @@ static RocksDBKeyBounds MakeBounds(uint64_t oid,
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
FulltextQueryToken const& token,
std::set<std::string>& resultSet) {
Result RocksDBFulltextIndex::applyQueryToken(
transaction::Methods* trx, FulltextQueryToken const& token,
std::set<TRI_voc_rid_t>& resultSet) {
RocksDBMethods* mthds = rocksutils::toRocksMethods(trx);
// why can't I have an assignment operator when I want one
RocksDBKeyBounds bounds = MakeBounds(_objectId, token);
@ -518,7 +514,7 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
iter->Seek(bounds.start());
// set is used to perform an intersection with the result set
std::set<std::string> intersect;
std::set<TRI_voc_rid_t> intersect;
// apply left to right logic, merging all current results with ALL previous
while (iter->Valid() && cmp->Compare(iter->key(), end) < 0) {
TRI_ASSERT(_objectId == RocksDBKey::objectId(iter->key()));
@ -528,13 +524,13 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
return rocksutils::convertStatus(s);
}
StringRef key = RocksDBKey::primaryKey(iter->key());
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(iter->key());
if (token.operation == FulltextQueryToken::AND) {
intersect.insert(key.toString());
intersect.insert(revisionId);
} else if (token.operation == FulltextQueryToken::OR) {
resultSet.insert(key.toString());
resultSet.insert(revisionId);
} else if (token.operation == FulltextQueryToken::EXCLUDE) {
resultSet.erase(key.toString());
resultSet.erase(revisionId);
}
iter->Next();
}
@ -542,7 +538,7 @@ Result RocksDBFulltextIndex::applyQueryToken(transaction::Methods* trx,
if (resultSet.empty() || intersect.empty()) {
resultSet.clear();
} else {
std::set<std::string> output;
std::set<TRI_voc_rid_t> output;
std::set_intersection(resultSet.begin(), resultSet.end(),
intersect.begin(), intersect.end(),
std::inserter(output, output.begin()));

View File

@ -141,7 +141,7 @@ class RocksDBFulltextIndex final : public RocksDBIndex {
arangodb::Result applyQueryToken(transaction::Methods* trx,
FulltextQueryToken const&,
std::set<std::string>& resultSet);
std::set<TRI_voc_rid_t>& resultSet);
};
} // namespace arangodb

View File

@ -60,16 +60,16 @@ RocksDBKey RocksDBKey::PrimaryIndexValue(uint64_t indexId,
RocksDBKey RocksDBKey::EdgeIndexValue(uint64_t indexId,
arangodb::StringRef const& vertexId,
arangodb::StringRef const& primaryKey) {
TRI_voc_rid_t revisionId) {
return RocksDBKey(RocksDBEntryType::EdgeIndexValue, indexId, vertexId,
primaryKey);
revisionId);
}
RocksDBKey RocksDBKey::IndexValue(uint64_t indexId,
arangodb::StringRef const& primaryKey,
VPackSlice const& indexValues) {
return RocksDBKey(RocksDBEntryType::IndexValue, indexId, primaryKey,
indexValues);
VPackSlice const& indexValues,
TRI_voc_rid_t revisionId) {
return RocksDBKey(RocksDBEntryType::IndexValue, indexId, indexValues,
revisionId);
}
RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId,
@ -79,18 +79,20 @@ RocksDBKey RocksDBKey::UniqueIndexValue(uint64_t indexId,
RocksDBKey RocksDBKey::FulltextIndexValue(uint64_t indexId,
arangodb::StringRef const& word,
arangodb::StringRef const& primaryKey) {
return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word, primaryKey);
TRI_voc_rid_t revisionId) {
return RocksDBKey(RocksDBEntryType::FulltextIndexValue, indexId, word,
revisionId);
}
RocksDBKey RocksDBKey::GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot) {
RocksDBKey RocksDBKey::GeoIndexValue(uint64_t indexId, int32_t offset,
bool isSlot) {
RocksDBKey key(RocksDBEntryType::GeoIndexValue);
size_t length = sizeof(char) + sizeof(indexId) + sizeof(offset);
key._buffer.reserve(length);
uint64ToPersistent(key._buffer, indexId);
uint64_t norm = uint64_t(offset) << 32;
norm |= isSlot ? 0xFFU : 0; //encode slot|pot in lowest bit
norm |= isSlot ? 0xFFU : 0; // encode slot|pot in lowest bit
uint64ToPersistent(key._buffer, norm);
return key;
}
@ -171,6 +173,7 @@ arangodb::StringRef RocksDBKey::primaryKey(RocksDBKey const& key) {
arangodb::StringRef RocksDBKey::primaryKey(rocksdb::Slice const& slice) {
return primaryKey(slice.data(), slice.size());
}
StringRef RocksDBKey::vertexId(RocksDBKey const& key) {
return vertexId(key._buffer.data(), key._buffer.size());
}
@ -191,8 +194,9 @@ std::pair<bool, int32_t> RocksDBKey::geoValues(rocksdb::Slice const& slice) {
TRI_ASSERT(slice.size() >= sizeof(char) + sizeof(uint64_t) * 2);
RocksDBEntryType type = static_cast<RocksDBEntryType>(*slice.data());
TRI_ASSERT(type == RocksDBEntryType::GeoIndexValue);
uint64_t val = uint64FromPersistent(slice.data() + sizeof(char) + sizeof(uint64_t));
bool isSlot = ((val & 0xFFULL) > 0);// lowest byte is 0xFF if true
uint64_t val =
uint64FromPersistent(slice.data() + sizeof(char) + sizeof(uint64_t));
bool isSlot = ((val & 0xFFULL) > 0); // lowest byte is 0xFF if true
return std::pair<bool, int32_t>(isSlot, (val >> 32));
}
@ -275,27 +279,22 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second)
}
RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& docKey,
VPackSlice const& indexData)
VPackSlice const& second, uint64_t third)
: _type(type), _buffer() {
switch (_type) {
case RocksDBEntryType::IndexValue: {
// Non-unique VPack index values are stored as follows:
// - Key: 6 + 8-byte object ID of index + VPack array with index value(s)
// + separator byte + primary key + primary key length
// + revisionID
// - Value: empty
size_t length = sizeof(char) + sizeof(uint64_t) +
static_cast<size_t>(indexData.byteSize()) + sizeof(char) +
docKey.length() + sizeof(char);
static_cast<size_t>(second.byteSize()) + sizeof(uint64_t);
_buffer.reserve(length);
_buffer.push_back(static_cast<char>(_type));
uint64ToPersistent(_buffer, first);
_buffer.append(reinterpret_cast<char const*>(indexData.begin()),
static_cast<size_t>(indexData.byteSize()));
_buffer.push_back(_stringSeparator);
_buffer.append(docKey.data(), docKey.length());
_buffer.push_back(static_cast<char>(docKey.length() & 0xff));
_buffer.append(reinterpret_cast<char const*>(second.begin()),
static_cast<size_t>(second.byteSize()));
uint64ToPersistent(_buffer, third);
TRI_ASSERT(_buffer.size() == length);
break;
}
@ -324,22 +323,19 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
}
RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& second,
arangodb::StringRef const& third)
arangodb::StringRef const& second, uint64_t third)
: _type(type), _buffer() {
switch (_type) {
case RocksDBEntryType::FulltextIndexValue:
case RocksDBEntryType::EdgeIndexValue: {
size_t length = sizeof(char) + sizeof(uint64_t) + second.size() +
sizeof(char) + third.size() + sizeof(uint8_t);
size_t length =
sizeof(char) + sizeof(uint64_t) + second.size() + sizeof(third);
_buffer.reserve(length);
_buffer.push_back(static_cast<char>(_type));
uint64ToPersistent(_buffer, first);
_buffer.append(second.data(), second.length());
_buffer.push_back(_stringSeparator);
_buffer.append(third.data(), third.length());
TRI_ASSERT(third.size() <= 254);
_buffer.push_back(static_cast<char>(third.size() & 0xff));
uint64ToPersistent(_buffer, third);
break;
}
@ -434,6 +430,13 @@ TRI_voc_rid_t RocksDBKey::revisionId(char const* data, size_t size) {
TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t))));
return uint64FromPersistent(data + sizeof(char) + sizeof(uint64_t));
}
case RocksDBEntryType::EdgeIndexValue:
case RocksDBEntryType::IndexValue:
case RocksDBEntryType::FulltextIndexValue: {
TRI_ASSERT(size >= (sizeof(char) + (2 * sizeof(uint64_t))));
// the last 8 bytes should be the revision
return uint64FromPersistent(data + size - sizeof(uint64_t));
}
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR);
@ -451,14 +454,6 @@ arangodb::StringRef RocksDBKey::primaryKey(char const* data, size_t size) {
return arangodb::StringRef(data + sizeof(char) + sizeof(uint64_t),
keySize);
}
case RocksDBEntryType::EdgeIndexValue:
case RocksDBEntryType::IndexValue:
case RocksDBEntryType::FulltextIndexValue: {
TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t) + sizeof(uint8_t)));
size_t keySize = static_cast<size_t>(data[size - 1]);
return arangodb::StringRef(data + (size - (keySize + sizeof(uint8_t))),
keySize);
}
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_TYPE_ERROR);
@ -471,11 +466,10 @@ StringRef RocksDBKey::vertexId(char const* data, size_t size) {
RocksDBEntryType type = static_cast<RocksDBEntryType>(data[0]);
switch (type) {
case RocksDBEntryType::EdgeIndexValue: {
TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t) + sizeof(uint8_t)));
size_t keySize = static_cast<size_t>(data[size - 1]);
size_t idSize = size - (sizeof(char) + sizeof(uint64_t) + sizeof(char) +
keySize + sizeof(uint8_t));
return StringRef(data + sizeof(char) + sizeof(uint64_t), idSize);
// 1 byte prefix + 8 byte objectID + _from/_to + 1 byte \0 + 8 byte rev
TRI_ASSERT(size > (sizeof(char) + sizeof(uint64_t)) * 2);
size_t keySize = size - (sizeof(char) + sizeof(uint64_t)) * 2;
return StringRef(data + sizeof(char) + sizeof(uint64_t), keySize);
}
default:

View File

@ -89,7 +89,7 @@ class RocksDBKey {
//////////////////////////////////////////////////////////////////////////////
static RocksDBKey EdgeIndexValue(uint64_t indexId,
arangodb::StringRef const& vertexId,
arangodb::StringRef const& primaryKey);
TRI_voc_rid_t revisionId);
//////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for an entry in a user-defined,
@ -98,9 +98,8 @@ class RocksDBKey {
/// The indexId is an object ID generated by the engine, rather than the
/// actual index ID.
//////////////////////////////////////////////////////////////////////////////
static RocksDBKey IndexValue(uint64_t indexId,
arangodb::StringRef const& primaryKey,
VPackSlice const& indexValues);
static RocksDBKey IndexValue(uint64_t indexId, VPackSlice const& indexValues,
TRI_voc_rid_t revisionId);
//////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for an entry in a unique user-defined
@ -117,12 +116,13 @@ class RocksDBKey {
//////////////////////////////////////////////////////////////////////////////
static RocksDBKey FulltextIndexValue(uint64_t indexId,
arangodb::StringRef const& word,
arangodb::StringRef const& primaryKey);
TRI_voc_rid_t revisionId);
//////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for a geoIndexValue
//////////////////////////////////////////////////////////////////////////////
static RocksDBKey GeoIndexValue(uint64_t indexId, int32_t offset, bool isSlot);
static RocksDBKey GeoIndexValue(uint64_t indexId, int32_t offset,
bool isSlot);
//////////////////////////////////////////////////////////////////////////////
/// @brief Create a fully-specified key for a view
@ -161,7 +161,6 @@ class RocksDBKey {
return type(slice.data(), slice.size());
}
//////////////////////////////////////////////////////////////////////////////
/// @brief Extracts the object id
///
@ -257,14 +256,12 @@ class RocksDBKey {
RocksDBKey(RocksDBEntryType type, uint64_t first);
RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second);
RocksDBKey(RocksDBEntryType type, uint64_t first, VPackSlice const& slice);
RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& docKey, VPackSlice const& indexData);
RocksDBKey(RocksDBEntryType type, uint64_t first, VPackSlice const& second,
TRI_voc_rid_t third);
RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& second);
RocksDBKey(RocksDBEntryType type, uint64_t first, std::string const& second,
std::string const& third);
RocksDBKey(RocksDBEntryType type, uint64_t first, arangodb::StringRef const& second,
arangodb::StringRef const& third);
RocksDBKey(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& second, uint64_t third);
private:
static inline RocksDBEntryType type(char const* data, size_t size) {

View File

@ -231,6 +231,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type)
}
}
/// bounds to iterate over entire index
RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
: _type(type) {
switch (_type) {
@ -270,7 +271,6 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
_internals.reserve(length);
_internals.push_back(static_cast<char>(_type));
uint64ToPersistent(_internals.buffer(), first);
uint64ToPersistent(_internals.buffer(), 0);
_internals.separate();
@ -301,6 +301,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first)
}
}
/// bounds to iterate over specified word or edge
RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
arangodb::StringRef const& second)
: _type(type) {
@ -320,7 +321,8 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
uint64ToPersistent(_internals.buffer(), first);
_internals.buffer().append(second.data(), second.length());
_internals.push_back(_stringSeparator);
_internals.push_back(0xFFU);
uint64ToPersistent(_internals.buffer(), UINT64_MAX);
break;
}
@ -329,6 +331,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
}
}
/// iterate over the specified bounds of the velocypack index
RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
VPackSlice const& second,
VPackSlice const& third)
@ -347,7 +350,6 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
uint64ToPersistent(_internals.buffer(), first);
_internals.buffer().append(reinterpret_cast<char const*>(second.begin()),
static_cast<size_t>(second.byteSize()));
_internals.push_back(_stringSeparator);
_internals.separate();
@ -355,8 +357,7 @@ RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
uint64ToPersistent(_internals.buffer(), first);
_internals.buffer().append(reinterpret_cast<char const*>(third.begin()),
static_cast<size_t>(third.byteSize()));
_internals.push_back(_stringSeparator + 1); // compare greater than
// actual key
uint64ToPersistent(_internals.buffer(), UINT64_MAX);
break;
}

View File

@ -80,11 +80,9 @@ static std::vector<arangodb::basics::AttributeName> const KeyAttribute{
RocksDBVPackIndexIterator::RocksDBVPackIndexIterator(
LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr, arangodb::RocksDBVPackIndex const* index,
arangodb::RocksDBPrimaryIndex* primaryIndex, bool reverse,
VPackSlice const& left, VPackSlice const& right)
bool reverse, VPackSlice const& left, VPackSlice const& right)
: IndexIterator(collection, trx, mmdr, index),
_index(index),
_primaryIndex(primaryIndex),
_cmp(index->comparator()),
_reverse(reverse),
_bounds(index->_unique ? RocksDBKeyBounds::UniqueIndexRange(
@ -141,14 +139,12 @@ bool RocksDBVPackIndexIterator::next(TokenCallback const& cb, size_t limit) {
while (limit > 0) {
TRI_ASSERT(_index->objectId() == RocksDBKey::objectId(_iterator->key()));
StringRef primaryKey = _index->_unique
? RocksDBValue::primaryKey(_iterator->value())
: RocksDBKey::primaryKey(_iterator->key());
RocksDBToken token(_primaryIndex->lookupKey(_trx, primaryKey));
cb(token);
TRI_voc_rid_t revisionId =
_index->_unique ? RocksDBValue::revisionId(_iterator->value())
: RocksDBKey::revisionId(_iterator->key());
cb(RocksDBToken(revisionId));
--limit;
if (_reverse) {
_iterator->Prev();
} else {
@ -307,7 +303,6 @@ int RocksDBVPackIndex::fillElement(VPackBuilder& leased,
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
}
StringRef key(doc.get(StaticStrings::KeyString));
if (_unique) {
// Unique VPack index values are stored as follows:
// - Key: 7 + 8-byte object ID of index + VPack array with index
@ -323,20 +318,21 @@ int RocksDBVPackIndex::fillElement(VPackBuilder& leased,
// + separator (NUL) byte + primary key
// - Value: empty
elements.emplace_back(
RocksDBKey::IndexValue(_objectId, key, leased.slice()));
RocksDBKey::IndexValue(_objectId, leased.slice(), revisionId));
hashes.push_back(leased.slice().normalizedHash());
}
} else {
// other path for handling array elements, too
std::vector<VPackSlice> sliceStack;
buildIndexValues(leased, doc, 0, elements, sliceStack, hashes);
buildIndexValues(leased, revisionId, doc, 0, elements, sliceStack, hashes);
}
return TRI_ERROR_NO_ERROR;
}
void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased,
TRI_voc_rid_t revisionId,
VPackSlice const& document,
std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack,
@ -348,7 +344,6 @@ void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased,
}
leased.close();
StringRef key(document.get(StaticStrings::KeyString));
if (_unique) {
// Unique VPack index values are stored as follows:
// - Key: 7 + 8-byte object ID of index + VPack array with index value(s)
@ -361,15 +356,15 @@ void RocksDBVPackIndex::addIndexValue(VPackBuilder& leased,
// + primary key
// - Value: empty
elements.emplace_back(
RocksDBKey::IndexValue(_objectId, key, leased.slice()));
RocksDBKey::IndexValue(_objectId, leased.slice(), revisionId));
hashes.push_back(leased.slice().normalizedHash());
}
}
/// @brief helper function to create a set of index combinations to insert
void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
VPackSlice const document,
size_t level,
TRI_voc_rid_t revisionId,
VPackSlice const doc, size_t level,
std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack,
std::vector<uint64_t>& hashes) {
@ -377,12 +372,12 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
// Stop the recursion:
if (level == _paths.size()) {
addIndexValue(leased, document, elements, sliceStack, hashes);
addIndexValue(leased, revisionId, doc, elements, sliceStack, hashes);
return;
}
if (_expanding[level] == -1) { // the trivial, non-expanding case
VPackSlice slice = document.get(_paths[level]);
VPackSlice slice = doc.get(_paths[level]);
if (slice.isNone() || slice.isNull()) {
if (_sparse) {
return;
@ -391,7 +386,8 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
} else {
sliceStack.emplace_back(slice);
}
buildIndexValues(leased, document, level + 1, elements, sliceStack, hashes);
buildIndexValues(leased, revisionId, doc, level + 1, elements, sliceStack,
hashes);
sliceStack.pop_back();
return;
}
@ -412,14 +408,14 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
for (size_t i = level; i < _paths.size(); i++) {
sliceStack.emplace_back(illegalSlice);
}
addIndexValue(leased, document, elements, sliceStack, hashes);
addIndexValue(leased, revisionId, doc, elements, sliceStack, hashes);
for (size_t i = level; i < _paths.size(); i++) {
sliceStack.pop_back();
}
};
size_t const n = _paths[level].size();
// We have 0 <= _expanding[level] < n.
VPackSlice current(document);
VPackSlice current(doc);
for (size_t i = 0; i <= static_cast<size_t>(_expanding[level]); i++) {
if (!current.isObject()) {
finishWithNones();
@ -447,7 +443,7 @@ void RocksDBVPackIndex::buildIndexValues(VPackBuilder& leased,
if (it == seen.end()) {
seen.insert(something);
sliceStack.emplace_back(something);
buildIndexValues(leased, document, level + 1, elements, sliceStack,
buildIndexValues(leased, revisionId, doc, level + 1, elements, sliceStack,
hashes);
sliceStack.pop_back();
}
@ -526,8 +522,7 @@ int RocksDBVPackIndex::insert(transaction::Methods* trx,
// now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(docKey)
RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(revisionId)
: RocksDBValue::IndexValue();
RocksDBMethods* mthds = rocksutils::toRocksMethods(trx);
@ -594,8 +589,7 @@ int RocksDBVPackIndex::insertRaw(RocksDBMethods* batch,
// now we are going to construct the value to insert into rocksdb
// unique indexes have a different key structure
StringRef docKey(doc.get(StaticStrings::KeyString));
RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(docKey)
RocksDBValue value = _unique ? RocksDBValue::UniqueIndexValue(revisionId)
: RocksDBValue::IndexValue();
for (RocksDBKey const& key : elements) {
@ -802,13 +796,8 @@ RocksDBVPackIndexIterator* RocksDBVPackIndex::lookup(
}
}
// Secured by trx. The shared_ptr index stays valid in
// _collection at least as long as trx is running.
// Same for the iterator
auto physical = static_cast<RocksDBCollection*>(_collection->getPhysical());
auto idx = physical->primaryIndex();
return new RocksDBVPackIndexIterator(_collection, trx, mmdr, this, idx,
reverse, leftBorder, rightBorder);
return new RocksDBVPackIndexIterator(_collection, trx, mmdr, this, reverse,
leftBorder, rightBorder);
}
bool RocksDBVPackIndex::accessFitsIndex(
@ -1510,10 +1499,13 @@ void RocksDBVPackIndex::recalculateEstimates() {
_estimator->clear();
auto bounds = RocksDBKeyBounds::IndexEntries(_objectId);
rocksutils::iterateBounds(bounds, [&](rocksdb::Iterator* it) {
uint64_t hash = RocksDBVPackIndex::HashForKey(it->key());
_estimator->insert(hash);
}, arangodb::RocksDBColumnFamily::index());
rocksutils::iterateBounds(bounds,
[&](rocksdb::Iterator* it) {
uint64_t hash =
RocksDBVPackIndex::HashForKey(it->key());
_estimator->insert(hash);
},
arangodb::RocksDBColumnFamily::index());
}
Result RocksDBVPackIndex::postprocessRemove(transaction::Methods* trx,

View File

@ -66,7 +66,6 @@ class RocksDBVPackIndexIterator final : public IndexIterator {
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
arangodb::RocksDBVPackIndex const* index,
arangodb::RocksDBPrimaryIndex* primaryIndex,
bool reverse,
arangodb::velocypack::Slice const& left,
arangodb::velocypack::Slice const& right);
@ -88,12 +87,11 @@ class RocksDBVPackIndexIterator final : public IndexIterator {
bool outOfRange() const;
arangodb::RocksDBVPackIndex const* _index;
arangodb::RocksDBPrimaryIndex* _primaryIndex;
rocksdb::Comparator const* _cmp;
std::unique_ptr<rocksdb::Iterator> _iterator;
bool const _reverse;
RocksDBKeyBounds _bounds;
rocksdb::Slice _upperBound; // used for iterate_upper_bound
rocksdb::Slice _upperBound; // used for iterate_upper_bound
};
class RocksDBVPackIndex : public RocksDBIndex {
@ -111,7 +109,8 @@ class RocksDBVPackIndex : public RocksDBIndex {
bool hasSelectivityEstimate() const override { return true; }
double selectivityEstimate(arangodb::StringRef const* = nullptr) const override;
double selectivityEstimate(
arangodb::StringRef const* = nullptr) const override;
size_t memory() const override;
@ -180,9 +179,9 @@ class RocksDBVPackIndex : public RocksDBIndex {
void recalculateEstimates() override;
protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
protected:
Result postprocessRemove(transaction::Methods* trx, rocksdb::Slice const& key,
rocksdb::Slice const& value) override;
private:
bool isDuplicateOperator(arangodb::aql::AstNode const*,
@ -216,7 +215,8 @@ protected:
/// @brief helper function to build the key and value for rocksdb from the
/// vector of slices
/// @param hashes list of VPackSlice hashes for the estimator.
void addIndexValue(velocypack::Builder& leased, VPackSlice const& document,
void addIndexValue(velocypack::Builder& leased, TRI_voc_rid_t revisionId,
VPackSlice const& document,
std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack,
std::vector<uint64_t>& hashes);
@ -226,8 +226,9 @@ protected:
/// @param elements vector of resulting index entries
/// @param sliceStack working list of values to insert into the index
/// @param hashes list of VPackSlice hashes for the estimator.
void buildIndexValues(velocypack::Builder& leased, VPackSlice const document,
size_t level, std::vector<RocksDBKey>& elements,
void buildIndexValues(velocypack::Builder& leased, TRI_voc_rid_t revisionId,
VPackSlice const document, size_t level,
std::vector<RocksDBKey>& elements,
std::vector<VPackSlice>& sliceStack,
std::vector<uint64_t>& hashes);
@ -248,7 +249,6 @@ protected:
/// On insertion of a document we have to insert it into the estimator,
/// On removal we have to remove it in the estimator as well.
std::unique_ptr<RocksDBCuckooIndexEstimator<uint64_t>> _estimator;
};
} // namespace arangodb

View File

@ -53,8 +53,8 @@ RocksDBValue RocksDBValue::IndexValue() {
return RocksDBValue(RocksDBEntryType::IndexValue);
}
RocksDBValue RocksDBValue::UniqueIndexValue(StringRef const& primaryKey) {
return RocksDBValue(RocksDBEntryType::UniqueIndexValue, primaryKey);
RocksDBValue RocksDBValue::UniqueIndexValue(TRI_voc_rid_t revisionId) {
return RocksDBValue(RocksDBEntryType::UniqueIndexValue, revisionId);
}
RocksDBValue RocksDBValue::View(VPackSlice const& data) {
@ -110,6 +110,7 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type) : _type(type), _buffer() {}
RocksDBValue::RocksDBValue(RocksDBEntryType type, uint64_t data)
: _type(type), _buffer() {
switch (_type) {
case RocksDBEntryType::UniqueIndexValue:
case RocksDBEntryType::PrimaryIndexValue: {
_buffer.reserve(sizeof(uint64_t));
uint64ToPersistent(_buffer, data); // revision id
@ -121,21 +122,6 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type, uint64_t data)
}
}
RocksDBValue::RocksDBValue(RocksDBEntryType type,
arangodb::StringRef const& data)
: _type(type), _buffer() {
switch (_type) {
case RocksDBEntryType::UniqueIndexValue: {
_buffer.reserve(data.length());
_buffer.append(data.data(), data.length()); // primary key
break;
}
default:
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
}
RocksDBValue::RocksDBValue(RocksDBEntryType type, VPackSlice const& data)
: _type(type), _buffer() {
switch (_type) {
@ -156,7 +142,7 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type, VPackSlice const& data)
}
TRI_voc_rid_t RocksDBValue::revisionId(char const* data, uint64_t size) {
TRI_ASSERT(data != nullptr);
TRI_ASSERT(data != nullptr && size >= sizeof(uint64_t));
return uint64FromPersistent(data);
}

View File

@ -51,7 +51,7 @@ class RocksDBValue {
static RocksDBValue PrimaryIndexValue(TRI_voc_rid_t revisionId);
static RocksDBValue EdgeIndexValue();
static RocksDBValue IndexValue();
static RocksDBValue UniqueIndexValue(arangodb::StringRef const& primaryKey);
static RocksDBValue UniqueIndexValue(TRI_voc_rid_t revisionId);
static RocksDBValue View(VPackSlice const& data);
static RocksDBValue ReplicationApplierConfig(VPackSlice const& data);
@ -110,7 +110,6 @@ class RocksDBValue {
RocksDBValue();
explicit RocksDBValue(RocksDBEntryType type);
RocksDBValue(RocksDBEntryType type, uint64_t data);
RocksDBValue(RocksDBEntryType type, StringRef const& data);
RocksDBValue(RocksDBEntryType type, VPackSlice const& data);
private:

View File

@ -2713,12 +2713,18 @@ static void JS_TruncateVocbaseCol(
TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection");
}
// optionally specify non trx remove
bool unsafeTruncate = false;
if (args.Length() > 0) {
unsafeTruncate = TRI_ObjectToBoolean(args[0]);
}
auto t = unsafeTruncate ? AccessMode::Type::EXCLUSIVE : AccessMode::Type::WRITE;
SingleCollectionTransaction trx(
transaction::V8Context::Create(collection->vocbase(), true),
collection->cid(), AccessMode::Type::WRITE);
collection->cid(), t);
Result res = trx.begin();
if (!res.ok()) {
TRI_V8_THROW_EXCEPTION(res);
}

View File

@ -5,6 +5,7 @@
window.ClusterCoordinator = Backbone.Model.extend({
defaults: {
'name': '',
'id': '',
'status': 'ok',
'address': '',
'protocol': ''

View File

@ -326,10 +326,10 @@
this.clusterView.render();
},
node: function (name, initialized) {
node: function (id, initialized) {
this.checkUser();
if (!initialized || this.isCluster === undefined) {
this.waitForInit(this.node.bind(this), name);
this.waitForInit(this.node.bind(this), id);
return;
}
if (this.isCluster === false) {
@ -342,7 +342,7 @@
this.nodeView.remove();
}
this.nodeView = new window.NodeView({
coordname: name,
coordid: id,
coordinators: this.coordinatorCollection,
dbServers: this.dbServers
});

View File

@ -17,7 +17,7 @@
if (window.App.isCluster) {
this.coordinators = options.coordinators;
this.dbServers = options.dbServers;
this.coordname = options.coordname;
this.coordid = options.coordid;
this.updateServerTime();
// start polling with interval
@ -47,8 +47,7 @@
var callback = function () {
this.continueRender();
this.breadcrumb(arangoHelper.getCoordinatorShortName(this.coordname));
// window.arangoHelper.buildNodeSubNav(this.coordname, 'Dashboard', 'Logs')
this.breadcrumb(arangoHelper.getCoordinatorShortName(this.coordid));
$(window).trigger('resize');
}.bind(this);
@ -59,8 +58,8 @@
if (!this.initDBDone) {
this.waitForDBServers(callback);
} else {
this.coordname = window.location.hash.split('/')[1];
this.coordinator = this.coordinators.findWhere({name: this.coordname});
this.coordid = window.location.hash.split('/')[1];
this.coordinator = this.coordinators.findWhere({id: this.coordid});
callback();
}
},
@ -79,7 +78,7 @@
raw: this.coordinator.get('address'),
isDBServer: false,
endpoint: this.coordinator.get('protocol') + '://' + this.coordinator.get('address'),
target: this.coordinator.get('name')
target: this.coordinator.get('id')
}
});
} else {
@ -113,7 +112,7 @@
if (self.coordinators.length === 0) {
self.waitForCoordinators(callback);
} else {
self.coordinator = self.coordinators.findWhere({name: self.coordname});
self.coordinator = self.coordinators.findWhere({id: self.coordid});
self.initCoordDone = true;
if (callback) {
callback();

View File

@ -362,7 +362,7 @@ class RandomDeviceCombined : public RandomDevice {
class RandomDeviceMersenne : public RandomDevice {
public:
RandomDeviceMersenne()
: engine(RandomDevice::seed()) {}
: engine((uint_fast32_t)RandomDevice::seed()) {}
uint32_t random() { return engine(); }
void seed(uint64_t seed) { engine.seed(static_cast<decltype(engine)::result_type>(seed)); }

View File

@ -29,4 +29,5 @@ if [[ -e $fpath ]]; then
fi
#execute
sudo cgexec -g memory:arango_mem su - $USER -c "$@"
sudo cgexec -g memory:arango_mem su -l -p -c "$@" $USER

View File

@ -222,12 +222,11 @@ SECTION("test_primary_index") {
/// @brief test edge index
SECTION("test_edge_index") {
RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, StringRef("a/1"), StringRef("foobar"));
RocksDBKey key1 = RocksDBKey::EdgeIndexValue(0, StringRef("a/1"), 33);
auto const& s1 = key1.string();
CHECK(s1.size() == sizeof(char) + sizeof(uint64_t) + strlen("a/1") + sizeof(char) + strlen("foobar") + sizeof(char));
CHECK(s1 == std::string("5\0\0\0\0\0\0\0\0a/1\0foobar\x06", 20));
CHECK(s1.size() == sizeof(char) + sizeof(uint64_t) + strlen("a/1") + sizeof(char) + sizeof(uint64_t));
CHECK(s1 == std::string("5\0\0\0\0\0\0\0\0a/1\0!\0\0\0\0\0\0\0", 21));
}
}

View File

@ -47,10 +47,11 @@ void doFromToTest(double num){
template <typename T>
void doFromToTest(T num){
T x = num , y;
char s[sizeof(x)];
char* p = &s[0];
toPersistent(x,p);
y = fromPersistent<T>(p);
char s[sizeof(x)] = {0};
char* p1 = &s[0];
char* p2 = p1;
toPersistent(x,p1);
y = fromPersistent<T>(p2);
CHECK((x == y));
}