mirror of https://gitee.com/bigwinds/arangodb
Truncate Fix (Nr 6) (#7176)
parent 42fd0825ab
commit f2f4a0fdaf
@@ -77,7 +77,7 @@ void CollectionAccessingNode::toVelocyPack(arangodb::velocypack::Builder& builde
   builder.add("satellite", VPackValue(_collection->isSatellite()));
 
   if (ServerState::instance()->isCoordinator()) {
-    builder.add("numberOfShards", VPackValue(_collection->numberOfShards()));
+    builder.add(StaticStrings::NumberOfShards, VPackValue(_collection->numberOfShards()));
   }
 
   if (!_restrictedTo.empty()) {
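The hunk above only swaps a string literal for the shared StaticStrings constant. As a rough illustration of why that helps (the namespace and map below are stand-ins, not ArangoDB code): writer and reader use the same constant, so the attribute name cannot drift between them.

// Sketch only: a shared attribute-name constant, with std::map standing in
// for the VelocyPack builder/slice pair.
#include <cassert>
#include <map>
#include <string>

namespace StaticStringsSketch {  // hypothetical namespace, not ArangoDB's
const std::string NumberOfShards = "numberOfShards";
}

int main() {
  std::map<std::string, int> builder;
  builder[StaticStringsSketch::NumberOfShards] = 3;  // writer side

  // reader side uses the same constant, so a typo cannot split the two
  assert(builder.at(StaticStringsSketch::NumberOfShards) == 3);
  return 0;
}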
@@ -299,12 +299,12 @@ void RestCollectionHandler::handleCommandPost() {
   }
 
   // for some "security" a white-list of allowed parameters
-  VPackBuilder filtered = VPackCollection::keep(
-      body,
+  VPackBuilder filtered = VPackCollection::keep(body,
       std::unordered_set<std::string>{
           "doCompact", "isSystem", "id", "isVolatile", "journalSize",
           "indexBuckets", "keyOptions", "waitForSync", "cacheEnabled",
-          "shardKeys", "numberOfShards", "distributeShardsLike", "avoidServers",
+          StaticStrings::ShardKeys, StaticStrings::NumberOfShards,
+          StaticStrings::DistributeShardsLike, "avoidServers",
           "isSmart", "shardingStrategy", "smartGraphAttribute", "replicationFactor",
           "servers"});
   VPackSlice const parameters = filtered.slice();
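The handler keeps only an allow-list of creation parameters from the request body. Below is a minimal sketch of that filtering idea using plain std containers instead of the VelocyPack VPackCollection::keep call; the function name and types are illustrative only.

// Sketch: keep only allow-listed keys from an untrusted parameter map.
// std::map stands in for the parsed request body.
#include <cassert>
#include <map>
#include <string>
#include <unordered_set>

std::map<std::string, std::string> keepAllowed(
    std::map<std::string, std::string> const& body,
    std::unordered_set<std::string> const& allowed) {
  std::map<std::string, std::string> filtered;
  for (auto const& [key, value] : body) {
    if (allowed.count(key) != 0) {
      filtered.emplace(key, value);
    }
  }
  return filtered;
}

int main() {
  std::unordered_set<std::string> allowed{"waitForSync", "numberOfShards"};
  std::map<std::string, std::string> body{
      {"waitForSync", "true"}, {"evilOption", "1"}};
  auto filtered = keepAllowed(body, allowed);
  assert(filtered.count("waitForSync") == 1);
  assert(filtered.count("evilOption") == 0);  // unknown keys are dropped
  return 0;
}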
@@ -1909,6 +1909,8 @@ RocksDBCollection::serializeIndexEstimates(
     rocksdb::Transaction* rtrx, rocksdb::SequenceNumber inputSeq) const {
   auto outputSeq = inputSeq;
   std::string output;
+  RocksDBKey key;
+
   for (auto index : getIndexes()) {
     output.clear();
     RocksDBIndex* cindex = static_cast<RocksDBIndex*>(index.get());
@@ -1923,7 +1925,6 @@ RocksDBCollection::serializeIndexEstimates(
         << "serialized estimate for index '" << cindex->objectId()
         << "' valid through seq " << outputSeq;
     if (output.size() > sizeof(uint64_t)) {
-      RocksDBKey key;
       key.constructIndexEstimateValue(cindex->objectId());
       rocksdb::Slice value(output);
       rocksdb::Status s =
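The two serializeIndexEstimates hunks only hoist the RocksDBKey out of the per-index loop, so one key object is reused and rebuilt on each iteration instead of being constructed inside the if block. A generic sketch of that reuse pattern, with std::string standing in for RocksDBKey:

// Sketch: reuse one buffer object across loop iterations instead of
// declaring it inside the loop body.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<int> objectIds{10, 11, 12};
  std::string key;  // hoisted: constructed once, rebuilt per iteration
  for (int id : objectIds) {
    key = "index-estimate-" + std::to_string(id);  // rebuild key contents
    std::cout << key << "\n";
  }
  return 0;
}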
@@ -580,28 +580,29 @@ class RocksDBCuckooIndexEstimator {
     Result res = basics::catchVoidToResult([&]() -> void {
       std::vector<Key> inserts;
       std::vector<Key> removals;
-      bool foundTruncate = false;
 
+      // truncate will increase this sequence
+      rocksdb::SequenceNumber ignoreSeq = 0;
       while (true) {
+        bool foundTruncate = false;
         // find out if we have buffers to apply
         {
           WRITE_LOCKER(locker, _lock);
 
-          rocksdb::SequenceNumber ignoreSeq = 0;
           // check for a truncate marker
           if (!_truncateBuffer.empty()) {
-            auto it = _truncateBuffer.begin();  // sorted ASC
-            while (*it <= commitSeq && *it >= ignoreSeq) {
-              ignoreSeq = *it;
-              foundTruncate = true;
-              it = _truncateBuffer.erase(it);
-            }
+            auto it = _truncateBuffer.begin();  // sorted ASC
+            while (it != _truncateBuffer.end() && *it <= commitSeq) {
+              ignoreSeq = *it;
+              TRI_ASSERT(ignoreSeq != 0);
+              foundTruncate = true;
+              it = _truncateBuffer.erase(it);
+            }
           }
 
           // check for inserts
           if (!_insertBuffers.empty()) {
             auto it = _insertBuffers.begin();  // sorted ASC
             if (it->first <= commitSeq) {
-              if (!foundTruncate || it->first > ignoreSeq) {
+              if (it->first >= ignoreSeq) {
                 inserts = std::move(it->second);
                 TRI_ASSERT(!inserts.empty());
               }
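This hunk is the core of the fix: the old drain loop dereferenced the iterator without checking for the end of _truncateBuffer, which is undefined behaviour once the last marker has been erased, and ignoreSeq now lives outside the outer loop while foundTruncate resets on every pass. A standalone sketch of the corrected drain pattern, assuming a plain std::set of sequence numbers rather than the estimator class itself:

// Minimal standalone sketch (not the ArangoDB class): drain a sorted buffer
// of truncate sequence numbers up to a commit sequence, checking for end()
// before dereferencing the iterator.
#include <cassert>
#include <cstdint>
#include <set>

uint64_t drainTruncates(std::set<uint64_t>& truncateBuffer, uint64_t commitSeq) {
  uint64_t ignoreSeq = 0;            // truncate will increase this sequence
  auto it = truncateBuffer.begin();  // sorted ASC
  while (it != truncateBuffer.end() && *it <= commitSeq) {
    ignoreSeq = *it;
    assert(ignoreSeq != 0);
    it = truncateBuffer.erase(it);   // erase returns the next iterator
  }
  return ignoreSeq;  // 0 means no truncate up to commitSeq
}

int main() {
  std::set<uint64_t> buffer{5, 9, 20};
  // everything up to seq 10 is consumed; seq 20 stays buffered
  uint64_t ignoreSeq = drainTruncates(buffer, 10);
  assert(ignoreSeq == 9);
  assert(buffer.size() == 1 && *buffer.begin() == 20);
  return 0;
}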
@@ -613,7 +614,7 @@ class RocksDBCuckooIndexEstimator {
           if (!_removalBuffers.empty()) {
             auto it = _removalBuffers.begin();  // sorted ASC
             if (it->first <= commitSeq) {
-              if (!foundTruncate || it->first > ignoreSeq) {
+              if (it->first >= ignoreSeq) {
                 removals = std::move(it->second);
                 TRI_ASSERT(!removals.empty());
               }
@@ -624,7 +625,6 @@ class RocksDBCuckooIndexEstimator {
 
         if (foundTruncate) {
           clear();  // clear estimates
-          foundTruncate = false;
         }
 
         // no inserts or removals left to apply, drop out of loop
@@ -282,7 +282,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
     }
   }
 
-  /// Truncate indexes of collection
+  /// Truncate indexes of collection with objectId
   bool truncateIndexes(uint64_t objectId) {
     RocksDBEngine* engine =
         static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
@@ -304,9 +304,10 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
     }
 
     for (auto const& idx : coll->getIndexes()) {
+      LOG_DEVEL << "truncating index: " << idx->typeName();
      RocksDBIndex* ridx = static_cast<RocksDBIndex*>(idx.get());
      RocksDBCuckooIndexEstimator<uint64_t>* est = ridx->estimator();
-     if (est) {
+     if (est && est->commitSeq() < currentSeqNum) {
        est->bufferTruncate(currentSeqNum);
      }
    }
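WBReader now buffers a truncate only for estimators whose committed sequence is still behind the sequence number being replayed, so an estimator that already persisted past that point is not cleared again during recovery. A standalone sketch of that guard; the Estimator type here is a stand-in, not RocksDBCuckooIndexEstimator:

// Sketch: only buffer a truncate for estimators that have not yet committed
// past the sequence number being replayed.
#include <cassert>
#include <cstdint>
#include <vector>

struct Estimator {  // stand-in type
  uint64_t committedSeq = 0;
  std::vector<uint64_t> bufferedTruncates;

  uint64_t commitSeq() const { return committedSeq; }
  void bufferTruncate(uint64_t seq) { bufferedTruncates.push_back(seq); }
};

void replayTruncate(Estimator* est, uint64_t currentSeqNum) {
  if (est && est->commitSeq() < currentSeqNum) {
    est->bufferTruncate(currentSeqNum);  // estimator is behind: apply later
  }
  // otherwise the estimator already covers this sequence; nothing to do
}

int main() {
  Estimator behind;
  behind.committedSeq = 5;
  Estimator current;
  current.committedSeq = 20;

  replayTruncate(&behind, 10);
  replayTruncate(&current, 10);

  assert(behind.bufferedTruncates.size() == 1);
  assert(current.bufferedTruncates.empty());
  return 0;
}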
@@ -314,6 +315,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
     return true;
   }
 
+  // find estimator for index
   RocksDBCuckooIndexEstimator<uint64_t>* findEstimator(uint64_t objectId) {
     RocksDBEngine* engine =
         static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE);
@@ -536,12 +538,12 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
         ops->removed = 0;
         ops->added = 0;
         ops->mustTruncate = true;
-
-        if (!truncateIndexes(objectId)) {
-          // unable to truncate indexes of the collection.
-          // may be due to collection having been deleted etc.
-          LOG_TOPIC(WARN, Logger::ENGINES) << "unable to truncate indexes for objectId " << objectId;
-        }
       }
+      // index estimates have their own commitSeq
+      if (!truncateIndexes(objectId)) {
+        // unable to truncate indexes of the collection.
+        // may be due to collection having been deleted etc.
+        LOG_TOPIC(WARN, Logger::ENGINES) << "unable to truncate indexes for objectId " << objectId;
+      }
 
       _lastRemovedDocRid = 0;  // reset in any other case
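The final hunk moves the truncateIndexes call out of the enclosing block: index estimates track their own commit sequence, so they must be told about the truncate even when the document-counter adjustment above is skipped. A compressed, illustrative sketch of that control flow (all names here are hypothetical):

// Sketch of the reordered control flow: adjust the counter only when
// available, but always forward the truncate to the index estimates.
#include <cstdint>
#include <iostream>

struct CounterOps {  // stand-in for the counter adjustment struct
  uint64_t removed = 0;
  uint64_t added = 0;
  bool mustTruncate = false;
};

bool truncateIndexes(uint64_t objectId) {
  std::cout << "buffering truncate for objectId " << objectId << "\n";
  return true;
}

void handleTruncate(CounterOps* ops, uint64_t objectId) {
  if (ops != nullptr) {  // counter adjustment is optional
    ops->removed = 0;
    ops->added = 0;
    ops->mustTruncate = true;
  }
  // index estimates have their own commitSeq: always tell them
  if (!truncateIndexes(objectId)) {
    std::cout << "unable to truncate indexes for objectId " << objectId << "\n";
  }
}

int main() {
  handleTruncate(nullptr, 42);  // even without counter ops, indexes are told
  return 0;
}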