1
0
Fork 0

Make huge deletes go in smaller batches

This commit is contained in:
Simon Grätzer 2018-05-12 01:25:01 +02:00
parent 4703049c4e
commit 40b45bf4d1
2 changed files with 3 additions and 2 deletions

View File

@ -171,7 +171,8 @@ class ExecutionNode {
template<typename T, typename FromType>
static inline T castTo(FromType node) noexcept {
static_assert(std::is_pointer<T>::value, "invalid type passed into ExecutionNode::castTo");
static_assert(node->IsExecutionNode, "invalid type passed into ExecutionNode::castTo");
static_assert(std::is_pointer<FromType>::value, "invalid type passed into ExecutionNode::castTo");
static_assert(std::remove_pointer<FromType>::type::IsExecutionNode, "invalid type passed into ExecutionNode::castTo");
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
T result = dynamic_cast<T>(node);

View File

@ -198,7 +198,7 @@ Result removeLargeRange(rocksdb::TransactionDB* db,
++total;
++counter;
batch.Delete(cf, it->key());
if (counter == 1000) {
if (counter >= 200) {
LOG_TOPIC(DEBUG, Logger::FIXME) << "intermediate delete write";
// Persist the batched deletes every 200 documents
rocksdb::Status status = bDB->Write(rocksdb::WriteOptions(), &batch);