
Dropping a collection in RocksDB now writes its deletions in intermediate batches.

Furthermore, this fixes an issue with column family usage in drop: the range removal was using the wrong column family.
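The pattern behind both fixes is small enough to show on its own. Below is a minimal, self-contained sketch of it, assuming a plain RocksDB setup; the helper name deleteRangeBatched and the hard-coded flush threshold of 1000 are illustrative (mirroring the commit) and not the actual ArangoDB code:

// Sketch only, not the ArangoDB helper: delete every key in [lower, upper)
// from one column family, flushing the WriteBatch every 1000 deletes
// instead of accumulating one huge batch.
#include <memory>

#include <rocksdb/comparator.h>
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/slice.h>
#include <rocksdb/write_batch.h>

rocksdb::Status deleteRangeBatched(rocksdb::DB* db,
                                   rocksdb::ColumnFamilyHandle* cf,
                                   rocksdb::Slice const& lower,
                                   rocksdb::Slice const& upper) {
  // use the column family's own comparator, not the default one
  rocksdb::Comparator const* cmp = cf->GetComparator();

  rocksdb::ReadOptions readOptions;
  readOptions.fill_cache = false;  // a one-shot scan should not evict hot blocks
  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(readOptions, cf));

  rocksdb::WriteBatch batch;
  size_t counter = 0;
  for (it->Seek(lower); it->Valid() && cmp->Compare(it->key(), upper) < 0;
       it->Next()) {
    batch.Delete(cf, it->key());
    if (++counter == 1000) {
      // flush an intermediate batch so memory usage stays bounded
      rocksdb::Status s = db->Write(rocksdb::WriteOptions(), &batch);
      if (!s.ok()) {
        return s;
      }
      batch.Clear();
      counter = 0;
    }
  }
  if (counter > 0) {
    // write whatever is left over
    rocksdb::Status s = db->Write(rocksdb::WriteOptions(), &batch);
    if (!s.ok()) {
      return s;
    }
  }
  return it->status();  // surface any iterator error
}

Flushing every 1000 keys keeps the batch memory bounded for arbitrarily large collections, at the cost of the range delete no longer being atomic, which a drop can tolerate since the whole range is going away anyway.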

commit 708cd81362b56213dd4d7a69b1e4eea120de0db4
Merge: b8896ea 924ef44
Author: Michael Hackstein <michael@arangodb.com>
Date:   Thu Jun 8 14:47:56 2017 +0200

    Merge branch 'devel' of github.com:arangodb/arangodb into feature/drop-collection-batches

commit b8896ea5479d56f3e6d55d6094ec2658b058dc04
Author: Michael Hackstein <michael@arangodb.com>
Date:   Thu Jun 8 14:47:37 2017 +0200

    Fixed drop collection in RocksDB for all entries that are not compacted into a complete file: it was using the wrong column family.

commit 59f6a9d51b1c12d665eba683d3b09f1b01ce77fc
Author: Michael Hackstein <michael@arangodb.com>
Date:   Thu Jun 8 13:00:18 2017 +0200

    Remove large range now uses the ColumnFamily comparator instead of the default one. It also writes batches of 1000 documents to RocksDB instead of performing the full deletion at once.
Michael Hackstein 2017-06-08 14:48:33 +02:00
parent 924ef445b0
commit 2e1d7b0fe1
1 changed file with 28 additions and 10 deletions
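The column-family detail in the commits above is worth spelling out before the diff: db->GetOptions().comparator is the comparator of the default column family only, so using it to bound an iteration over a different column family can order keys incorrectly. Each family's own comparator comes from its handle, and the iterator must be opened on that same handle, as the diff below does. A hypothetical fragment illustrating the pitfall (not the committed code):

#include <rocksdb/comparator.h>
#include <rocksdb/db.h>
#include <rocksdb/slice.h>

// Hypothetical helper, for illustration only: checks whether a key is still
// inside a range using the ordering of that key's own column family.
bool keyBelowUpperBound(rocksdb::ColumnFamilyHandle* handle,
                        rocksdb::Slice const& key,
                        rocksdb::Slice const& upper) {
  // wrong: db->GetOptions().comparator -- that is the default family's order
  rocksdb::Comparator const* cmp = handle->GetComparator();  // this family's order
  return cmp->Compare(key, upper) < 0;
}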


@@ -214,8 +214,8 @@ std::size_t countKeyRange(rocksdb::DB* db, rocksdb::ReadOptions const& opts,
 Result removeLargeRange(rocksdb::TransactionDB* db,
                         RocksDBKeyBounds const& bounds) {
   LOG_TOPIC(DEBUG, Logger::FIXME) << "removing large range: " << bounds;
-
-  try {
+  rocksdb::ColumnFamilyHandle* handle = bounds.columnFamily();
+  try {
     // delete files in range lower..upper
     rocksdb::Slice lower(bounds.start());
     rocksdb::Slice upper(bounds.end());
@@ -232,19 +232,37 @@ Result removeLargeRange(rocksdb::TransactionDB* db,
     // go on and delete the remaining keys (delete files in range does not
     // necessarily find them all, just complete files)
-    rocksdb::Comparator const* cmp = db->GetOptions().comparator;
+    rocksdb::Comparator const* cmp = handle->GetComparator();
     rocksdb::WriteBatch batch;
     rocksdb::ReadOptions readOptions;
     readOptions.fill_cache = false;
-    std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(readOptions));
-    // TODO: split this into multiple batches if batches get too big
+    std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(readOptions, handle));
     it->Seek(lower);
+    size_t counter = 0;
     while (it->Valid() && cmp->Compare(it->key(), upper) < 0) {
       TRI_ASSERT(cmp->Compare(it->key(), lower) > 0);
+      counter++;
       batch.Delete(it->key());
       it->Next();
+      if (counter == 1000) {
+        LOG_TOPIC(DEBUG, Logger::FIXME) << "Intermediate delete write";
+        // Persist deletes all 1000 documents
+        rocksdb::Status status = db->Write(rocksdb::WriteOptions(), &batch);
+        if (!status.ok()) {
+          LOG_TOPIC(WARN, arangodb::Logger::FIXME)
+              << "RocksDB key deletion failed: " << status.ToString();
+          return TRI_ERROR_INTERNAL;
+        }
+        batch.Clear();
+        counter = 0;
+      }
     }
+    if (counter > 0) {
+      LOG_TOPIC(DEBUG, Logger::FIXME) << "Remove large batch from bounds";
+      // We still have sth to write
       // now apply deletion batch
       rocksdb::Status status = db->Write(rocksdb::WriteOptions(), &batch);
@@ -253,7 +271,7 @@ Result removeLargeRange(rocksdb::TransactionDB* db,
             << "RocksDB key deletion failed: " << status.ToString();
         return TRI_ERROR_INTERNAL;
       }
+    }
     return TRI_ERROR_NO_ERROR;
   } catch (arangodb::basics::Exception const& ex) {
     LOG_TOPIC(ERR, arangodb::Logger::FIXME)